diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs
index 3412a0baff..921b9c8a53 100644
--- a/crates/db/src/db.rs
+++ b/crates/db/src/db.rs
@@ -20,8 +20,8 @@ use std::fs::create_dir_all;
 use std::path::{Path, PathBuf};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::time::{SystemTime, UNIX_EPOCH};
-use util::{async_iife, ResultExt};
 use util::channel::ReleaseChannel;
+use util::{async_iife, ResultExt};
 
 const CONNECTION_INITIALIZE_QUERY: &'static str = sql!(
     PRAGMA foreign_keys=TRUE;
@@ -42,14 +42,17 @@ lazy_static::lazy_static! {
     static ref ZED_STATELESS: bool = std::env::var("ZED_STATELESS").map_or(false, |v| !v.is_empty());
     static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(());
     pub static ref BACKUP_DB_PATH: RwLock<Option<PathBuf>> = RwLock::new(None);
-    pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); 
+    pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false);
 }
 
 /// Open or create a database at the given directory path.
 /// This will retry a couple times if there are failures. If opening fails once, the db directory
 /// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created.
 /// In either case, static variables are set so that the user can be notified.
-pub async fn open_db<M: Migrator + 'static>(db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection<M> {
+pub async fn open_db<M: Migrator + 'static>(
+    db_dir: &Path,
+    release_channel: &ReleaseChannel,
+) -> ThreadSafeConnection<M> {
     if *ZED_STATELESS {
         return open_fallback_db().await;
     }
@@ -69,11 +72,11 @@ pub async fn open_db<M: Migrator + 'static>(db_dir: &Path, release_channel: &Rel
     //
     // Basically: Don't ever push invalid migrations to stable or everyone will have
     // a bad time.
-    
+
     // If no db folder, create one at 0-{channel}
     create_dir_all(&main_db_dir).context("Could not create db directory")?;
     let db_path = main_db_dir.join(Path::new(DB_FILE_NAME));
-    
+
     // Optimistically open databases in parallel
     if !DB_FILE_OPERATIONS.is_locked() {
         // Try building a connection
@@ -81,7 +84,7 @@ pub async fn open_db<M: Migrator + 'static>(db_dir: &Path, release_channel: &Rel
             return Ok(connection)
         };
     }
-    
+
     // Take a lock in the failure case so that we move the db once per process instead
     // of potentially multiple times from different threads. This shouldn't happen in the
     // normal path
@@ -89,12 +92,12 @@ pub async fn open_db<M: Migrator + 'static>(db_dir: &Path, release_channel: &Rel
     if let Some(connection) = open_main_db(&db_path).await {
         return Ok(connection)
     };
-    
+
     let backup_timestamp = SystemTime::now()
         .duration_since(UNIX_EPOCH)
         .expect("System clock is set before the unix timestamp, Zed does not support this region of spacetime")
         .as_millis();
-    
+
     // If failed, move 0-{channel} to {current unix timestamp}-{channel}
     let backup_db_dir = db_dir.join(Path::new(&format!(
         "{}-{}",
@@ -110,7 +113,7 @@ pub async fn open_db<M: Migrator + 'static>(db_dir: &Path, release_channel: &Rel
         let mut guard = BACKUP_DB_PATH.write();
         *guard = Some(backup_db_dir);
     }
-    
+
     // Create a new 0-{channel}
     create_dir_all(&main_db_dir).context("Should be able to create the database directory")?;
     let db_path = main_db_dir.join(Path::new(DB_FILE_NAME));
@@ -122,10 +125,10 @@ pub async fn open_db<M: Migrator + 'static>(db_dir: &Path, release_channel: &Rel
     if let Some(connection) = connection {
         return connection;
     }
-    
+
     // Set another static ref so that we can escalate the notification
     ALL_FILE_DB_FAILED.store(true, Ordering::Release);
-    
+
     // If still failed, create an in memory db with a known name
     open_fallback_db().await
 }
@@ -179,15 +182,15 @@ macro_rules! define_connection {
                 &self.0
             }
         }
-        
+
         impl $crate::sqlez::domain::Domain for $t {
             fn name() -> &'static str {
                 stringify!($t)
             }
-            
+
             fn migrations() -> &'static [&'static str] {
                 $migrations
-            } 
+            }
         }
 
         #[cfg(any(test, feature = "test-support"))]
@@ -210,15 +213,15 @@ macro_rules! define_connection {
                 &self.0
             }
         }
-        
+
         impl $crate::sqlez::domain::Domain for $t {
             fn name() -> &'static str {
                 stringify!($t)
             }
-            
+
             fn migrations() -> &'static [&'static str] {
                 $migrations
-            } 
+            }
         }
 
         #[cfg(any(test, feature = "test-support"))]
@@ -237,134 +240,157 @@ macro_rules! define_connection {
 mod tests {
     use std::{fs, thread};
 
-    use sqlez::{domain::Domain, connection::Connection};
+    use sqlez::{connection::Connection, domain::Domain};
     use sqlez_macros::sql;
     use tempdir::TempDir;
 
     use crate::{open_db, DB_FILE_NAME};
-    
+
     // Test bad migration panics
     #[gpui::test]
     #[should_panic]
     async fn test_bad_migration_panics() {
         enum BadDB {}
-        
+
         impl Domain for BadDB {
             fn name() -> &'static str {
                 "db_tests"
             }
-            
+
             fn migrations() -> &'static [&'static str] {
-                &[sql!(CREATE TABLE test(value);),
+                &[
+                    sql!(CREATE TABLE test(value);),
                     // failure because test already exists
-                    sql!(CREATE TABLE test(value);)]
+                    sql!(CREATE TABLE test(value);),
+                ]
             }
         }
-        
+
         let tempdir = TempDir::new("DbTests").unwrap();
         let _bad_db = open_db::<BadDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
     }
-    
+
     /// Test that DB exists but corrupted (causing recreate)
     #[gpui::test]
     async fn test_db_corruption() {
         enum CorruptedDB {}
-        
+
         impl Domain for CorruptedDB {
             fn name() -> &'static str {
                 "db_tests"
             }
-            
+
             fn migrations() -> &'static [&'static str] {
                 &[sql!(CREATE TABLE test(value);)]
             }
         }
-        
+
         enum GoodDB {}
-        
+
         impl Domain for GoodDB {
             fn name() -> &'static str {
                 "db_tests" //Notice same name
             }
-            
+
             fn migrations() -> &'static [&'static str] {
                 &[sql!(CREATE TABLE test2(value);)] //But different migration
             }
         }
-        
+
         let tempdir = TempDir::new("DbTests").unwrap();
         {
-            let corrupt_db = open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
+            let corrupt_db =
+                open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
             assert!(corrupt_db.persistent());
         }
-        
-        
+
         let good_db = open_db::<GoodDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
-        assert!(good_db.select_row::<usize>("SELECT * FROM test2").unwrap()().unwrap().is_none());
-        
-        let mut corrupted_backup_dir = fs::read_dir(
-            tempdir.path()
-        ).unwrap().find(|entry| {
-            !entry.as_ref().unwrap().file_name().to_str().unwrap().starts_with("0")
-        }
-        ).unwrap().unwrap().path();
+        assert!(
+            good_db.select_row::<usize>("SELECT * FROM test2").unwrap()()
+                .unwrap()
+                .is_none()
+        );
+
+        let mut corrupted_backup_dir = fs::read_dir(tempdir.path())
+            .unwrap()
+            .find(|entry| {
+                !entry
+                    .as_ref()
+                    .unwrap()
+                    .file_name()
+                    .to_str()
+                    .unwrap()
+                    .starts_with("0")
+            })
+            .unwrap()
+            .unwrap()
+            .path();
         corrupted_backup_dir.push(DB_FILE_NAME);
-        
+
         dbg!(&corrupted_backup_dir);
-        
+
         let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy());
-        assert!(backup.select_row::<usize>("SELECT * FROM test").unwrap()().unwrap().is_none());
+        assert!(backup.select_row::<usize>("SELECT * FROM test").unwrap()()
+            .unwrap()
+            .is_none());
     }
-    
+
     /// Test that DB exists but corrupted (causing recreate)
     #[gpui::test]
     async fn test_simultaneous_db_corruption() {
         enum CorruptedDB {}
-        
+
         impl Domain for CorruptedDB {
             fn name() -> &'static str {
                 "db_tests"
             }
-            
+
             fn migrations() -> &'static [&'static str] {
                 &[sql!(CREATE TABLE test(value);)]
             }
         }
-        
+
         enum GoodDB {}
-        
+
         impl Domain for GoodDB {
             fn name() -> &'static str {
                 "db_tests" //Notice same name
             }
-            
+
             fn migrations() -> &'static [&'static str] {
                 &[sql!(CREATE TABLE test2(value);)] //But different migration
             }
         }
-        
+
         let tempdir = TempDir::new("DbTests").unwrap();
         {
             // Setup the bad database
-            let corrupt_db = open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
+            let corrupt_db =
+                open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
             assert!(corrupt_db.persistent());
         }
-        
+
         // Try to connect to it a bunch of times at once
         let mut guards = vec![];
         for _ in 0..10 {
             let tmp_path = tempdir.path().to_path_buf();
             let guard = thread::spawn(move || {
-                let good_db = smol::block_on(open_db::<GoodDB>(tmp_path.as_path(), &util::channel::ReleaseChannel::Dev));
-                assert!(good_db.select_row::<usize>("SELECT * FROM test2").unwrap()().unwrap().is_none());
+                let good_db = smol::block_on(open_db::<GoodDB>(
+                    tmp_path.as_path(),
+                    &util::channel::ReleaseChannel::Dev,
+                ));
+                assert!(
+                    good_db.select_row::<usize>("SELECT * FROM test2").unwrap()()
+                        .unwrap()
+                        .is_none()
+                );
             });
-            
+
             guards.push(guard);
-        }
-        
-        for guard in guards.into_iter() {
-            assert!(guard.join().is_ok());
-        }
+        }
+
+        for guard in guards.into_iter() {
+            assert!(guard.join().is_ok());
+        }
     }
 }
diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs
index 8e909583de..bd4e6653d9 100644
--- a/crates/workspace/src/persistence.rs
+++ b/crates/workspace/src/persistence.rs
@@ -8,7 +8,7 @@ use anyhow::{anyhow, bail, Context, Result};
 use db::{define_connection, query, sqlez::connection::Connection, sqlez_macros::sql};
 use gpui::Axis;
 
-use util::{ unzip_option, ResultExt};
+use util::{unzip_option, ResultExt};
 
 use crate::dock::DockPosition;
 use crate::WorkspaceId;
@@ -31,7 +31,7 @@ define_connection! {
             timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
             FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
         ) STRICT;
-        
+
         CREATE TABLE pane_groups(
             group_id INTEGER PRIMARY KEY,
             workspace_id INTEGER NOT NULL,
@@ -43,7 +43,7 @@ define_connection! {
                 ON UPDATE CASCADE,
             FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
         ) STRICT;
-        
+
         CREATE TABLE panes(
             pane_id INTEGER PRIMARY KEY,
             workspace_id INTEGER NOT NULL,
@@ -52,7 +52,7 @@ define_connection! {
                 ON DELETE CASCADE
                 ON UPDATE CASCADE
         ) STRICT;
-        
+
         CREATE TABLE center_panes(
             pane_id INTEGER PRIMARY KEY,
             parent_group_id INTEGER, // NULL means that this is a root pane
@@ -61,7 +61,7 @@ define_connection! {
                 ON DELETE CASCADE,
             FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
         ) STRICT;
-        
+
         CREATE TABLE items(
             item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
             workspace_id INTEGER NOT NULL,
@@ -96,10 +96,10 @@ impl WorkspaceDb {
             WorkspaceLocation,
             bool,
             DockPosition,
-        ) = 
+        ) =
             self.select_row_bound(sql!{
                 SELECT workspace_id, workspace_location, left_sidebar_open, dock_visible, dock_anchor
-                FROM workspaces 
+                FROM workspaces
                 WHERE workspace_location = ?
             })
             .and_then(|mut prepared_statement| (prepared_statement)(&workspace_location))
@@ -119,7 +119,7 @@ impl WorkspaceDb {
                 .context("Getting center group")
                 .log_err()?,
             dock_position,
-            left_sidebar_open
+            left_sidebar_open,
         })
     }
 
@@ -158,7 +158,12 @@ impl WorkspaceDb {
             dock_visible = ?4,
             dock_anchor = ?5,
             timestamp = CURRENT_TIMESTAMP
-        ))?((workspace.id, &workspace.location, workspace.left_sidebar_open, workspace.dock_position))
+        ))?((
+            workspace.id,
+            &workspace.location,
+            workspace.left_sidebar_open,
+            workspace.dock_position,
+        ))
         .context("Updating workspace")?;
 
         // Save center pane group and dock pane
@@ -191,20 +196,20 @@ impl WorkspaceDb {
 
     query! {
         fn recent_workspaces() -> Result<Vec<(WorkspaceId, WorkspaceLocation)>> {
-            SELECT workspace_id, workspace_location 
+            SELECT workspace_id, workspace_location
             FROM workspaces
             WHERE workspace_location IS NOT NULL
-            ORDER BY timestamp DESC 
+            ORDER BY timestamp DESC
         }
     }
-    
+
     query! {
         async fn delete_stale_workspace(id: WorkspaceId) -> Result<()> {
             DELETE FROM workspaces
             WHERE workspace_id IS ?
         }
     }
-    
+
    // Returns the recent locations which are still valid on disk and deletes ones which no longer
    // exist.
    pub async fn recent_workspaces_on_disk(&self) -> Result<Vec<(WorkspaceId, WorkspaceLocation)>> {
@@ -217,7 +222,7 @@ impl WorkspaceDb {
                 delete_tasks.push(self.delete_stale_workspace(id));
             }
         }
-        
+
         futures::future::join_all(delete_tasks).await;
         Ok(result)
     }
@@ -233,10 +238,16 @@ impl WorkspaceDb {
     }
 
     fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
-        Ok(self.get_pane_group(workspace_id, None)?
+        Ok(self
+            .get_pane_group(workspace_id, None)?
             .into_iter()
             .next()
-            .unwrap_or_else(|| SerializedPaneGroup::Pane(SerializedPane { active: true, children: vec![] })))
+            .unwrap_or_else(|| {
+                SerializedPaneGroup::Pane(SerializedPane {
+                    active: true,
+                    children: vec![],
+                })
+            }))
     }
 
     fn get_pane_group(
@@ -248,7 +259,7 @@ impl WorkspaceDb {
         type GroupOrPane = (Option<GroupId>, Option<Axis>, Option<PaneId>, Option<bool>);
         self.select_bound::<GroupKey, GroupOrPane>(sql!(
             SELECT group_id, axis, pane_id, active
-                FROM (SELECT 
+                FROM (SELECT
                     group_id,
                     axis,
                     NULL as pane_id,
@@ -256,18 +267,18 @@ impl WorkspaceDb {
                     position,
                     parent_group_id,
                     workspace_id
-                FROM pane_groups 
+                FROM pane_groups
                 UNION
-                SELECT 
+                SELECT
+                    NULL,
                     NULL,
-                    NULL,
                     center_panes.pane_id,
                     panes.active as active,
                     position,
                     parent_group_id,
                     panes.workspace_id as workspace_id
                 FROM center_panes
-                JOIN panes ON center_panes.pane_id = panes.pane_id) 
+                JOIN panes ON center_panes.pane_id = panes.pane_id)
             WHERE parent_group_id IS ? AND workspace_id = ?
             ORDER BY position
         ))?((group_id, workspace_id))?
@@ -290,13 +301,12 @@ impl WorkspaceDb {
             // Filter out panes and pane groups which don't have any children or items
             .filter(|pane_group| match pane_group {
                 Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
-                Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), 
+                Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
                 _ => true,
             })
             .collect::<Result<_>>()
     }
 
-
     fn save_pane_group(
         conn: &Connection,
         workspace_id: WorkspaceId,
@@ -308,15 +318,10 @@ impl WorkspaceDb {
         let (parent_id, position) = unzip_option(parent);
 
         let group_id = conn.select_row_bound::<_, i64>(sql!(
-            INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) 
-            VALUES (?, ?, ?, ?) 
+            INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis)
+            VALUES (?, ?, ?, ?)
             RETURNING group_id
-        ))?((
-            workspace_id,
-            parent_id,
-            position,
-            *axis,
-        ))?
+        ))?((workspace_id, parent_id, position, *axis))?
         .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?;
 
         for (position, group) in children.iter().enumerate() {
@@ -337,9 +342,7 @@ impl WorkspaceDb {
             SELECT pane_id, active
             FROM panes
             WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)
-        ))?(
-            workspace_id,
-        )?
+        ))?(workspace_id)?
         .context("No dock pane for workspace")?;
 
         Ok(SerializedPane::new(
@@ -356,8 +359,8 @@ impl WorkspaceDb {
         dock: bool,
     ) -> Result<PaneId> {
         let pane_id = conn.select_row_bound::<_, i64>(sql!(
-            INSERT INTO panes(workspace_id, active) 
-            VALUES (?, ?) 
+            INSERT INTO panes(workspace_id, active)
+            VALUES (?, ?)
             RETURNING pane_id
         ))?((workspace_id, pane.active))?
         .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?;
@@ -399,14 +402,13 @@ impl WorkspaceDb {
         Ok(())
     }
 
-    query!{
+    query! {
         pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
             UPDATE workspaces
             SET timestamp = CURRENT_TIMESTAMP
             WHERE workspace_id = ?
         }
     }
-
 }
 
 #[cfg(test)]
@@ -495,7 +497,7 @@ mod tests {
             dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom),
             center_group: Default::default(),
             dock_pane: Default::default(),
-            left_sidebar_open: true
+            left_sidebar_open: true,
         };
 
         let mut workspace_2 = SerializedWorkspace {
@@ -504,7 +506,7 @@ mod tests {
             dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded),
             center_group: Default::default(),
             dock_pane: Default::default(),
-            left_sidebar_open: false
+            left_sidebar_open: false,
         };
 
         db.save_workspace(workspace_1.clone()).await;
@@ -610,7 +612,7 @@ mod tests {
             dock_position: DockPosition::Shown(DockAnchor::Bottom),
             center_group,
             dock_pane,
-            left_sidebar_open: true
+            left_sidebar_open: true,
         };
 
         db.save_workspace(workspace.clone()).await;
@@ -683,7 +685,7 @@ mod tests {
             dock_position: DockPosition::Shown(DockAnchor::Right),
             center_group: Default::default(),
             dock_pane: Default::default(),
-            left_sidebar_open: false
+            left_sidebar_open: false,
         };
 
         db.save_workspace(workspace_3.clone()).await;
@@ -718,7 +720,7 @@ mod tests {
             dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right),
             center_group: center_group.clone(),
             dock_pane,
-            left_sidebar_open: true
+            left_sidebar_open: true,
         }
     }