Done first draft of strongly typed migrations

Mikayla Maki 2022-11-10 15:29:29 -08:00
parent 4a00f0b062
commit c84201fc9f
18 changed files with 396 additions and 448 deletions

View file

@ -2,7 +2,7 @@ mod update_notification;
use anyhow::{anyhow, Context, Result};
use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN};
use db::Db;
use db::{kvp::KeyValue, Db};
use gpui::{
actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle,
MutableAppContext, Task, WeakViewHandle,
@ -42,7 +42,7 @@ pub struct AutoUpdater {
current_version: AppVersion,
http_client: Arc<dyn HttpClient>,
pending_poll: Option<Task<()>>,
db: project::Db,
db: project::Db<KeyValue>,
server_url: String,
}
@ -57,7 +57,7 @@ impl Entity for AutoUpdater {
}
pub fn init(
db: Db,
db: Db<KeyValue>,
http_client: Arc<dyn HttpClient>,
server_url: String,
cx: &mut MutableAppContext,
@ -126,7 +126,7 @@ impl AutoUpdater {
fn new(
current_version: AppVersion,
db: project::Db,
db: project::Db<KeyValue>,
http_client: Arc<dyn HttpClient>,
server_url: String,
) -> Self {

View file

@ -11,7 +11,7 @@ use async_tungstenite::tungstenite::{
error::Error as WebsocketError,
http::{Request, StatusCode},
};
use db::Db;
use db::{kvp::KeyValue, Db};
use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt};
use gpui::{
actions,
@ -1218,7 +1218,7 @@ impl Client {
self.peer.respond_with_error(receipt, error)
}
pub fn start_telemetry(&self, db: Db) {
pub fn start_telemetry(&self, db: Db<KeyValue>) {
self.telemetry.start(db.clone());
}

View file

@ -1,5 +1,5 @@
use crate::http::HttpClient;
use db::Db;
use db::{kvp::KeyValue, Db};
use gpui::{
executor::Background,
serde_json::{self, value::Map, Value},
@ -148,7 +148,7 @@ impl Telemetry {
Some(self.state.lock().log_file.as_ref()?.path().to_path_buf())
}
pub fn start(self: &Arc<Self>, db: Db) {
pub fn start(self: &Arc<Self>, db: Db<KeyValue>) {
let this = self.clone();
self.executor
.spawn(
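
The three hunks above (auto_update, client, and telemetry) are the same mechanical change: every API that previously accepted the untyped Db now requires Db<KeyValue>, so handing a connection typed for the wrong domain becomes a compile error. A minimal sketch of the idea, using stand-in types rather than the real crates (an anyhow dependency is assumed):

use std::marker::PhantomData;

// Stand-ins for sqlez's Connection and the Domain trait added in this commit.
struct Connection;

trait Domain {
    fn migrate(conn: &Connection) -> anyhow::Result<()>;
}

// Marker domains: uninhabited enums that exist only at the type level.
enum KeyValue {}
enum Workspace {}

impl Domain for KeyValue {
    fn migrate(_conn: &Connection) -> anyhow::Result<()> {
        Ok(())
    }
}

impl Domain for Workspace {
    fn migrate(_conn: &Connection) -> anyhow::Result<()> {
        Ok(())
    }
}

struct Db<D: Domain>(Connection, PhantomData<D>);

// Before: start_telemetry(db: Db) accepted any database handle.
// After: the signature states which domain's tables it touches.
fn start_telemetry(_db: Db<KeyValue>) {}

fn main() {
    start_telemetry(Db(Connection, PhantomData)); // ok: inferred as Db<KeyValue>
    // start_telemetry(Db::<Workspace>(Connection, PhantomData)); // type error
}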

View file

@ -1,46 +0,0 @@
use std::{fs::File, path::Path};
const TEST_FILE: &'static str = "test-db.db";
fn main() -> anyhow::Result<()> {
env_logger::init();
let db = db::Db::open_in_memory("db");
let file = Path::new(TEST_FILE);
let f = File::create(file)?;
drop(f);
// let workspace_1 = db.workspace_for_roots(&["/tmp"]);
// let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
// let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]);
// db.save_dock_pane(
// &workspace_1.workspace_id,
// &SerializedDockPane {
// anchor_position: DockAnchor::Expanded,
// visible: true,
// },
// );
// db.save_dock_pane(
// &workspace_2.workspace_id,
// &SerializedDockPane {
// anchor_position: DockAnchor::Bottom,
// visible: true,
// },
// );
// db.save_dock_pane(
// &workspace_3.workspace_id,
// &SerializedDockPane {
// anchor_position: DockAnchor::Right,
// visible: false,
// },
// );
db.write_to(file).ok();
println!("Wrote database!");
Ok(())
}

View file

@ -1,30 +0,0 @@
use std::{fs::File, path::Path};
const TEST_FILE: &'static str = "test-db.db";
fn main() -> anyhow::Result<()> {
env_logger::init();
let db = db::Db::open_in_memory("db");
let file = Path::new(TEST_FILE);
let f = File::create(file)?;
drop(f);
db.write_kvp("test", "1")?;
db.write_kvp("test-2", "2")?;
db.workspace_for_roots(&["/tmp1"]);
db.workspace_for_roots(&["/tmp1", "/tmp2"]);
db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]);
db.workspace_for_roots(&["/tmp2", "/tmp3"]);
db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]);
db.workspace_for_roots(&["/tmp2", "/tmp4"]);
db.workspace_for_roots(&["/tmp2"]);
db.write_to(file).ok();
println!("Wrote database!");
Ok(())
}

View file

@ -7,18 +7,23 @@ use std::path::Path;
use anyhow::Result;
use indoc::indoc;
use kvp::KVP_MIGRATION;
use sqlez::connection::Connection;
use sqlez::domain::Domain;
use sqlez::thread_safe_connection::ThreadSafeConnection;
use workspace::items::ITEM_MIGRATIONS;
use workspace::pane::PANE_MIGRATIONS;
pub use workspace::*;
#[derive(Clone)]
pub struct Db(ThreadSafeConnection);
const INITIALIZE_QUERY: &'static str = indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"};
impl Deref for Db {
#[derive(Clone)]
pub struct Db<D: Domain>(ThreadSafeConnection<D>);
impl<D: Domain> Deref for Db<D> {
type Target = sqlez::connection::Connection;
fn deref(&self) -> &Self::Target {
@ -26,7 +31,7 @@ impl Deref for Db {
}
}
impl Db {
impl<D: Domain> Db<D> {
/// Open or create a database at the given directory path.
pub fn open(db_dir: &Path, channel: &'static str) -> Self {
// Use 0 for now. Will implement incrementing and clearing of old db files soon TM
@ -35,17 +40,15 @@ impl Db {
.expect("Should be able to create the database directory");
let db_path = current_db_dir.join(Path::new("db.sqlite"));
Db(initialize_connection(ThreadSafeConnection::new(
db_path.to_string_lossy().as_ref(),
true,
)))
Db(
ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true)
.with_initialize_query(INITIALIZE_QUERY),
)
}
/// Open an in-memory database for testing and as a fallback.
pub fn open_in_memory(db_name: &str) -> Self {
Db(initialize_connection(ThreadSafeConnection::new(
db_name, false,
)))
Db(ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY))
}
pub fn persisting(&self) -> bool {
@ -56,19 +59,8 @@ impl Db {
let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref());
self.backup_main(&destination)
}
}
fn initialize_connection(conn: ThreadSafeConnection) -> ThreadSafeConnection {
conn.with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"})
.with_migrations(&[
KVP_MIGRATION,
WORKSPACES_MIGRATION,
PANE_MIGRATIONS,
ITEM_MIGRATIONS,
])
pub fn open_as<D2: Domain>(&self) -> Db<D2> {
Db(self.0.for_domain())
}
}
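
db.rs is the heart of the change: Db is now a thin newtype over ThreadSafeConnection<D>, the pragma block lives in the INITIALIZE_QUERY constant, and the hard-coded migration list is gone, since each domain migrates itself. The new open_as re-types one underlying database for another domain. A hedged usage sketch against this API, assuming the kvp and workspace domains defined later in this diff:

use db::{kvp::KeyValue, Db};

fn example() -> anyhow::Result<()> {
    // KeyValue's migrations run lazily on the handle's first use.
    let db: Db<KeyValue> = Db::open_in_memory("example");
    let _value = db.read_kvp("some-key")?;

    // Same underlying database, viewed through the Workspace domain;
    // its migrations run lazily on this new handle's first use.
    let _workspace_db = db.open_as::<db::Workspace>();
    Ok(())
}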

View file

@ -1,7 +1,7 @@
use super::Db;
use anyhow::Result;
use indoc::indoc;
use sqlez::migrations::Migration;
use sqlez::{connection::Connection, domain::Domain, migrations::Migration};
pub(crate) const KVP_MIGRATION: Migration = Migration::new(
"kvp",
@ -13,7 +13,16 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new(
"}],
);
impl Db {
#[derive(Clone)]
pub enum KeyValue {}
impl Domain for KeyValue {
fn migrate(conn: &Connection) -> anyhow::Result<()> {
KVP_MIGRATION.run(conn)
}
}
impl Db<KeyValue> {
pub fn read_kvp(&self, key: &str) -> Result<Option<String>> {
self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key)
}
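
kvp.rs now doubles as the template for adding a domain: declare an uninhabited marker enum, implement Domain by running the module's migrations, and hang the typed queries off an impl Db<TheDomain> block. A hedged sketch of a hypothetical Settings domain built the same way (no such domain exists in this commit):

use indoc::indoc;
use sqlez::{connection::Connection, domain::Domain, migrations::Migration};

use super::Db;

pub(crate) const SETTINGS_MIGRATION: Migration = Migration::new(
    "settings",
    &[indoc! {"
        CREATE TABLE settings(
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL
        ) STRICT;
    "}],
);

#[derive(Clone)]
pub enum Settings {}

impl Domain for Settings {
    fn migrate(conn: &Connection) -> anyhow::Result<()> {
        SETTINGS_MIGRATION.run(conn)
    }
}

impl Db<Settings> {
    pub fn read_setting(&self, key: &str) -> anyhow::Result<Option<String>> {
        self.select_row_bound("SELECT value FROM settings WHERE key = (?)")?(key)
    }
}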

View file

@ -1,14 +1,24 @@
pub(crate) mod items;
pub mod model;
pub(crate) mod pane;
use anyhow::Context;
use util::{iife, ResultExt};
use anyhow::{bail, Context, Result};
use util::{iife, unzip_option, ResultExt};
use std::path::{Path, PathBuf};
use indoc::indoc;
use sqlez::migrations::Migration;
use sqlez::{domain::Domain, migrations::Migration};
use self::model::{
Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup,
SerializedWorkspace, WorkspaceId,
};
use super::Db;
// 1) Move all of this into Workspace crate
// 2) Deserialize items fully
// 3) Typed prepares (including how you expect to pull data out)
// 4) Investigate Tree column impls
pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
"workspace",
@ -22,11 +32,58 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
"}],
);
use self::model::{SerializedWorkspace, WorkspaceId};
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
"pane",
&[indoc! {"
CREATE TABLE pane_groups(
group_id INTEGER PRIMARY KEY,
workspace_id BLOB NOT NULL,
parent_group_id INTEGER, -- NULL indicates that this is a root node
position INTEGER, -- NULL indicates that this is a root node
axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE panes(
pane_id INTEGER PRIMARY KEY,
workspace_id BLOB NOT NULL,
parent_group_id INTEGER, -- NULL, this is a dock pane
position INTEGER, -- NULL, this is a dock pane
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
"}],
);
use super::Db;
pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new(
"item",
&[indoc! {"
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
workspace_id BLOB NOT NULL,
pane_id INTEGER NOT NULL,
kind TEXT NOT NULL,
position INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE,
PRIMARY KEY(item_id, workspace_id)
) STRICT;
"}],
);
impl Db {
#[derive(Clone)]
pub enum Workspace {}
impl Domain for Workspace {
fn migrate(conn: &sqlez::connection::Connection) -> anyhow::Result<()> {
WORKSPACES_MIGRATION.run(&conn)?;
PANE_MIGRATIONS.run(&conn)?;
ITEM_MIGRATIONS.run(&conn)
}
}
impl Db<Workspace> {
/// Returns a serialized workspace for the given worktree_roots. If the passed array
/// is empty, the most recent workspace is returned instead. If no workspace for the
/// passed roots is stored, returns None.
@ -129,6 +186,142 @@ impl Db {
.log_err()
.unwrap_or_default()
}
pub(crate) fn get_center_pane_group(
&self,
workspace_id: &WorkspaceId,
) -> Result<SerializedPaneGroup> {
self.get_pane_group_children(workspace_id, None)?
.into_iter()
.next()
.context("No center pane group")
}
fn get_pane_group_children<'a>(
&self,
workspace_id: &WorkspaceId,
group_id: Option<GroupId>,
) -> Result<Vec<SerializedPaneGroup>> {
self.select_bound::<(Option<GroupId>, &WorkspaceId), (Option<GroupId>, Option<Axis>, Option<PaneId>)>(indoc! {"
SELECT group_id, axis, pane_id
FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id
FROM pane_groups
UNION
SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id
FROM panes
-- Remove the dock panes from the union
WHERE parent_group_id IS NOT NULL and position IS NOT NULL)
WHERE parent_group_id IS ? AND workspace_id = ?
ORDER BY position
"})?((group_id, workspace_id))?
.into_iter()
.map(|(group_id, axis, pane_id)| {
if let Some((group_id, axis)) = group_id.zip(axis) {
Ok(SerializedPaneGroup::Group {
axis,
children: self.get_pane_group_children(
workspace_id,
Some(group_id),
)?,
})
} else if let Some(pane_id) = pane_id {
Ok(SerializedPaneGroup::Pane(SerializedPane {
children: self.get_items(pane_id)?,
}))
} else {
bail!("Pane Group Child was neither a pane group or a pane");
}
})
.collect::<Result<_>>()
}
pub(crate) fn save_pane_group(
&self,
workspace_id: &WorkspaceId,
pane_group: &SerializedPaneGroup,
parent: Option<(GroupId, usize)>,
) -> Result<()> {
if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) {
bail!("Pane groups must have a SerializedPaneGroup::Group at the root")
}
let (parent_id, position) = unzip_option(parent);
match pane_group {
SerializedPaneGroup::Group { axis, children } => {
let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")?
((workspace_id, parent_id, position, *axis))?;
for (position, group) in children.iter().enumerate() {
self.save_pane_group(workspace_id, group, Some((parent_id, position)))?
}
Ok(())
}
SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent),
}
}
pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result<SerializedPane> {
let pane_id = self.select_row_bound(indoc! {"
SELECT pane_id FROM panes
WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?(
workspace_id,
)?
.context("No dock pane for workspace")?;
Ok(SerializedPane::new(
self.get_items(pane_id).context("Reading items")?,
))
}
pub(crate) fn save_pane(
&self,
workspace_id: &WorkspaceId,
pane: &SerializedPane,
parent: Option<(GroupId, usize)>,
) -> Result<()> {
let (parent_id, order) = unzip_option(parent);
let pane_id = self.insert_bound(
"INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)",
)?((workspace_id, parent_id, order))?;
self.save_items(workspace_id, pane_id, &pane.children)
.context("Saving items")
}
pub(crate) fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
Ok(self.select_bound(indoc! {"
SELECT item_id, kind FROM items
WHERE pane_id = ?
ORDER BY position"})?(pane_id)?
.into_iter()
.map(|(item_id, kind)| match kind {
SerializedItemKind::Terminal => SerializedItem::Terminal { item_id },
_ => unimplemented!(),
})
.collect())
}
pub(crate) fn save_items(
&self,
workspace_id: &WorkspaceId,
pane_id: PaneId,
items: &[SerializedItem],
) -> Result<()> {
let mut delete_old = self
.exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?")
.context("Preparing deletion")?;
let mut insert_new = self.exec_bound(
"INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)",
).context("Preparing insertion")?;
for (position, item) in items.iter().enumerate() {
delete_old((workspace_id, pane_id, item.item_id()))?;
insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?;
}
Ok(())
}
}
#[cfg(test)]
@ -214,4 +407,89 @@ mod tests {
workspace_3
);
}
use crate::model::{SerializedItem, SerializedPane, SerializedPaneGroup};
fn default_workspace(
dock_pane: SerializedPane,
center_group: &SerializedPaneGroup,
) -> SerializedWorkspace {
SerializedWorkspace {
dock_anchor: crate::model::DockAnchor::Right,
dock_visible: false,
center_group: center_group.clone(),
dock_pane,
}
}
#[test]
fn test_basic_dock_pane() {
env_logger::try_init().ok();
let db = Db::open_in_memory("basic_dock_pane");
let dock_pane = crate::model::SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 1 },
SerializedItem::Terminal { item_id: 4 },
SerializedItem::Terminal { item_id: 2 },
SerializedItem::Terminal { item_id: 3 },
],
};
let workspace = default_workspace(dock_pane, &Default::default());
db.save_workspace(&["/tmp"], None, &workspace);
let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
assert_eq!(workspace.dock_pane, new_workspace.dock_pane);
}
#[test]
fn test_simple_split() {
env_logger::try_init().ok();
let db = Db::open_in_memory("simple_split");
// -----------------
// | 1,2 | 5,6 |
// | - - - | |
// | 3,4 | |
// -----------------
let center_pane = SerializedPaneGroup::Group {
axis: crate::model::Axis::Horizontal,
children: vec![
SerializedPaneGroup::Group {
axis: crate::model::Axis::Vertical,
children: vec![
SerializedPaneGroup::Pane(SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 1 },
SerializedItem::Terminal { item_id: 2 },
],
}),
SerializedPaneGroup::Pane(SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 4 },
SerializedItem::Terminal { item_id: 3 },
],
}),
],
},
SerializedPaneGroup::Pane(SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 5 },
SerializedItem::Terminal { item_id: 6 },
],
}),
],
};
let workspace = default_workspace(Default::default(), &center_pane);
db.save_workspace(&["/tmp"], None, &workspace);
assert_eq!(workspace.center_group, center_pane);
}
}
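
For orientation, the tree that get_pane_group_children reassembles (and that the tests above build by hand) has roughly this shape. The names follow crate::model as used in this diff; the derives, the item_id type, and the Default impl are assumed:

#[derive(Clone, Debug, PartialEq)]
enum Axis {
    Horizontal,
    Vertical,
}

#[derive(Clone, Debug, PartialEq)]
enum SerializedItem {
    Terminal { item_id: usize },
}

#[derive(Clone, Debug, PartialEq, Default)]
struct SerializedPane {
    children: Vec<SerializedItem>,
}

#[derive(Clone, Debug, PartialEq)]
enum SerializedPaneGroup {
    // Interior node: a split axis plus ordered children.
    Group {
        axis: Axis,
        children: Vec<SerializedPaneGroup>,
    },
    // Leaf node: a pane holding an ordered list of items.
    Pane(SerializedPane),
}

// The tests pass &Default::default() as a center group, so something
// like this presumably exists in the real model:
impl Default for SerializedPaneGroup {
    fn default() -> Self {
        Self::Pane(SerializedPane::default())
    }
}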

View file

@ -1,63 +0,0 @@
use anyhow::{Context, Result};
use indoc::indoc;
use sqlez::migrations::Migration;
use crate::{
model::{PaneId, SerializedItem, SerializedItemKind, WorkspaceId},
Db,
};
// 1) Move all of this into Workspace crate
// 2) Deserialize items fully
// 3) Typed prepares (including how you expect to pull data out)
// 4) Investigate Tree column impls
pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new(
"item",
&[indoc! {"
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
workspace_id BLOB NOT NULL,
pane_id INTEGER NOT NULL,
kind TEXT NOT NULL,
position INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE,
PRIMARY KEY(item_id, workspace_id)
) STRICT;
"}],
);
impl Db {
pub(crate) fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
Ok(self.select_bound(indoc! {"
SELECT item_id, kind FROM items
WHERE pane_id = ?
ORDER BY position"})?(pane_id)?
.into_iter()
.map(|(item_id, kind)| match kind {
SerializedItemKind::Terminal => SerializedItem::Terminal { item_id },
_ => unimplemented!(),
})
.collect())
}
pub(crate) fn save_items(
&self,
workspace_id: &WorkspaceId,
pane_id: PaneId,
items: &[SerializedItem],
) -> Result<()> {
let mut delete_old = self
.exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?")
.context("Preparing deletion")?;
let mut insert_new = self.exec_bound(
"INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)",
).context("Preparing insertion")?;
for (position, item) in items.iter().enumerate() {
delete_old((workspace_id, pane_id, item.item_id()))?;
insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?;
}
Ok(())
}
}

View file

@ -1,232 +0,0 @@
use anyhow::{bail, Context, Result};
use indoc::indoc;
use sqlez::migrations::Migration;
use util::unzip_option;
use crate::model::{Axis, GroupId, PaneId, SerializedPane};
use super::{
model::{SerializedPaneGroup, WorkspaceId},
Db,
};
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
"pane",
&[indoc! {"
CREATE TABLE pane_groups(
group_id INTEGER PRIMARY KEY,
workspace_id BLOB NOT NULL,
parent_group_id INTEGER, -- NULL indicates that this is a root node
position INTEGER, -- NULL indicates that this is a root node
axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE panes(
pane_id INTEGER PRIMARY KEY,
workspace_id BLOB NOT NULL,
parent_group_id INTEGER, -- NULL, this is a dock pane
position INTEGER, -- NULL, this is a dock pane
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
"}],
);
impl Db {
pub(crate) fn get_center_pane_group(
&self,
workspace_id: &WorkspaceId,
) -> Result<SerializedPaneGroup> {
self.get_pane_group_children(workspace_id, None)?
.into_iter()
.next()
.context("No center pane group")
}
fn get_pane_group_children<'a>(
&self,
workspace_id: &WorkspaceId,
group_id: Option<GroupId>,
) -> Result<Vec<SerializedPaneGroup>> {
self.select_bound::<(Option<GroupId>, &WorkspaceId), (Option<GroupId>, Option<Axis>, Option<PaneId>)>(indoc! {"
SELECT group_id, axis, pane_id
FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id
FROM pane_groups
UNION
SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id
FROM panes
-- Remove the dock panes from the union
WHERE parent_group_id IS NOT NULL and position IS NOT NULL)
WHERE parent_group_id IS ? AND workspace_id = ?
ORDER BY position
"})?((group_id, workspace_id))?
.into_iter()
.map(|(group_id, axis, pane_id)| {
if let Some((group_id, axis)) = group_id.zip(axis) {
Ok(SerializedPaneGroup::Group {
axis,
children: self.get_pane_group_children(
workspace_id,
Some(group_id),
)?,
})
} else if let Some(pane_id) = pane_id {
Ok(SerializedPaneGroup::Pane(SerializedPane {
children: self.get_items(pane_id)?,
}))
} else {
bail!("Pane Group Child was neither a pane group or a pane");
}
})
.collect::<Result<_>>()
}
pub(crate) fn save_pane_group(
&self,
workspace_id: &WorkspaceId,
pane_group: &SerializedPaneGroup,
parent: Option<(GroupId, usize)>,
) -> Result<()> {
if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) {
bail!("Pane groups must have a SerializedPaneGroup::Group at the root")
}
let (parent_id, position) = unzip_option(parent);
match pane_group {
SerializedPaneGroup::Group { axis, children } => {
let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")?
((workspace_id, parent_id, position, *axis))?;
for (position, group) in children.iter().enumerate() {
self.save_pane_group(workspace_id, group, Some((parent_id, position)))?
}
Ok(())
}
SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent),
}
}
pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result<SerializedPane> {
let pane_id = self.select_row_bound(indoc! {"
SELECT pane_id FROM panes
WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?(
workspace_id,
)?
.context("No dock pane for workspace")?;
Ok(SerializedPane::new(
self.get_items(pane_id).context("Reading items")?,
))
}
pub(crate) fn save_pane(
&self,
workspace_id: &WorkspaceId,
pane: &SerializedPane,
parent: Option<(GroupId, usize)>,
) -> Result<()> {
let (parent_id, order) = unzip_option(parent);
let pane_id = self.insert_bound(
"INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)",
)?((workspace_id, parent_id, order))?;
self.save_items(workspace_id, pane_id, &pane.children)
.context("Saving items")
}
}
#[cfg(test)]
mod tests {
use crate::{
model::{SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace},
Db,
};
fn default_workspace(
dock_pane: SerializedPane,
center_group: &SerializedPaneGroup,
) -> SerializedWorkspace {
SerializedWorkspace {
dock_anchor: crate::model::DockAnchor::Right,
dock_visible: false,
center_group: center_group.clone(),
dock_pane,
}
}
#[test]
fn test_basic_dock_pane() {
env_logger::try_init().ok();
let db = Db::open_in_memory("basic_dock_pane");
let dock_pane = crate::model::SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 1 },
SerializedItem::Terminal { item_id: 4 },
SerializedItem::Terminal { item_id: 2 },
SerializedItem::Terminal { item_id: 3 },
],
};
let workspace = default_workspace(dock_pane, &Default::default());
db.save_workspace(&["/tmp"], None, &workspace);
let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
assert_eq!(workspace.dock_pane, new_workspace.dock_pane);
}
#[test]
fn test_simple_split() {
env_logger::try_init().ok();
let db = Db::open_in_memory("simple_split");
// -----------------
// | 1,2 | 5,6 |
// | - - - | |
// | 3,4 | |
// -----------------
let center_pane = SerializedPaneGroup::Group {
axis: crate::model::Axis::Horizontal,
children: vec![
SerializedPaneGroup::Group {
axis: crate::model::Axis::Vertical,
children: vec![
SerializedPaneGroup::Pane(SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 1 },
SerializedItem::Terminal { item_id: 2 },
],
}),
SerializedPaneGroup::Pane(SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 4 },
SerializedItem::Terminal { item_id: 3 },
],
}),
],
},
SerializedPaneGroup::Pane(SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 5 },
SerializedItem::Terminal { item_id: 6 },
],
}),
],
};
let workspace = default_workspace(Default::default(), &center_pane);
db.save_workspace(&["/tmp"], None, &workspace);
assert_eq!(workspace.center_group, center_pane);
}
}

View file

@ -63,7 +63,7 @@ use std::{
use thiserror::Error;
use util::{defer, post_inc, ResultExt, TryFutureExt as _};
pub use db::Db;
pub use db::{kvp::KeyValue, Db};
pub use fs::*;
pub use worktree::*;

View file

@ -0,0 +1,39 @@
use crate::connection::Connection;
pub trait Domain: Send + Sync + Clone {
fn migrate(conn: &Connection) -> anyhow::Result<()>;
}
impl<D1: Domain, D2: Domain> Domain for (D1, D2) {
fn migrate(conn: &Connection) -> anyhow::Result<()> {
D1::migrate(conn)?;
D2::migrate(conn)
}
}
impl<D1: Domain, D2: Domain, D3: Domain> Domain for (D1, D2, D3) {
fn migrate(conn: &Connection) -> anyhow::Result<()> {
D1::migrate(conn)?;
D2::migrate(conn)?;
D3::migrate(conn)
}
}
impl<D1: Domain, D2: Domain, D3: Domain, D4: Domain> Domain for (D1, D2, D3, D4) {
fn migrate(conn: &Connection) -> anyhow::Result<()> {
D1::migrate(conn)?;
D2::migrate(conn)?;
D3::migrate(conn)?;
D4::migrate(conn)
}
}
impl<D1: Domain, D2: Domain, D3: Domain, D4: Domain, D5: Domain> Domain for (D1, D2, D3, D4, D5) {
fn migrate(conn: &Connection) -> anyhow::Result<()> {
D1::migrate(conn)?;
D2::migrate(conn)?;
D3::migrate(conn)?;
D4::migrate(conn)?;
D5::migrate(conn)
}
}
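
The tuple implementations compose domains: migrating (D1, D2) runs D1's migrations, then D2's, so a single connection can be typed over every domain it serves. A self-contained sketch of the mechanism, with a stand-in Connection and an anyhow dependency assumed:

struct Connection; // stand-in for sqlez::connection::Connection

trait Domain {
    fn migrate(conn: &Connection) -> anyhow::Result<()>;
}

enum Kvp {}
enum Workspace {}

impl Domain for Kvp {
    fn migrate(_conn: &Connection) -> anyhow::Result<()> {
        println!("running kvp migrations");
        Ok(())
    }
}

impl Domain for Workspace {
    fn migrate(_conn: &Connection) -> anyhow::Result<()> {
        println!("running workspace migrations");
        Ok(())
    }
}

// Mirrors the two-element impl above.
impl<D1: Domain, D2: Domain> Domain for (D1, D2) {
    fn migrate(conn: &Connection) -> anyhow::Result<()> {
        D1::migrate(conn)?;
        D2::migrate(conn)
    }
}

fn main() -> anyhow::Result<()> {
    // A connection typed over (Kvp, Workspace) runs both, in order:
    <(Kvp, Workspace)>::migrate(&Connection)
}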

View file

@ -1,5 +1,6 @@
pub mod bindable;
pub mod connection;
pub mod domain;
pub mod migrations;
pub mod savepoint;
pub mod statement;

View file

@ -1,5 +1,5 @@
use anyhow::Result;
use indoc::{formatdoc, indoc};
use indoc::formatdoc;
use crate::connection::Connection;

View file

@ -1,26 +1,26 @@
use std::{ops::Deref, sync::Arc};
use std::{marker::PhantomData, ops::Deref, sync::Arc};
use connection::Connection;
use thread_local::ThreadLocal;
use crate::{connection, migrations::Migration};
use crate::{connection, domain::Domain};
pub struct ThreadSafeConnection {
pub struct ThreadSafeConnection<D: Domain> {
uri: Arc<str>,
persistent: bool,
initialize_query: Option<&'static str>,
migrations: Option<&'static [Migration]>,
connection: Arc<ThreadLocal<Connection>>,
_pd: PhantomData<D>,
}
impl ThreadSafeConnection {
impl<D: Domain> ThreadSafeConnection<D> {
pub fn new(uri: &str, persistent: bool) -> Self {
Self {
uri: Arc::from(uri),
persistent,
initialize_query: None,
migrations: None,
connection: Default::default(),
_pd: PhantomData,
}
}
@ -31,13 +31,6 @@ impl ThreadSafeConnection {
self
}
/// Migrations have to be run per connection because we fall back to memory,
/// so this needs to happen on every connection.
pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self {
self.migrations = Some(migrations);
self
}
/// Opens a new db connection with the initialized file path. This is internal and only
/// called from the deref function.
/// If opening fails, the connection falls back to a shared memory connection
@ -50,21 +43,33 @@ impl ThreadSafeConnection {
fn open_shared_memory(&self) -> Connection {
Connection::open_memory(self.uri.as_ref())
}
// Open a new connection for the given domain, leaving this
// connection intact.
pub fn for_domain<D2: Domain>(&self) -> ThreadSafeConnection<D2> {
ThreadSafeConnection {
uri: self.uri.clone(),
persistent: self.persistent,
initialize_query: self.initialize_query,
connection: Default::default(),
_pd: PhantomData,
}
}
}
impl Clone for ThreadSafeConnection {
impl<D: Domain> Clone for ThreadSafeConnection<D> {
fn clone(&self) -> Self {
Self {
uri: self.uri.clone(),
persistent: self.persistent,
initialize_query: self.initialize_query.clone(),
migrations: self.migrations.clone(),
connection: self.connection.clone(),
_pd: PhantomData,
}
}
}
impl Deref for ThreadSafeConnection {
impl<D: Domain> Deref for ThreadSafeConnection<D> {
type Target = Connection;
fn deref(&self) -> &Self::Target {
@ -83,13 +88,7 @@ impl Deref for ThreadSafeConnection {
.unwrap();
}
if let Some(migrations) = self.migrations {
for migration in migrations {
migration
.run(&connection)
.expect(&format!("Migrations failed to execute: {:?}", migration));
}
}
D::migrate(&connection).expect("Migrations failed");
connection
})
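
PhantomData<D> is zero-sized, so the domain tag costs nothing at runtime. The payoff is in the Deref impl above: each thread's lazily-opened connection now runs D::migrate instead of walking a stored migration list, and for_domain re-types the handle so another domain's migrations run on its first use. A minimal sketch of the phantom-typed handle, with stand-ins for the real types:

use std::{marker::PhantomData, sync::Arc};

struct Connection; // stand-in for sqlez::connection::Connection

trait Domain {
    fn migrate(conn: &Connection) -> anyhow::Result<()>;
}

struct ThreadSafeConnection<D: Domain> {
    uri: Arc<str>,
    persistent: bool,
    _pd: PhantomData<D>, // zero-sized marker: the domain is type-level only
}

impl<D: Domain> ThreadSafeConnection<D> {
    // Same uri and settings, new domain tag. Because the per-thread
    // connection cache starts empty, D2::migrate runs when the new
    // handle is first dereferenced.
    fn for_domain<D2: Domain>(&self) -> ThreadSafeConnection<D2> {
        ThreadSafeConnection {
            uri: self.uri.clone(),
            persistent: self.persistent,
            _pd: PhantomData,
        }
    }
}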

View file

@ -1925,7 +1925,7 @@ mod tests {
let project = Project::test(fs, None, cx).await;
let (_, workspace) =
cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx));
cx.add_window(|cx| Workspace::new(None, project, |_, _| unimplemented!(), cx));
let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone());
add_labled_item(&workspace, &pane, "A", cx);

View file

@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result};
use call::ActiveCall;
use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
use collections::{hash_map, HashMap, HashSet};
use db::{model::SerializedWorkspace, Db};
use db::{kvp::KeyValue, model::SerializedWorkspace, Db};
use dock::{DefaultItemFactory, Dock, ToggleDockButton};
use drag_and_drop::DragAndDrop;
use fs::{self, Fs};
@ -1288,7 +1288,8 @@ impl Workspace {
// Use the resolved worktree roots to get the serialized_db from the database
let serialized_workspace = cx.read(|cx| {
cx.global::<Db>()
cx.global::<Db<KeyValue>>()
.open_as::<db::Workspace>()
.workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..])
});
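
This call site shows the intended division of labor: the app stores a single global Db<KeyValue>, and code that needs another domain re-types it on the spot. A hedged sketch of the same pattern as a free function (the function name and the &[&str] roots type are illustrative):

use db::{kvp::KeyValue, model::SerializedWorkspace, Db};

fn load_serialized_workspace(
    db: &Db<KeyValue>,
    roots: &[&str],
) -> Option<SerializedWorkspace> {
    // Re-type the shared connection for the Workspace domain, then query it.
    db.open_as::<db::Workspace>().workspace_for_roots(roots)
}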

View file

@ -57,7 +57,7 @@ fn main() {
init_panic_hook(app_version, http.clone(), app.background());
let db = app.background().spawn(async move {
project::Db::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str())
project::Db::<project::KeyValue>::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str())
});
load_embedded_fonts(&app);
@ -150,7 +150,7 @@ fn main() {
let db = cx.background().block(db);
cx.set_global(db);
client.start_telemetry(cx.global::<Db>().clone());
client.start_telemetry(cx.global::<Db<project::KeyValue>>().clone());
client.report_event("start app", Default::default());
let app_state = Arc::new(AppState {
@ -165,7 +165,7 @@ fn main() {
default_item_factory,
});
auto_update::init(
cx.global::<Db>().clone(),
cx.global::<Db<project::KeyValue>>().clone(),
http,
client::ZED_SERVER_URL.clone(),
cx,