Fix write lock being dropped immediately: bind the mutex guard to a named variable (`let _lock = ...`) instead of the wildcard `let _ = ...`, which drops the guard at once.

co-authored-by: kay@zed.dev
This commit is contained in:
Mikayla Maki 2022-12-02 12:43:02 -08:00
parent 5e240f98f0
commit 5262e8c77e
6 changed files with 39 additions and 29 deletions

View file

@ -27,5 +27,5 @@ smol = "1.2"
[dev-dependencies]
gpui = { path = "../gpui", features = ["test-support"] }
env_logger = "0.9.1"
tempdir = { version = "0.3.7" }
env_logger = "0.9.1"

View file

@ -40,7 +40,7 @@ lazy_static::lazy_static! {
static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(());
static ref DB_WIPED: RwLock<bool> = RwLock::new(false);
pub static ref BACKUP_DB_PATH: RwLock<Option<PathBuf>> = RwLock::new(None);
pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false);
pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false);
}
/// Open or create a database at the given directory path.
@ -58,7 +58,6 @@ pub async fn open_db<M: Migrator + 'static>(wipe_db: bool, db_dir: &Path, releas
let mut db_wiped = DB_WIPED.write();
if !*db_wiped {
remove_dir_all(&main_db_dir).ok();
*db_wiped = true;
}
}
@ -71,7 +70,7 @@ pub async fn open_db<M: Migrator + 'static>(wipe_db: bool, db_dir: &Path, releas
// cause errors in the log and so should be observed by developers while writing
// soon-to-be good migrations. If user databases are corrupted, we toss them out
// and try again from a blank. As long as running all migrations from start to end
// is ok, this race condition will never be triggered.
// on a blank database is ok, this race condition will never be triggered.
//
// Basically: Don't ever push invalid migrations to stable or everyone will have
// a bad time.
@ -137,7 +136,7 @@ pub async fn open_db<M: Migrator + 'static>(wipe_db: bool, db_dir: &Path, releas
}
async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnection<M>> {
println!("Opening main db");
log::info!("Opening main db");
ThreadSafeConnection::<M>::builder(db_path.to_string_lossy().as_ref(), true)
.with_db_initialization_query(DB_INITIALIZE_QUERY)
.with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
@ -147,7 +146,7 @@ async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnec
}
async fn open_fallback_db<M: Migrator>() -> ThreadSafeConnection<M> {
println!("Opening fallback db");
log::info!("Opening fallback db");
ThreadSafeConnection::<M>::builder(FALLBACK_DB_NAME, false)
.with_db_initialization_query(DB_INITIALIZE_QUERY)
.with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)

View file

@ -13,4 +13,4 @@ smol = "1.2"
thread_local = "1.1.4"
lazy_static = "1.4"
parking_lot = "0.11.1"
futures = "0.3"
futures = "0.3"

View file

@ -266,12 +266,10 @@ pub fn background_thread_queue() -> WriteQueueConstructor {
/// Returns a `WriteQueueConstructor` whose queue serializes writes by
/// holding a mutex for the full duration of each queued write.
///
/// NOTE: the guard returned by `lock()` must be bound to a *named*
/// variable. `let _ = write_mutex.lock();` matches the wildcard pattern,
/// which drops the `MutexGuard` immediately — the lock would be released
/// before `queued_write()` runs, providing no mutual exclusion at all.
pub fn locking_queue() -> WriteQueueConstructor {
    Box::new(|| {
        let write_mutex = Mutex::new(());
        Box::new(move |queued_write| {
            // `_lock` keeps the guard alive until the end of this closure,
            // so the write below executes under the lock.
            let _lock = write_mutex.lock();
            queued_write();
        })
    })
}

View file

@ -10,9 +10,37 @@ lazy_static::lazy_static! {
#[proc_macro]
/// Compile-time checked SQL: validates the macro input as SQL and expands
/// to the formatted query as a raw string literal (`r#"..."#`).
/// On a syntax error, expands to a `compile_error!` spanned near the
/// offending token instead (see `create_error`).
pub fn sql(tokens: TokenStream) -> TokenStream {
// Flatten the token stream into a single SQL string plus an
// offset -> Span lookup table used for error reporting.
let (spans, sql) = make_sql(tokens);
// NOTE(review): `SQLITE` is presumably a lazy_static connection used only
// for syntax checking — defined elsewhere in this file; confirm.
let error = SQLITE.sql_has_syntax_error(sql.trim());
let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default());
if let Some((error, error_offset)) = error {
// Invalid SQL: emit a compile error pointing near the bad token.
create_error(spans, error_offset, error, &formatted_sql)
} else {
// Valid SQL: expand to the formatted query as a raw string literal.
format!("r#\"{}\"#", &formatted_sql).parse().unwrap()
}
}
/// Builds a `compile_error!` token stream pointing at the span nearest the
/// SQL syntax error.
///
/// `spans` maps byte offsets (recorded at the *end* of each input token in
/// the flattened SQL string) to their original macro spans. The first span
/// whose offset lies past `error_offset` is chosen; if the error is past
/// the last token, the macro call site is used as a fallback.
fn create_error(
    spans: Vec<(usize, Span)>,
    error_offset: usize,
    error: String,
    // `&str` instead of `&String`: call sites passing `&formatted_sql`
    // (a `String`) still compile via deref coercion.
    formatted_sql: &str,
) -> TokenStream {
    // First token that ends after the error offset is the closest span we
    // can attribute the error to.
    let error_span = spans
        .into_iter()
        .skip_while(|(offset, _)| offset <= &error_offset)
        .map(|(_, span)| span)
        .next()
        .unwrap_or(Span::call_site());
    let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql);
    TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error())
}
fn make_sql(tokens: TokenStream) -> (Vec<(usize, Span)>, String) {
let mut sql_tokens = vec![];
flatten_stream(tokens.clone(), &mut sql_tokens);
// Lookup of spans by offset at the end of the token
let mut spans: Vec<(usize, Span)> = Vec::new();
let mut sql = String::new();
@ -20,23 +48,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream {
sql.push_str(&token_text);
spans.push((sql.len(), span));
}
let error = SQLITE.sql_has_syntax_error(sql.trim());
let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default());
if let Some((error, error_offset)) = error {
let error_span = spans
.into_iter()
.skip_while(|(offset, _)| offset <= &error_offset)
.map(|(_, span)| span)
.next()
.unwrap_or(Span::call_site());
let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql);
TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error())
} else {
format!("r#\"{}\"#", &formatted_sql).parse().unwrap()
}
(spans, sql)
}
/// This method exists to normalize the representation of groups

View file

@ -54,3 +54,4 @@ gpui = { path = "../gpui", features = ["test-support"] }
project = { path = "../project", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }
fs = { path = "../fs", features = ["test-support"] }
db = { path = "../db", features = ["test-support"] }