Mirror of https://github.com/zed-industries/zed.git (synced 2025-02-03 00:39:51 +00:00)
Fix management of buffers' dirty state
* Don't consider files deleted if they just haven't been scanned yet.
* Consider a buffer dirty if its file has been deleted.

Co-Authored-By: Nathan Sobo <nathan@zed.dev>
Parent: 01b20d524d
Commit: ff5561d402

3 changed files with 214 additions and 83 deletions
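To make the intended behavior concrete before the diff, here is a minimal, self-contained sketch of the revised dirty check. The types below are simplified stand-ins invented for illustration; the real Buffer, File, and time::Global types live in Zed's editor and worktree modules.

    // Simplified stand-ins for Zed's Buffer/File types (illustration only).
    struct File {
        // Set only once the worktree has actually scanned the path and found it missing.
        is_deleted: bool,
    }

    struct Buffer {
        version: u64,       // stand-in for the real `time::Global` vector clock
        saved_version: u64,
        file: Option<File>,
    }

    impl Buffer {
        fn is_dirty(&self) -> bool {
            // A buffer is dirty if it has unsaved edits, or if its backing file was deleted.
            self.version > self.saved_version
                || self.file.as_ref().map_or(false, |f| f.is_deleted)
        }
    }

    fn main() {
        // No unsaved edits, but the file is gone: the buffer should still read as dirty.
        let buffer = Buffer {
            version: 1,
            saved_version: 1,
            file: Some(File { is_deleted: true }),
        };
        assert!(buffer.is_dirty());
    }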
@@ -501,7 +501,7 @@ impl Buffer {
     }

     pub fn is_dirty(&self) -> bool {
-        self.version > self.saved_version
+        self.version > self.saved_version || self.file.as_ref().map_or(false, |f| f.is_deleted())
     }

     pub fn version(&self) -> time::Global {
@@ -2376,11 +2376,15 @@ impl ToPoint for usize {
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::{test::temp_tree, worktree::Worktree};
     use cmp::Ordering;
     use gpui::App;
-    use std::{cell::RefCell, rc::Rc};
+    use serde_json::json;
+    use std::{
+        cell::RefCell,
+        collections::BTreeMap,
+        fs,
+        rc::Rc,
+        sync::atomic::{self, AtomicUsize},
+    };

@@ -2979,13 +2983,24 @@ mod tests {
     }

     #[test]
-    fn test_is_modified() {
-        App::test((), |app| {
-            let model = app.add_model(|ctx| Buffer::new(0, "abc", ctx));
+    fn test_is_dirty() {
+        use crate::worktree::WorktreeHandle;
+
+        App::test_async((), |mut app| async move {
+            let dir = temp_tree(json!({
+                "file1": "",
+            }));
+            let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
+            app.read(|ctx| tree.read(ctx).scan_complete()).await;
+
+            let file = app.read(|ctx| tree.file("file1", ctx));
+            let model = app.add_model(|ctx| {
+                Buffer::from_history(0, History::new("abc".into()), Some(file), ctx)
+            });
             let events = Rc::new(RefCell::new(Vec::new()));

             // initially, the buffer isn't dirty.
-            model.update(app, |buffer, ctx| {
+            model.update(&mut app, |buffer, ctx| {
                 ctx.subscribe(&model, {
                     let events = events.clone();
                     move |_, event, _| events.borrow_mut().push(event.clone())
@@ -2998,7 +3013,7 @@ mod tests {
             });

             // after the first edit, the buffer is dirty, and emits a dirtied event.
-            model.update(app, |buffer, ctx| {
+            model.update(&mut app, |buffer, ctx| {
                 assert!(buffer.text() == "ac");
                 assert!(buffer.is_dirty());
                 assert_eq!(*events.borrow(), &[Event::Edited, Event::Dirtied]);
@@ -3008,7 +3023,7 @@ mod tests {
             });

             // after saving, the buffer is not dirty, and emits a saved event.
-            model.update(app, |buffer, ctx| {
+            model.update(&mut app, |buffer, ctx| {
                 assert!(!buffer.is_dirty());
                 assert_eq!(*events.borrow(), &[Event::Saved]);
                 events.borrow_mut().clear();
@@ -3018,7 +3033,7 @@ mod tests {
             });

             // after editing again, the buffer is dirty, and emits another dirty event.
-            model.update(app, |buffer, ctx| {
+            model.update(&mut app, |buffer, ctx| {
                 assert!(buffer.text() == "aBDc");
                 assert!(buffer.is_dirty());
                 assert_eq!(
@@ -3034,8 +3049,22 @@ mod tests {
                 assert!(buffer.is_dirty());
             });

-            model.update(app, |_, _| {
-                assert_eq!(*events.borrow(), &[Event::Edited]);
-            });
+            assert_eq!(*events.borrow(), &[Event::Edited]);
+
+            // When the file is deleted, the buffer is considered dirty.
+            model.update(&mut app, |buffer, ctx| {
+                buffer.did_save(buffer.version(), None, ctx);
+                assert!(!buffer.is_dirty());
+            });
+            events.borrow_mut().clear();
+
+            tree.flush_fs_events(&app).await;
+            fs::remove_file(dir.path().join("file1")).unwrap();
+            app.read(|ctx| tree.read(ctx).next_scan_complete()).await;
+
+            model.update(&mut app, |buffer, _| {
+                assert_eq!(*events.borrow(), &[Event::FileHandleChanged]);
+                assert!(buffer.is_dirty());
+            });
         });
     }

@@ -344,17 +344,17 @@ impl Workspace {

     pub fn open_paths(
         &mut self,
-        paths: &[PathBuf],
+        abs_paths: &[PathBuf],
         ctx: &mut ViewContext<Self>,
     ) -> impl Future<Output = ()> {
-        let entries = paths
+        let entries = abs_paths
             .iter()
             .cloned()
             .map(|path| self.file_for_path(&path, ctx))
             .collect::<Vec<_>>();

         let bg = ctx.background_executor().clone();
-        let tasks = paths
+        let tasks = abs_paths
             .iter()
             .cloned()
             .zip(entries.into_iter())
@@ -489,11 +489,6 @@ impl Workspace {
         };

-        let file = worktree.file(path.clone(), ctx.as_ref());
-        if file.is_deleted() {
-            log::error!("path {:?} does not exist", path);
-            return None;
-        }

         if let Entry::Vacant(entry) = self.loading_items.entry(entry.clone()) {
             let (mut tx, rx) = postage::watch::channel();
             entry.insert(rx);
@@ -935,14 +930,16 @@ mod tests {
             })
             .await;
             app.read(|ctx| {
-                workspace
-                    .read(ctx)
-                    .active_pane()
-                    .read(ctx)
-                    .active_item()
-                    .unwrap()
-                    .title(ctx)
-                    == "a.txt"
+                assert_eq!(
+                    workspace
+                        .read(ctx)
+                        .active_pane()
+                        .read(ctx)
+                        .active_item()
+                        .unwrap()
+                        .title(ctx),
+                    "a.txt"
+                );
             });

             // Open a file outside of any existing worktree.
@@ -952,7 +949,7 @@ mod tests {
                 })
             })
             .await;
-            app.update(|ctx| {
+            app.read(|ctx| {
                 let worktree_roots = workspace
                     .read(ctx)
                     .worktrees()
@@ -965,16 +962,16 @@ mod tests {
                         .into_iter()
                        .collect(),
                );
            });
            app.read(|ctx| {
-                workspace
-                    .read(ctx)
-                    .active_pane()
-                    .read(ctx)
-                    .active_item()
-                    .unwrap()
-                    .title(ctx)
-                    == "b.txt"
+                assert_eq!(
+                    workspace
+                        .read(ctx)
+                        .active_pane()
+                        .read(ctx)
+                        .active_item()
+                        .unwrap()
+                        .title(ctx),
+                    "b.txt"
+                );
            });
        });
    }
@@ -273,6 +273,25 @@ impl Snapshot {
         &self.root_name
     }

+    fn path_is_pending(&self, path: impl AsRef<Path>) -> bool {
+        if self.entries.is_empty() {
+            return true;
+        }
+        let path = path.as_ref();
+        let mut cursor = self.entries.cursor::<_, ()>();
+        if cursor.seek(&PathSearch::Exact(path), SeekBias::Left, &()) {
+            let entry = cursor.item().unwrap();
+            if entry.path.as_ref() == path {
+                return matches!(entry.kind, EntryKind::PendingDir);
+            }
+        }
+        if let Some(entry) = cursor.prev_item() {
+            matches!(entry.kind, EntryKind::PendingDir) && path.starts_with(entry.path.as_ref())
+        } else {
+            false
+        }
+    }
+
     fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
         let mut cursor = self.entries.cursor::<_, ()>();
         if cursor.seek(&PathSearch::Exact(path.as_ref()), SeekBias::Left, &()) {
@@ -766,6 +785,7 @@ impl BackgroundScanner {
             });
         }

+        self.mark_deleted_file_handles();
         Ok(())
     }

@@ -977,19 +997,7 @@ impl BackgroundScanner {
         });

         self.update_ignore_statuses();
-
-        let mut handles = self.handles.lock();
-        let snapshot = self.snapshot.lock();
-        handles.retain(|path, handle_state| {
-            if let Some(handle_state) = Weak::upgrade(&handle_state) {
-                let mut handle_state = handle_state.lock();
-                handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
-                true
-            } else {
-                false
-            }
-        });
-
+        self.mark_deleted_file_handles();
         true
     }

@@ -1079,6 +1087,20 @@ impl BackgroundScanner {
         self.snapshot.lock().entries.edit(edits, &());
     }

+    fn mark_deleted_file_handles(&self) {
+        let mut handles = self.handles.lock();
+        let snapshot = self.snapshot.lock();
+        handles.retain(|path, handle_state| {
+            if let Some(handle_state) = Weak::upgrade(&handle_state) {
+                let mut handle_state = handle_state.lock();
+                handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
+                true
+            } else {
+                false
+            }
+        });
+    }
+
     fn fs_entry_for_path(&self, path: Arc<Path>, abs_path: &Path) -> Result<Option<Entry>> {
         let metadata = match fs::metadata(&abs_path) {
             Err(err) => {
@@ -1137,6 +1159,12 @@ struct UpdateIgnoreStatusJob {

 pub trait WorktreeHandle {
     fn file(&self, path: impl AsRef<Path>, app: &AppContext) -> FileHandle;
+
+    #[cfg(test)]
+    fn flush_fs_events<'a>(
+        &self,
+        app: &'a gpui::TestAppContext,
+    ) -> futures_core::future::LocalBoxFuture<'a, ()>;
 }

 impl WorktreeHandle for ModelHandle<Worktree> {
@@ -1155,7 +1183,7 @@ impl WorktreeHandle for ModelHandle<Worktree> {
         } else {
             FileHandleState {
                 path: path.into(),
-                is_deleted: true,
+                is_deleted: !tree.path_is_pending(path),
             }
         };

@@ -1169,6 +1197,40 @@ impl WorktreeHandle for ModelHandle<Worktree> {
             state,
         }
     }
+
+    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
+    // occurred before the worktree was constructed. These events can cause the worktree to perform
+    // extra directory scans, and emit extra scan-state notifications.
+    //
+    // This function mutates the worktree's directory and waits for those mutations to be picked up,
+    // to ensure that all redundant FS events have already been processed.
+    #[cfg(test)]
+    fn flush_fs_events<'a>(
+        &self,
+        app: &'a gpui::TestAppContext,
+    ) -> futures_core::future::LocalBoxFuture<'a, ()> {
+        use smol::future::FutureExt;
+
+        let filename = "fs-event-sentinel";
+        let root_path = app.read(|ctx| self.read(ctx).abs_path.clone());
+        let tree = self.clone();
+        async move {
+            fs::write(root_path.join(filename), "").unwrap();
+            tree.condition_with_duration(Duration::from_secs(5), &app, |tree, _| {
+                tree.entry_for_path(filename).is_some()
+            })
+            .await;
+
+            fs::remove_file(root_path.join(filename)).unwrap();
+            tree.condition_with_duration(Duration::from_secs(5), &app, |tree, _| {
+                tree.entry_for_path(filename).is_none()
+            })
+            .await;
+
+            app.read(|ctx| tree.read(ctx).scan_complete()).await;
+        }
+        .boxed_local()
+    }
 }

 pub enum FileIter<'a> {
@@ -1282,7 +1344,7 @@ mod tests {
     use crate::editor::Buffer;
     use crate::test::*;
     use anyhow::Result;
-    use gpui::{App, TestAppContext};
+    use gpui::App;
     use rand::prelude::*;
     use serde_json::json;
     use std::env;
@@ -1402,18 +1464,33 @@ mod tests {
             }));

             let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
-            app.read(|ctx| tree.read(ctx).scan_complete()).await;
-            flush_fs_events(&tree, &app).await;

-            let (file2, file3, file4, file5) = app.read(|ctx| {
+            let (file2, file3, file4, file5, non_existent_file) = app.read(|ctx| {
                 (
                     tree.file("a/file2", ctx),
                     tree.file("a/file3", ctx),
                     tree.file("b/c/file4", ctx),
                     tree.file("b/c/file5", ctx),
+                    tree.file("a/filex", ctx),
                 )
             });

+            // The worktree hasn't scanned the directories containing these paths,
+            // so it can't determine that the paths are deleted.
+            assert!(!file2.is_deleted());
+            assert!(!file3.is_deleted());
+            assert!(!file4.is_deleted());
+            assert!(!file5.is_deleted());
+            assert!(!non_existent_file.is_deleted());
+
+            // After scanning, the worktree knows which files exist and which don't.
+            app.read(|ctx| tree.read(ctx).scan_complete()).await;
+            assert!(!file2.is_deleted());
+            assert!(!file3.is_deleted());
+            assert!(!file4.is_deleted());
+            assert!(!file5.is_deleted());
+            assert!(non_existent_file.is_deleted());
+
+            tree.flush_fs_events(&app).await;
             std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
             std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
             std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
@@ -1469,7 +1546,7 @@ mod tests {

             let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
             app.read(|ctx| tree.read(ctx).scan_complete()).await;
-            flush_fs_events(&tree, &app).await;
+            tree.flush_fs_events(&app).await;
             app.read(|ctx| {
                 let tree = tree.read(ctx);
                 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
@@ -1493,6 +1570,59 @@ mod tests {
         });
     }

+    #[test]
+    fn test_path_is_pending() {
+        let mut snapshot = Snapshot {
+            id: 0,
+            scan_id: 0,
+            abs_path: Path::new("").into(),
+            entries: Default::default(),
+            ignores: Default::default(),
+            root_name: Default::default(),
+        };
+
+        snapshot.entries.edit(
+            vec![
+                Edit::Insert(Entry {
+                    path: Path::new("b").into(),
+                    kind: EntryKind::Dir,
+                    inode: 0,
+                    is_ignored: false,
+                    is_symlink: false,
+                }),
+                Edit::Insert(Entry {
+                    path: Path::new("b/a").into(),
+                    kind: EntryKind::Dir,
+                    inode: 0,
+                    is_ignored: false,
+                    is_symlink: false,
+                }),
+                Edit::Insert(Entry {
+                    path: Path::new("b/c").into(),
+                    kind: EntryKind::PendingDir,
+                    inode: 0,
+                    is_ignored: false,
+                    is_symlink: false,
+                }),
+                Edit::Insert(Entry {
+                    path: Path::new("b/e").into(),
+                    kind: EntryKind::Dir,
+                    inode: 0,
+                    is_ignored: false,
+                    is_symlink: false,
+                }),
+            ],
+            &(),
+        );
+
+        assert!(!snapshot.path_is_pending("b/a"));
+        assert!(!snapshot.path_is_pending("b/b"));
+        assert!(snapshot.path_is_pending("b/c"));
+        assert!(snapshot.path_is_pending("b/c/x"));
+        assert!(!snapshot.path_is_pending("b/d"));
+        assert!(!snapshot.path_is_pending("b/e"));
+    }
+
     #[test]
     fn test_mounted_volume_paths() {
         let paths = mounted_volume_paths();
@@ -1776,29 +1906,4 @@ mod tests {
             paths
         }
     }
-
-    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
-    // occurred before the worktree was constructed. These events can cause the worktree to perform
-    // extra directory scans, and emit extra scan-state notifications.
-    //
-    // This function mutates the worktree's directory and waits for those mutations to be picked up,
-    // to ensure that all redundant FS events have already been processed.
-    async fn flush_fs_events(tree: &ModelHandle<Worktree>, app: &TestAppContext) {
-        let filename = "fs-event-sentinel";
-        let root_path = app.read(|ctx| tree.read(ctx).abs_path.clone());
-
-        fs::write(root_path.join(filename), "").unwrap();
-        tree.condition_with_duration(Duration::from_secs(5), &app, |tree, _| {
-            tree.entry_for_path(filename).is_some()
-        })
-        .await;
-
-        fs::remove_file(root_path.join(filename)).unwrap();
-        tree.condition_with_duration(Duration::from_secs(5), &app, |tree, _| {
-            tree.entry_for_path(filename).is_none()
-        })
-        .await;
-
-        app.read(|ctx| tree.read(ctx).scan_complete()).await;
-    }
 }