Mirror of https://github.com/zed-industries/zed.git (synced 2024-12-26 10:40:54 +00:00)

Merge branch 'main' into window_context_2
Commit: 136e599051

12 changed files with 638 additions and 172 deletions
.github/pull_request_template.md (vendored, 9 lines deleted)
@@ -1,9 +0,0 @@
-## Description of feature or change
-
-## Link to related issues from zed or community
-
-## Before Merging
-
-- [ ] Does this have tests or have existing tests been updated to cover this change?
-- [ ] Have you added the necessary settings to configure this feature?
-- [ ] Has documentation been created or updated (including above changes to settings)?
.github/workflows/ci.yml (vendored, 4 changed lines)
@@ -54,7 +54,7 @@ jobs:
      - name: Install Node
        uses: actions/setup-node@v2
        with:
-          node-version: '16'
+          node-version: '18'

      - name: Checkout repo
        uses: actions/checkout@v2
@@ -102,7 +102,7 @@ jobs:
      - name: Install Node
        uses: actions/setup-node@v2
        with:
-          node-version: '16'
+          node-version: '18'

      - name: Checkout repo
        uses: actions/checkout@v2
.github/workflows/randomized_tests.yml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
name: Randomized Tests

concurrency: randomized-tests

on:
  push:
    branches:
      - randomized-tests-runner
  schedule:
    - cron: '0 * * * *'

env:
  CARGO_TERM_COLOR: always
  CARGO_INCREMENTAL: 0
  RUST_BACKTRACE: 1
  ZED_SERVER_URL: https://zed.dev
  ZED_CLIENT_SECRET_TOKEN: ${{ secrets.ZED_CLIENT_SECRET_TOKEN }}

jobs:
  tests:
    name: Run randomized tests
    runs-on:
      - self-hosted
      - randomized-tests
    steps:
      - name: Install Rust
        run: |
          rustup set profile minimal
          rustup update stable

      - name: Install Node
        uses: actions/setup-node@v2
        with:
          node-version: '18'

      - name: Checkout repo
        uses: actions/checkout@v2
        with:
          clean: false
          submodules: 'recursive'

      - name: Run randomized tests
        run: script/randomized-test-ci
@@ -459,9 +459,7 @@ impl CollabTitlebarItem {
.with_child(
MouseEventHandler::<ShareUnshare, Self>::new(0, cx, |state, _| {
//TODO: Ensure this button has consistant width for both text variations
-let style = titlebar
-.share_button
-.style_for(state, self.contacts_popover.is_some());
+let style = titlebar.share_button.style_for(state, false);
Label::new(label, style.text.clone())
.contained()
.with_style(style.container)
@@ -710,11 +708,9 @@ impl CollabTitlebarItem {
}
})?;

let location = remote_participant.map(|p| p.location);

Some(Self::render_face(
avatar.clone(),
Self::location_style(workspace, location, follower_style, cx),
follower_style,
background_color,
))
}))
@@ -2352,53 +2352,66 @@ impl Editor {
let id = post_inc(&mut self.next_completion_id);
let task = cx.spawn_weak(|this, mut cx| {
async move {
let completions = completions.await?;
if completions.is_empty() {
return Ok(());
}

let mut menu = CompletionsMenu {
id,
initial_position: position,
match_candidates: completions
.iter()
.enumerate()
.map(|(id, completion)| {
StringMatchCandidate::new(
id,
completion.label.text[completion.label.filter_range.clone()].into(),
)
})
.collect(),
buffer,
completions: completions.into(),
matches: Vec::new().into(),
selected_item: 0,
list: Default::default(),
let menu = if let Some(completions) = completions.await.log_err() {
let mut menu = CompletionsMenu {
id,
initial_position: position,
match_candidates: completions
.iter()
.enumerate()
.map(|(id, completion)| {
StringMatchCandidate::new(
id,
completion.label.text[completion.label.filter_range.clone()]
.into(),
)
})
.collect(),
buffer,
completions: completions.into(),
matches: Vec::new().into(),
selected_item: 0,
list: Default::default(),
};
menu.filter(query.as_deref(), cx.background()).await;
if menu.matches.is_empty() {
None
} else {
Some(menu)
}
} else {
None
};

menu.filter(query.as_deref(), cx.background()).await;
let this = this
.upgrade(&cx)
.ok_or_else(|| anyhow!("editor was dropped"))?;
this.update(&mut cx, |this, cx| {
this.completion_tasks.retain(|(task_id, _)| *task_id > id);

if let Some(this) = this.upgrade(&cx) {
this.update(&mut cx, |this, cx| {
match this.context_menu.as_ref() {
None => {}
Some(ContextMenu::Completions(prev_menu)) => {
if prev_menu.id > menu.id {
return;
}
match this.context_menu.as_ref() {
None => {}
Some(ContextMenu::Completions(prev_menu)) => {
if prev_menu.id > id {
return;
}
_ => return,
}
_ => return,
}

this.completion_tasks.retain(|(id, _)| *id > menu.id);
if this.focused && !menu.matches.is_empty() {
this.show_context_menu(ContextMenu::Completions(menu), cx);
} else if this.hide_context_menu(cx).is_none() {
if this.focused && menu.is_some() {
let menu = menu.unwrap();
this.show_context_menu(ContextMenu::Completions(menu), cx);
} else if this.completion_tasks.is_empty() {
// If there are no more completion tasks and the last menu was
// empty, we should hide it. If it was already hidden, we should
// also show the copilot suggestion when available.
if this.hide_context_menu(cx).is_none() {
this.update_visible_copilot_suggestion(cx);
}
})?;
}
}
})?;

Ok::<_, anyhow::Error>(())
}
.log_err()
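The core of the editor change above is easier to see in isolation: the completion request is awaited with `.log_err()`, so a failed request produces `None` instead of an error, and a menu is only shown when one was actually built. Below is a minimal, self-contained sketch of that idiom; the trait and types are illustrative stand-ins, not zed's definitions.

// Sketch of the `.log_err()` idiom: turn a Result into an Option, logging the
// error instead of propagating it, so a failed or empty request yields no menu.
trait ResultExt<T> {
    fn log_err(self) -> Option<T>;
}

impl<T, E: std::fmt::Display> ResultExt<T> for Result<T, E> {
    fn log_err(self) -> Option<T> {
        match self {
            Ok(value) => Some(value),
            Err(error) => {
                eprintln!("error: {error}");
                None
            }
        }
    }
}

struct CompletionsMenu {
    matches: Vec<String>,
}

fn build_menu(completions: Result<Vec<String>, String>) -> Option<CompletionsMenu> {
    // Only build a menu when the request succeeded and produced matches.
    let completions = completions.log_err()?;
    let menu = CompletionsMenu { matches: completions };
    if menu.matches.is_empty() {
        None
    } else {
        Some(menu)
    }
}

fn main() {
    assert!(build_menu(Ok(vec!["one".into()])).is_some());
    assert!(build_menu(Ok(vec![])).is_none());
    assert!(build_menu(Err("request failed".into())).is_none());
}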
@@ -5932,13 +5932,12 @@ async fn test_copilot(deterministic: Arc<Deterministic>, cx: &mut gpui::TestAppContext) {
)
.await;

// When inserting, ensure autocompletion is favored over Copilot suggestions.
cx.set_state(indoc! {"
oneˇ
two
three
"});

// When inserting, ensure autocompletion is favored over Copilot suggestions.
cx.simulate_keystroke(".");
let _ = handle_completion_request(
&mut cx,
@@ -5952,8 +5951,8 @@ async fn test_copilot(deterministic: Arc<Deterministic>, cx: &mut gpui::TestAppContext) {
handle_copilot_completion_request(
&copilot_lsp,
vec![copilot::request::Completion {
-text: "copilot1".into(),
-range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 5)),
+text: "one.copilot1".into(),
+range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 4)),
..Default::default()
}],
vec![],
@@ -5975,13 +5974,45 @@ async fn test_copilot(deterministic: Arc<Deterministic>, cx: &mut gpui::TestAppContext) {
assert_eq!(editor.display_text(cx), "one.completion_a\ntwo\nthree\n");
});

+// Ensure Copilot suggestions are shown right away if no autocompletion is available.
+cx.set_state(indoc! {"
+oneˇ
+two
+three
+"});
+cx.simulate_keystroke(".");
+let _ = handle_completion_request(
+&mut cx,
+indoc! {"
+one.|<>
+two
+three
+"},
+vec![],
+);
+handle_copilot_completion_request(
+&copilot_lsp,
+vec![copilot::request::Completion {
+text: "one.copilot1".into(),
+range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 4)),
+..Default::default()
+}],
+vec![],
+);
+deterministic.advance_clock(COPILOT_DEBOUNCE_TIMEOUT);
+cx.update_editor(|editor, cx| {
+assert!(!editor.context_menu_visible());
+assert!(editor.has_active_copilot_suggestion(cx));
+assert_eq!(editor.display_text(cx), "one.copilot1\ntwo\nthree\n");
+assert_eq!(editor.text(cx), "one.\ntwo\nthree\n");
+});

-// When inserting, ensure autocompletion is favored over Copilot suggestions.
+// Reset editor, and ensure autocompletion is still favored over Copilot suggestions.
cx.set_state(indoc! {"
oneˇ
two
three
"});
cx.simulate_keystroke(".");
let _ = handle_completion_request(
&mut cx,
@@ -523,31 +523,7 @@ impl FakeFs {
}

pub async fn insert_file(&self, path: impl AsRef<Path>, content: String) {
-let mut state = self.state.lock();
-let path = path.as_ref();
-let inode = state.next_inode;
-let mtime = state.next_mtime;
-state.next_inode += 1;
-state.next_mtime += Duration::from_nanos(1);
-let file = Arc::new(Mutex::new(FakeFsEntry::File {
-inode,
-mtime,
-content,
-}));
-state
-.write_path(path, move |entry| {
-match entry {
-btree_map::Entry::Vacant(e) => {
-e.insert(file);
-}
-btree_map::Entry::Occupied(mut e) => {
-*e.get_mut() = file;
-}
-}
-Ok(())
-})
-.unwrap();
-state.emit_event(&[path]);
+self.write_file_internal(path, content).unwrap()
}

pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
@@ -569,6 +545,33 @@ impl FakeFs {
state.emit_event(&[path]);
}

+fn write_file_internal(&self, path: impl AsRef<Path>, content: String) -> Result<()> {
+let mut state = self.state.lock();
+let path = path.as_ref();
+let inode = state.next_inode;
+let mtime = state.next_mtime;
+state.next_inode += 1;
+state.next_mtime += Duration::from_nanos(1);
+let file = Arc::new(Mutex::new(FakeFsEntry::File {
+inode,
+mtime,
+content,
+}));
+state.write_path(path, move |entry| {
+match entry {
+btree_map::Entry::Vacant(e) => {
+e.insert(file);
+}
+btree_map::Entry::Occupied(mut e) => {
+*e.get_mut() = file;
+}
+}
+Ok(())
+})?;
+state.emit_event(&[path]);
+Ok(())
+}

pub async fn pause_events(&self) {
self.state.lock().events_paused = true;
}
@@ -952,7 +955,7 @@ impl Fs for FakeFs {
async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
self.simulate_random_delay().await;
let path = normalize_path(path.as_path());
-self.insert_file(path, data.to_string()).await;
+self.write_file_internal(path, data.to_string())?;

Ok(())
}
@@ -961,7 +964,7 @@
self.simulate_random_delay().await;
let path = normalize_path(path);
let content = chunks(text, line_ending).collect();
-self.insert_file(path, content).await;
+self.write_file_internal(path, content)?;
Ok(())
}

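The FakeFs change factors the shared bookkeeping into a synchronous `write_file_internal` helper that both `insert_file` and `atomic_write` call. Below is a minimal sketch of that shape with a simplified state type; it is illustrative only, not zed's code, and uses the futures crate only for `block_on` in the example.

use std::collections::BTreeMap;
use std::sync::Mutex;

#[derive(Default)]
struct FakeState {
    next_inode: u64,
    files: BTreeMap<String, String>,
    events: Vec<String>,
}

#[derive(Default)]
struct FakeFs {
    state: Mutex<FakeState>,
}

impl FakeFs {
    // Synchronous core shared by every async writer: the counter bump, the map
    // update, and the event record all live in one place.
    fn write_file_internal(&self, path: &str, content: String) -> Result<(), String> {
        let mut state = self.state.lock().map_err(|_| "poisoned lock".to_string())?;
        state.next_inode += 1;
        state.files.insert(path.to_string(), content);
        state.events.push(path.to_string());
        Ok(())
    }

    async fn insert_file(&self, path: &str, content: String) {
        self.write_file_internal(path, content).unwrap()
    }

    async fn atomic_write(&self, path: &str, data: String) -> Result<(), String> {
        self.write_file_internal(path, data)
    }
}

fn main() {
    let fs = FakeFs::default();
    futures::executor::block_on(async {
        fs.insert_file("/a.txt", "hello".into()).await;
        fs.atomic_write("/a.txt", "world".into()).await.unwrap();
    });
    assert_eq!(fs.state.lock().unwrap().files["/a.txt"], "world");
}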
@@ -76,7 +76,7 @@ pub fn run_test(
let seed = atomic_seed.load(SeqCst);

if is_randomized {
-dbg!(seed);
+eprintln!("seed = {seed}");
}

let deterministic = executor::Deterministic::new(seed);
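For context, the randomized-test scripts added later in this commit drive these tests by setting environment variables such as SEED, ITERATIONS, and OPERATIONS before invoking `cargo test`, and they scrape the seed that the harness prints on stderr. The sketch below is a hedged illustration of how a harness might read those variables; it is not the real gpui harness, which differs in its details.

use std::env;

fn main() {
    // Variable names match the CI scripts; parsing and defaults here are illustrative.
    let starting_seed: u64 = env::var("SEED")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(0);
    let iterations: u64 = env::var("ITERATIONS")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(1);

    for i in 0..iterations {
        let seed = starting_seed + i;
        // Printing the seed on stderr lets CI scripts report which seed failed.
        eprintln!("seed = {seed}");
        // ... run one deterministic iteration of the test with `seed` ...
    }
}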
@@ -1733,13 +1733,19 @@ impl Project {

async fn send_buffer_messages(
this: WeakModelHandle<Self>,
-mut rx: UnboundedReceiver<BufferMessage>,
+rx: UnboundedReceiver<BufferMessage>,
mut cx: AsyncAppContext,
-) {
+) -> Option<()> {
+const MAX_BATCH_SIZE: usize = 128;
+
let mut needs_resync_with_host = false;
-while let Some(change) = rx.next().await {
-if let Some(this) = this.upgrade(&mut cx) {
-let is_local = this.read_with(&cx, |this, _| this.is_local());
+let mut operations_by_buffer_id = HashMap::default();
+let mut changes = rx.ready_chunks(MAX_BATCH_SIZE);
+while let Some(changes) = changes.next().await {
+let this = this.upgrade(&mut cx)?;
+let is_local = this.read_with(&cx, |this, _| this.is_local());
+
+for change in changes {
match change {
BufferMessage::Operation {
buffer_id,
@@ -1748,21 +1754,14 @@ impl Project {
if needs_resync_with_host {
continue;
}
-let request = this.read_with(&cx, |this, _| {
-let project_id = this.remote_id()?;
-Some(this.client.request(proto::UpdateBuffer {
-buffer_id,
-project_id,
-operations: vec![operation],
-}))
-});
-if let Some(request) = request {
-if request.await.is_err() && !is_local {
-needs_resync_with_host = true;
-}
-}
+
+operations_by_buffer_id
+.entry(buffer_id)
+.or_insert(Vec::new())
+.push(operation);
}
BufferMessage::Resync => {
+operations_by_buffer_id.clear();
if this
.update(&mut cx, |this, cx| this.synchronize_remote_buffers(cx))
.await
@@ -1772,10 +1771,27 @@ impl Project {
}
}
}
} else {
break;
}

for (buffer_id, operations) in operations_by_buffer_id.drain() {
let request = this.read_with(&cx, |this, _| {
let project_id = this.remote_id()?;
Some(this.client.request(proto::UpdateBuffer {
buffer_id,
project_id,
operations,
}))
});
if let Some(request) = request {
if request.await.is_err() && !is_local {
needs_resync_with_host = true;
break;
}
}
}
}

None
}

fn on_buffer_event(
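The batching introduced in `send_buffer_messages` rests on `StreamExt::ready_chunks`, which drains whatever is already queued (up to a cap) in a single wakeup, so operations can be grouped per buffer before any request is sent. Below is a small standalone sketch of that idiom using the futures crate and stand-in message types rather than zed's.

use futures::{channel::mpsc, executor, StreamExt};
use std::collections::HashMap;

const MAX_BATCH_SIZE: usize = 128;

fn main() {
    let (tx, rx) = mpsc::unbounded::<(u64, String)>(); // (buffer_id, operation)

    // Queue a few messages for two hypothetical buffers, then close the channel.
    for i in 0..5_u64 {
        tx.unbounded_send((i % 2, format!("op-{i}"))).unwrap();
    }
    drop(tx);

    executor::block_on(async move {
        // `ready_chunks` yields Vecs of messages that are already queued,
        // up to MAX_BATCH_SIZE per chunk, so a burst is handled as one batch.
        let mut chunks = rx.ready_chunks(MAX_BATCH_SIZE);
        while let Some(batch) = chunks.next().await {
            let mut operations_by_buffer_id: HashMap<u64, Vec<String>> = HashMap::new();
            for (buffer_id, operation) in batch {
                operations_by_buffer_id
                    .entry(buffer_id)
                    .or_insert(Vec::new())
                    .push(operation);
            }
            // One request per buffer per batch, instead of one per operation.
            for (buffer_id, operations) in operations_by_buffer_id {
                println!("send UpdateBuffer for {buffer_id}: {operations:?}");
            }
        }
    });
}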
@@ -95,7 +95,17 @@ pub struct Snapshot {
root_char_bag: CharBag,
entries_by_path: SumTree<Entry>,
entries_by_id: SumTree<PathEntry>,
+
+/// A number that increases every time the worktree begins scanning
+/// a set of paths from the filesystem. This scanning could be caused
+/// by some operation performed on the worktree, such as reading or
+/// writing a file, or by an event reported by the filesystem.
scan_id: usize,
+
+/// The latest scan id that has completed, and whose preceding scans
+/// have all completed. The current `scan_id` could be more than one
+/// greater than the `completed_scan_id` if operations are performed
+/// on the worktree while it is processing a file-system event.
completed_scan_id: usize,
}

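A toy model of the relationship these comments describe is shown below. It is not zed's implementation, and it makes the simplifying assumption that scans complete in the order they start.

// Toy model of the scan_id / completed_scan_id bookkeeping described above.
#[derive(Debug, Default)]
struct ScanState {
    scan_id: usize,
    completed_scan_id: usize,
}

impl ScanState {
    fn begin_scan(&mut self) -> usize {
        self.scan_id += 1;
        self.scan_id
    }

    fn complete_scan(&mut self, scan_id: usize) {
        // Completing a scan can only move the watermark forward, and it never
        // overtakes the most recently started scan.
        self.completed_scan_id = self.completed_scan_id.max(scan_id);
        assert!(self.completed_scan_id <= self.scan_id);
    }

    fn is_idle(&self) -> bool {
        self.completed_scan_id == self.scan_id
    }
}

fn main() {
    let mut state = ScanState::default();
    let first = state.begin_scan();
    let second = state.begin_scan(); // a second scan starts before the first completes
    assert!(!state.is_idle());
    state.complete_scan(first);
    state.complete_scan(second);
    assert!(state.is_idle());
    println!("{state:?}");
}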
@@ -1481,7 +1491,12 @@ impl LocalSnapshot {
}

let scan_id = self.scan_id;
-self.entries_by_path.insert_or_replace(entry.clone(), &());
+let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
+if let Some(removed) = removed {
+if removed.id != entry.id {
+self.entries_by_id.remove(&removed.id, &());
+}
+}
self.entries_by_id.insert_or_replace(
PathEntry {
id: entry.id,
@@ -2168,6 +2183,7 @@ impl BackgroundScanner {
}
{
let mut snapshot = self.snapshot.lock();
+snapshot.scan_id += 1;
ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
if ignore_stack.is_all() {
if let Some(mut root_entry) = snapshot.root_entry().cloned() {
@@ -2189,6 +2205,10 @@ impl BackgroundScanner {
.unwrap();
drop(scan_job_tx);
self.scan_dirs(true, scan_job_rx).await;
+{
+let mut snapshot = self.snapshot.lock();
+snapshot.completed_scan_id = snapshot.scan_id;
+}
self.send_status_update(false, None);

// Process any any FS events that occurred while performing the initial scan.
@@ -2200,7 +2220,6 @@ impl BackgroundScanner {
paths.extend(more_events.into_iter().map(|e| e.path));
}
self.process_events(paths).await;
-self.send_status_update(false, None);
}

self.finished_initial_scan = true;
@@ -2212,9 +2231,8 @@ impl BackgroundScanner {
// these before handling changes reported by the filesystem.
request = self.refresh_requests_rx.recv().fuse() => {
let Ok((paths, barrier)) = request else { break };
-self.reload_entries_for_paths(paths, None).await;
-if !self.send_status_update(false, Some(barrier)) {
-break;
+if !self.process_refresh_request(paths, barrier).await {
+return;
}
}

@@ -2225,15 +2243,17 @@ impl BackgroundScanner {
paths.extend(more_events.into_iter().map(|e| e.path));
}
self.process_events(paths).await;
self.send_status_update(false, None);
}
}
}
}

async fn process_events(&mut self, paths: Vec<PathBuf>) {
use futures::FutureExt as _;
async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
self.reload_entries_for_paths(paths, None).await;
self.send_status_update(false, Some(barrier))
}

async fn process_events(&mut self, paths: Vec<PathBuf>) {
let (scan_job_tx, scan_job_rx) = channel::unbounded();
if let Some(mut paths) = self
.reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
@@ -2245,35 +2265,7 @@ impl BackgroundScanner {
drop(scan_job_tx);
self.scan_dirs(false, scan_job_rx).await;

-let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
-let snapshot = self.update_ignore_statuses(ignore_queue_tx);
-self.executor
-.scoped(|scope| {
-for _ in 0..self.executor.num_cpus() {
-scope.spawn(async {
-loop {
-select_biased! {
-// Process any path refresh requests before moving on to process
-// the queue of ignore statuses.
-request = self.refresh_requests_rx.recv().fuse() => {
-let Ok((paths, barrier)) = request else { break };
-self.reload_entries_for_paths(paths, None).await;
-if !self.send_status_update(false, Some(barrier)) {
-return;
-}
-}
-
-// Recursively process directories whose ignores have changed.
-job = ignore_queue_rx.recv().fuse() => {
-let Ok(job) = job else { break };
-self.update_ignore_status(job, &snapshot).await;
-}
-}
-}
-});
-}
-})
-.await;
+self.update_ignore_statuses().await;

let mut snapshot = self.snapshot.lock();
let mut git_repositories = mem::take(&mut snapshot.git_repositories);
@@ -2281,6 +2273,9 @@ impl BackgroundScanner {
snapshot.git_repositories = git_repositories;
snapshot.removed_entry_ids.clear();
snapshot.completed_scan_id = snapshot.scan_id;
drop(snapshot);

self.send_status_update(false, None);
}

async fn scan_dirs(
@@ -2313,8 +2308,7 @@ impl BackgroundScanner {
// the scan queue, so that user operations are prioritized.
request = self.refresh_requests_rx.recv().fuse() => {
let Ok((paths, barrier)) = request else { break };
-self.reload_entries_for_paths(paths, None).await;
-if !self.send_status_update(false, Some(barrier)) {
+if !self.process_refresh_request(paths, barrier).await {
return;
}
}
@@ -2521,12 +2515,10 @@ impl BackgroundScanner {
.await;

let mut snapshot = self.snapshot.lock();

if snapshot.completed_scan_id == snapshot.scan_id {
snapshot.scan_id += 1;
if !doing_recursive_update {
snapshot.completed_scan_id = snapshot.scan_id;
}
let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
snapshot.scan_id += 1;
if is_idle && !doing_recursive_update {
snapshot.completed_scan_id = snapshot.scan_id;
}

// Remove any entries for paths that no longer exist or are being recursively
@@ -2596,16 +2588,17 @@ impl BackgroundScanner {
Some(event_paths)
}

fn update_ignore_statuses(
&self,
ignore_queue_tx: Sender<UpdateIgnoreStatusJob>,
) -> LocalSnapshot {
async fn update_ignore_statuses(&self) {
use futures::FutureExt as _;

let mut snapshot = self.snapshot.lock().clone();
let mut ignores_to_update = Vec::new();
let mut ignores_to_delete = Vec::new();
for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
if *scan_id > snapshot.completed_scan_id
&& snapshot.entry_for_path(parent_path).is_some()
{
ignores_to_update.push(parent_abs_path.clone());
}

@@ -2624,6 +2617,7 @@ impl BackgroundScanner {
.remove(&parent_abs_path);
}

+let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
ignores_to_update.sort_unstable();
let mut ignores_to_update = ignores_to_update.into_iter().peekable();
while let Some(parent_abs_path) = ignores_to_update.next() {
@@ -2642,8 +2636,34 @@ impl BackgroundScanner {
}))
.unwrap();
}
drop(ignore_queue_tx);

-snapshot
+self.executor
+.scoped(|scope| {
+for _ in 0..self.executor.num_cpus() {
+scope.spawn(async {
+loop {
+select_biased! {
+// Process any path refresh requests before moving on to process
+// the queue of ignore statuses.
+request = self.refresh_requests_rx.recv().fuse() => {
+let Ok((paths, barrier)) = request else { break };
+if !self.process_refresh_request(paths, barrier).await {
+return;
+}
+}
+
+// Recursively process directories whose ignores have changed.
+job = ignore_queue_rx.recv().fuse() => {
+let Ok(job) = job else { break };
+self.update_ignore_status(job, &snapshot).await;
+}
+}
+}
+});
+}
+})
+.await;
}

async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
@@ -3054,12 +3074,11 @@ mod tests {
use fs::repository::FakeGitRepository;
use fs::{FakeFs, RealFs};
use gpui::{executor::Deterministic, TestAppContext};
use pretty_assertions::assert_eq;
use rand::prelude::*;
use serde_json::json;
use std::{env, fmt::Write};
use util::http::FakeHttpClient;

use util::test::temp_tree;
use util::{http::FakeHttpClient, test::temp_tree};

#[gpui::test]
async fn test_traversal(cx: &mut TestAppContext) {
@@ -3461,7 +3480,7 @@ mod tests {
}

#[gpui::test(iterations = 30)]
-async fn test_create_directory(cx: &mut TestAppContext) {
+async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

let fs = FakeFs::new(cx.background());
@@ -3486,6 +3505,8 @@ mod tests {
.await
.unwrap();

+let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
+
let entry = tree
.update(cx, |tree, cx| {
tree.as_local_mut()
@@ -3497,10 +3518,91 @@ mod tests {
assert!(entry.is_dir());

cx.foreground().run_until_parked();

tree.read_with(cx, |tree, _| {
assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
});

+let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
+let update = snapshot2.build_update(&snapshot1, 0, 0, true);
+snapshot1.apply_remote_update(update).unwrap();
+assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
}

+#[gpui::test(iterations = 100)]
+async fn test_random_worktree_operations_during_initial_scan(
+cx: &mut TestAppContext,
+mut rng: StdRng,
+) {
+let operations = env::var("OPERATIONS")
+.map(|o| o.parse().unwrap())
+.unwrap_or(5);
+let initial_entries = env::var("INITIAL_ENTRIES")
+.map(|o| o.parse().unwrap())
+.unwrap_or(20);
+
+let root_dir = Path::new("/test");
+let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
+fs.as_fake().insert_tree(root_dir, json!({})).await;
+for _ in 0..initial_entries {
+randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
+}
+log::info!("generated initial tree");
+
+let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
+let worktree = Worktree::local(
+client.clone(),
+root_dir,
+true,
+fs.clone(),
+Default::default(),
+&mut cx.to_async(),
+)
+.await
+.unwrap();
+
+let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
+
+for _ in 0..operations {
+worktree
+.update(cx, |worktree, cx| {
+randomly_mutate_worktree(worktree, &mut rng, cx)
+})
+.await
+.log_err();
+worktree.read_with(cx, |tree, _| {
+tree.as_local().unwrap().snapshot.check_invariants()
+});
+
+if rng.gen_bool(0.6) {
+let new_snapshot =
+worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
+let update = new_snapshot.build_update(&snapshot, 0, 0, true);
+snapshot.apply_remote_update(update.clone()).unwrap();
+assert_eq!(
+snapshot.to_vec(true),
+new_snapshot.to_vec(true),
+"incorrect snapshot after update {:?}",
+update
+);
+}
+}
+
+worktree
+.update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
+.await;
+worktree.read_with(cx, |tree, _| {
+tree.as_local().unwrap().snapshot.check_invariants()
+});
+
+let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
+let update = new_snapshot.build_update(&snapshot, 0, 0, true);
+snapshot.apply_remote_update(update.clone()).unwrap();
+assert_eq!(
+snapshot.to_vec(true),
+new_snapshot.to_vec(true),
+"incorrect snapshot after update {:?}",
+update
+);
+}

#[gpui::test(iterations = 100)]
@@ -3516,18 +3618,17 @@ mod tests {
let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
fs.as_fake().insert_tree(root_dir, json!({})).await;
for _ in 0..initial_entries {
randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
}
log::info!("generated initial tree");

let next_entry_id = Arc::new(AtomicUsize::default());
let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let worktree = Worktree::local(
client.clone(),
root_dir,
true,
fs.clone(),
next_entry_id.clone(),
Default::default(),
&mut cx.to_async(),
)
.await
@@ -3583,14 +3684,14 @@ mod tests {
let mut snapshots = Vec::new();
let mut mutations_len = operations;
while mutations_len > 1 {
-randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
+randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
let buffered_event_count = fs.as_fake().buffered_event_count().await;
if buffered_event_count > 0 && rng.gen_bool(0.3) {
let len = rng.gen_range(0..=buffered_event_count);
log::info!("flushing {} events", len);
fs.as_fake().flush_events(len).await;
} else {
-randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
+randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
mutations_len -= 1;
}

@@ -3615,7 +3716,7 @@ mod tests {
root_dir,
true,
fs.clone(),
next_entry_id,
Default::default(),
&mut cx.to_async(),
)
.await
@@ -3659,7 +3760,67 @@ mod tests {
}
}

-async fn randomly_mutate_tree(
+fn randomly_mutate_worktree(
+worktree: &mut Worktree,
+rng: &mut impl Rng,
+cx: &mut ModelContext<Worktree>,
+) -> Task<Result<()>> {
+let worktree = worktree.as_local_mut().unwrap();
+let snapshot = worktree.snapshot();
+let entry = snapshot.entries(false).choose(rng).unwrap();
+
+match rng.gen_range(0_u32..100) {
+0..=33 if entry.path.as_ref() != Path::new("") => {
+log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
+worktree.delete_entry(entry.id, cx).unwrap()
+}
+..=66 if entry.path.as_ref() != Path::new("") => {
+let other_entry = snapshot.entries(false).choose(rng).unwrap();
+let new_parent_path = if other_entry.is_dir() {
+other_entry.path.clone()
+} else {
+other_entry.path.parent().unwrap().into()
+};
+let mut new_path = new_parent_path.join(gen_name(rng));
+if new_path.starts_with(&entry.path) {
+new_path = gen_name(rng).into();
+}
+
+log::info!(
+"renaming entry {:?} ({}) to {:?}",
+entry.path,
+entry.id.0,
+new_path
+);
+let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
+cx.foreground().spawn(async move {
+task.await?;
+Ok(())
+})
+}
+_ => {
+let task = if entry.is_dir() {
+let child_path = entry.path.join(gen_name(rng));
+let is_dir = rng.gen_bool(0.3);
+log::info!(
+"creating {} at {:?}",
+if is_dir { "dir" } else { "file" },
+child_path,
+);
+worktree.create_entry(child_path, is_dir, cx)
+} else {
+log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
+worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
+};
+cx.foreground().spawn(async move {
+task.await?;
+Ok(())
+})
+}
+}
+}
+
+async fn randomly_mutate_fs(
fs: &Arc<dyn Fs>,
root_path: &Path,
insertion_probability: f64,
@@ -3827,6 +3988,20 @@ mod tests {

impl LocalSnapshot {
fn check_invariants(&self) {
+assert_eq!(
+self.entries_by_path
+.cursor::<()>()
+.map(|e| (&e.path, e.id))
+.collect::<Vec<_>>(),
+self.entries_by_id
+.cursor::<()>()
+.map(|e| (&e.path, e.id))
+.collect::<collections::BTreeSet<_>>()
+.into_iter()
+.collect::<Vec<_>>(),
+"entries_by_path and entries_by_id are inconsistent"
+);
+
let mut files = self.files(true, 0);
let mut visible_files = self.files(false, 0);
for entry in self.entries_by_path.cursor::<()>() {
@@ -3837,6 +4012,7 @@ mod tests {
}
}
}

assert!(files.next().is_none());
assert!(visible_files.next().is_none());

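The new `check_invariants` above asserts that the path-ordered and id-ordered indexes describe the same set of entries. The same idea is shown below in a much smaller, self-contained model; it uses plain standard-library maps rather than zed's SumTree types and is illustrative only.

use std::collections::{BTreeMap, HashMap};

// Toy stand-in for a worktree entry indexed both by path and by id.
#[derive(Default)]
struct Index {
    entries_by_path: BTreeMap<String, u64>, // path -> id
    entries_by_id: HashMap<u64, String>,    // id -> path
}

impl Index {
    fn insert(&mut self, id: u64, path: &str) {
        // Replacing a path must also evict the entry that previously owned it,
        // otherwise the two maps drift apart.
        if let Some(old_id) = self.entries_by_path.insert(path.to_string(), id) {
            if old_id != id {
                self.entries_by_id.remove(&old_id);
            }
        }
        self.entries_by_id.insert(id, path.to_string());
    }

    fn check_invariants(&self) {
        let by_path: Vec<_> = self
            .entries_by_path
            .iter()
            .map(|(path, id)| (path.clone(), *id))
            .collect();
        let mut by_id: Vec<_> = self
            .entries_by_id
            .iter()
            .map(|(id, path)| (path.clone(), *id))
            .collect();
        by_id.sort();
        assert_eq!(by_path, by_id, "entries_by_path and entries_by_id are inconsistent");
    }
}

fn main() {
    let mut index = Index::default();
    index.insert(1, "a/b.txt");
    index.insert(2, "a/c.txt");
    index.insert(3, "a/b.txt"); // a new entry takes over an existing path
    index.check_invariants();
}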
script/randomized-test-ci (new executable file, 65 lines)
@@ -0,0 +1,65 @@
#!/usr/bin/env node --redirect-warnings=/dev/null

const fs = require('fs')
const {randomBytes} = require('crypto')
const {execFileSync} = require('child_process')
const {minimizeTestPlan, buildTests, runTests} = require('./randomized-test-minimize');

const {ZED_SERVER_URL, ZED_CLIENT_SECRET_TOKEN} = process.env
if (!ZED_SERVER_URL) throw new Error('Missing env var `ZED_SERVER_URL`')
if (!ZED_CLIENT_SECRET_TOKEN) throw new Error('Missing env var `ZED_CLIENT_SECRET_TOKEN`')

main()

async function main() {
  buildTests()

  const seed = randomU64();
  const commit = execFileSync(
    'git',
    ['rev-parse', 'HEAD'],
    {encoding: 'utf8'}
  ).trim()

  console.log("commit:", commit)
  console.log("starting seed:", seed)

  const planPath = 'target/test-plan.json'
  const minPlanPath = 'target/test-plan.min.json'
  const failingSeed = runTests({
    SEED: seed,
    SAVE_PLAN: planPath,
    ITERATIONS: 50000,
    OPERATIONS: 200,
  })

  if (!failingSeed) {
    console.log("tests passed")
    return
  }

  console.log("found failure at seed", failingSeed)
  const minimizedSeed = minimizeTestPlan(planPath, minPlanPath)
  const minimizedPlan = fs.readFileSync(minPlanPath, 'utf8')

  console.log("minimized plan:\n", minimizedPlan)

  const url = `${ZED_SERVER_URL}/api/randomized_test_failure`
  const body = {
    seed: minimizedSeed,
    token: ZED_CLIENT_SECRET_TOKEN,
    plan: JSON.parse(minimizedPlan),
    commit: commit,
  }
  await fetch(url, {
    method: 'POST',
    headers: {"Content-Type": "application/json"},
    body: JSON.stringify(body)
  })
}

function randomU64() {
  const bytes = randomBytes(8)
  const hexString = bytes.reduce(((string, byte) => string + byte.toString(16)), '')
  return BigInt('0x' + hexString).toString(10)
}
script/randomized-test-minimize (new executable file, 132 lines)
@@ -0,0 +1,132 @@
#!/usr/bin/env node --redirect-warnings=/dev/null

const fs = require('fs')
const path = require('path')
const {spawnSync} = require('child_process')

const FAILING_SEED_REGEX = /failing seed: (\d+)/ig
const CARGO_TEST_ARGS = [
  '--release',
  '--lib',
  '--package', 'collab',
  'random_collaboration',
]

if (require.main === module) {
  if (process.argv.length < 4) {
    process.stderr.write("usage: script/randomized-test-minimize <input-plan> <output-plan> [start-index]\n")
    process.exit(1)
  }

  minimizeTestPlan(
    process.argv[2],
    process.argv[3],
    parseInt(process.argv[4]) || 0
  );
}

function minimizeTestPlan(
  inputPlanPath,
  outputPlanPath,
  startIndex = 0
) {
  const tempPlanPath = inputPlanPath + '.try'

  fs.copyFileSync(inputPlanPath, outputPlanPath)
  let testPlan = JSON.parse(fs.readFileSync(outputPlanPath, 'utf8'))

  process.stderr.write("minimizing failing test plan...\n")
  for (let ix = startIndex; ix < testPlan.length; ix++) {
    // Skip 'MutateClients' entries, since they themselves are not single operations.
    if (testPlan[ix].MutateClients) {
      continue
    }

    // Remove a row from the test plan
    const newTestPlan = testPlan.slice()
    newTestPlan.splice(ix, 1)
    fs.writeFileSync(tempPlanPath, serializeTestPlan(newTestPlan), 'utf8');

    process.stderr.write(`${ix}/${testPlan.length}: ${JSON.stringify(testPlan[ix])}`)
    const failingSeed = runTests({
      SEED: '0',
      LOAD_PLAN: tempPlanPath,
      SAVE_PLAN: tempPlanPath,
      ITERATIONS: '500'
    })

    // If the test failed, keep the test plan with the removed row. Reload the test
    // plan from the JSON file, since the test itself will remove any operations
    // which are no longer valid before saving the test plan.
    if (failingSeed != null) {
      process.stderr.write(` - remove. failing seed: ${failingSeed}.\n`)
      fs.copyFileSync(tempPlanPath, outputPlanPath)
      testPlan = JSON.parse(fs.readFileSync(outputPlanPath, 'utf8'))
      ix--
    } else {
      process.stderr.write(` - keep.\n`)
    }
  }

  fs.unlinkSync(tempPlanPath)

  // Re-run the final minimized plan to get the correct failing seed.
  // This is a workaround for the fact that the execution order can
  // slightly change when replaying a test plan after it has been
  // saved and loaded.
  const failingSeed = runTests({
    SEED: '0',
    ITERATIONS: '5000',
    LOAD_PLAN: outputPlanPath,
  })

  process.stderr.write(`final test plan: ${outputPlanPath}\n`)
  process.stderr.write(`final seed: ${failingSeed}\n`)
  return failingSeed
}

function buildTests() {
  const {status} = spawnSync('cargo', ['test', '--no-run', ...CARGO_TEST_ARGS], {
    stdio: 'inherit',
    encoding: 'utf8',
    env: {
      ...process.env,
    }
  });
  if (status !== 0) {
    throw new Error('build failed')
  }
}

function runTests(env) {
  const {status, stdout} = spawnSync('cargo', ['test', ...CARGO_TEST_ARGS], {
    stdio: 'pipe',
    encoding: 'utf8',
    env: {
      ...process.env,
      ...env,
    }
  });

  if (status !== 0) {
    FAILING_SEED_REGEX.lastIndex = 0
    const match = FAILING_SEED_REGEX.exec(stdout)
    if (!match) {
      process.stderr.write("test failed, but no failing seed found:\n")
      process.stderr.write(stdout)
      process.stderr.write('\n')
      process.exit(1)
    }
    return match[1]
  } else {
    return null
  }
}

function serializeTestPlan(plan) {
  return "[\n" + plan.map(row => JSON.stringify(row)).join(",\n") + "\n]\n"
}

exports.buildTests = buildTests
exports.runTests = runTests
exports.minimizeTestPlan = minimizeTestPlan