mirror of
https://github.com/zed-industries/zed.git
synced 2025-01-10 20:41:59 +00:00
editor: Improve performance of edit coalescing (#14567)
# Background In https://github.com/zed-industries/zed/issues/14408 we received a repro for "Replace all" being slow, even after the work I did in https://github.com/zed-industries/zed/pull/13654. Admittedly #13654 was a pretty straightforward change. Under the profiler it turned out that we're spending *10 seconds* in `memmove` on the main thread. Ugh. Not great. The direct ancestor of the memmove call was 66f0c390a8/crates/editor/src/display_map/tab_map.rs (L108-L119)
What? # Accidental O(n^2) We have a bunch of `consolidate_*_edits` functions which take a list of Fold/Tab/Inlay/Wrap edits and merge consecutive edits if their ranges overlap/are next to one another. The loop usually goes as follows: ``` while ix < edits.len() { let (prev_edits, next_edits) = edits.split_at_mut(ix); let prev_edit = prev_edits.last_mut().unwrap(); let edit = &next_edits[0]; if PREV_EDIT_CAN_BE_MERGED_WITH_CURRENT_ONE { MERGE_EDITS(prev_edit, edit); edits.remove(ix); // !! } else { ix += 1; } } ``` The problem is the call to `.remove` - it has to shift all of the consecutive elements in the `edits` vector! Thus, when processing the edits from the original repro (where consolidation shrinks the edit list from 210k entries to 30k), we mostly spend time moving entries in memory around. Thus, the original repro isn't really an issue with replace_all; it's just that replace_all is one of the few tools available to the end user that can apply large # of edits in a single transaction. # Solution In this PR I address the issue by rewriting the loop in a way that does not throw items away via `.remove`. Instead, `Iterator::scan` is used, which lets us achieve the same logic without having the pitfalls of `.remove`s. Crucially, **this code does not allocate a new backing buffer for edits** (see [this article for rationale](https://blog.polybdenum.com/2024/01/17/identifying-the-collect-vec-memory-leak-footgun.html)); with `vec.into_iter().scan().filter_map().collect()` we still use the same underlying buffer as the one that's passed into `consolidate_*` functions. In development I verified that by checking whether the pointers to backing storage of a Vec are the same before and after the consolidation. # Results ### Before Nightly 0.145.0 [66f0c390a8
](66f0c390a8
) https://github.com/user-attachments/assets/8b0ad3bc-86d6-4f8a-850c-ebb86e8b3bfc (~13s end-to-end) ### After https://github.com/user-attachments/assets/366835db-1d84-4f95-8c74-b1506a9fabec (~2s end-to-end) The remaining lag (I think) lies in `TextSummary` calculation and not the consolidation itself. Thus, for the purposes of scoping this PR, I'll tackle it separately. Release Notes: - Significantly improved performance of applying large quantities of concurrent edits (e.g. when running "Replace all").
This commit is contained in:
parent
751508b6a2
commit
d338e4a8a6
3 changed files with 100 additions and 58 deletions
|
@ -164,7 +164,7 @@ impl<'a> FoldMapWriter<'a> {
|
|||
new_tree
|
||||
};
|
||||
|
||||
consolidate_inlay_edits(&mut edits);
|
||||
let edits = consolidate_inlay_edits(edits);
|
||||
let edits = self.0.sync(snapshot.clone(), edits);
|
||||
(self.0.snapshot.clone(), edits)
|
||||
}
|
||||
|
@ -212,7 +212,7 @@ impl<'a> FoldMapWriter<'a> {
|
|||
folds
|
||||
};
|
||||
|
||||
consolidate_inlay_edits(&mut edits);
|
||||
let edits = consolidate_inlay_edits(edits);
|
||||
let edits = self.0.sync(snapshot.clone(), edits);
|
||||
(self.0.snapshot.clone(), edits)
|
||||
}
|
||||
|
@ -513,7 +513,7 @@ impl FoldMap {
|
|||
});
|
||||
}
|
||||
|
||||
consolidate_fold_edits(&mut fold_edits);
|
||||
fold_edits = consolidate_fold_edits(fold_edits);
|
||||
}
|
||||
|
||||
self.snapshot.transforms = new_transforms;
|
||||
|
@ -809,7 +809,7 @@ where
|
|||
cursor
|
||||
}
|
||||
|
||||
fn consolidate_inlay_edits(edits: &mut Vec<InlayEdit>) {
|
||||
fn consolidate_inlay_edits(mut edits: Vec<InlayEdit>) -> Vec<InlayEdit> {
|
||||
edits.sort_unstable_by(|a, b| {
|
||||
a.old
|
||||
.start
|
||||
|
@ -817,22 +817,33 @@ fn consolidate_inlay_edits(edits: &mut Vec<InlayEdit>) {
|
|||
.then_with(|| b.old.end.cmp(&a.old.end))
|
||||
});
|
||||
|
||||
let mut i = 1;
|
||||
while i < edits.len() {
|
||||
let edit = edits[i].clone();
|
||||
let prev_edit = &mut edits[i - 1];
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = prev_edit.old.end.max(edit.old.end);
|
||||
prev_edit.new.start = prev_edit.new.start.min(edit.new.start);
|
||||
prev_edit.new.end = prev_edit.new.end.max(edit.new.end);
|
||||
edits.remove(i);
|
||||
continue;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
let mut inlay_edits = edits.into_iter();
|
||||
let inlay_edits = if let Some(mut first_edit) = inlay_edits.next() {
|
||||
let mut v: Vec<_> = inlay_edits
|
||||
.scan(&mut first_edit, |prev_edit, edit| {
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = prev_edit.old.end.max(edit.old.end);
|
||||
prev_edit.new.start = prev_edit.new.start.min(edit.new.start);
|
||||
prev_edit.new.end = prev_edit.new.end.max(edit.new.end);
|
||||
Some(None) // Skip this edit, it's merged
|
||||
} else {
|
||||
let prev = std::mem::replace(*prev_edit, edit);
|
||||
Some(Some(prev)) // Yield the previous edit
|
||||
}
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
v.push(first_edit.clone());
|
||||
|
||||
v
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
inlay_edits
|
||||
}
|
||||
|
||||
fn consolidate_fold_edits(edits: &mut Vec<FoldEdit>) {
|
||||
fn consolidate_fold_edits(mut edits: Vec<FoldEdit>) -> Vec<FoldEdit> {
|
||||
edits.sort_unstable_by(|a, b| {
|
||||
a.old
|
||||
.start
|
||||
|
@ -840,19 +851,29 @@ fn consolidate_fold_edits(edits: &mut Vec<FoldEdit>) {
|
|||
.then_with(|| b.old.end.cmp(&a.old.end))
|
||||
});
|
||||
|
||||
let mut i = 1;
|
||||
while i < edits.len() {
|
||||
let edit = edits[i].clone();
|
||||
let prev_edit = &mut edits[i - 1];
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = prev_edit.old.end.max(edit.old.end);
|
||||
prev_edit.new.start = prev_edit.new.start.min(edit.new.start);
|
||||
prev_edit.new.end = prev_edit.new.end.max(edit.new.end);
|
||||
edits.remove(i);
|
||||
continue;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
let mut fold_edits = edits.into_iter();
|
||||
let fold_edits = if let Some(mut first_edit) = fold_edits.next() {
|
||||
let mut v: Vec<_> = fold_edits
|
||||
.scan(&mut first_edit, |prev_edit, edit| {
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = prev_edit.old.end.max(edit.old.end);
|
||||
prev_edit.new.start = prev_edit.new.start.min(edit.new.start);
|
||||
prev_edit.new.end = prev_edit.new.end.max(edit.new.end);
|
||||
Some(None) // Skip this edit, it's merged
|
||||
} else {
|
||||
let prev = std::mem::replace(*prev_edit, edit);
|
||||
Some(Some(prev)) // Yield the previous edit
|
||||
}
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
v.push(first_edit.clone());
|
||||
v
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
fold_edits
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
|
|
|
@ -104,19 +104,29 @@ impl TabMap {
|
|||
}
|
||||
|
||||
// Combine any edits that overlap due to the expansion.
|
||||
let mut ix = 1;
|
||||
while ix < fold_edits.len() {
|
||||
let (prev_edits, next_edits) = fold_edits.split_at_mut(ix);
|
||||
let prev_edit = prev_edits.last_mut().unwrap();
|
||||
let edit = &next_edits[0];
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = edit.old.end;
|
||||
prev_edit.new.end = edit.new.end;
|
||||
fold_edits.remove(ix);
|
||||
} else {
|
||||
ix += 1;
|
||||
}
|
||||
}
|
||||
let mut fold_edits = fold_edits.into_iter();
|
||||
let fold_edits = if let Some(mut first_edit) = fold_edits.next() {
|
||||
let mut v: Vec<_> = fold_edits
|
||||
.scan(&mut first_edit, |state, edit| {
|
||||
if state.old.end >= edit.old.start {
|
||||
state.old.end = edit.old.end;
|
||||
state.new.end = edit.new.end;
|
||||
Some(None) // Skip this edit, it's merged
|
||||
} else {
|
||||
let new_state = edit.clone();
|
||||
let result = Some(Some(state.clone())); // Yield the previous edit
|
||||
**state = new_state;
|
||||
result
|
||||
}
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
v.push(first_edit);
|
||||
|
||||
v
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
for fold_edit in fold_edits {
|
||||
let old_start = fold_edit.old.start.to_point(&old_snapshot.fold_snapshot);
|
||||
|
|
|
@ -567,7 +567,7 @@ impl WrapSnapshot {
|
|||
});
|
||||
}
|
||||
|
||||
consolidate_wrap_edits(&mut wrap_edits);
|
||||
wrap_edits = consolidate_wrap_edits(wrap_edits);
|
||||
Patch::new(wrap_edits)
|
||||
}
|
||||
|
||||
|
@ -1008,19 +1008,30 @@ impl<'a> sum_tree::Dimension<'a, TransformSummary> for WrapPoint {
|
|||
}
|
||||
}
|
||||
|
||||
fn consolidate_wrap_edits(edits: &mut Vec<WrapEdit>) {
|
||||
let mut i = 1;
|
||||
while i < edits.len() {
|
||||
let edit = edits[i].clone();
|
||||
let prev_edit = &mut edits[i - 1];
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = edit.old.end;
|
||||
prev_edit.new.end = edit.new.end;
|
||||
edits.remove(i);
|
||||
continue;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
fn consolidate_wrap_edits(edits: Vec<WrapEdit>) -> Vec<WrapEdit> {
|
||||
let mut wrap_edits = edits.into_iter();
|
||||
let wrap_edits = if let Some(mut first_edit) = wrap_edits.next() {
|
||||
let mut v: Vec<_> = wrap_edits
|
||||
.scan(&mut first_edit, |prev_edit, edit| {
|
||||
if prev_edit.old.end >= edit.old.start {
|
||||
prev_edit.old.end = edit.old.end;
|
||||
prev_edit.new.end = edit.new.end;
|
||||
Some(None) // Skip this edit, it's merged
|
||||
} else {
|
||||
let prev = std::mem::replace(*prev_edit, edit);
|
||||
Some(Some(prev)) // Yield the previous edit
|
||||
}
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
v.push(first_edit.clone());
|
||||
|
||||
v
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
wrap_edits
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
Loading…
Reference in a new issue