index: optimize RevWalk to store IndexPosition instead of IndexEntry

The idea is the same as the heads_pos() change in 9832ee205d. While
IndexEntry::position() should be cheap, saving 20 bytes per entry appears to
improve performance in mid-size repos.

In my "linux" repo:

    revsets/all()
    -------------
    baseline  1.24     156.0±1.06ms
    this      1.00     126.0±0.51ms

I don't see a significant difference in small repos like "jj" or "git".

IndexEntryByPosition isn't removed since it's still used by the revset engine.
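
To make the shape of the change concrete, here is a minimal, self-contained
sketch of the technique, not jj's actual code: the walk queue holds bare
`Copy` positions and resolves them to full entries only when an item is
popped, so each queued item costs a few bytes instead of a full IndexEntry.
All types below are simplified stand-ins.

    use std::collections::BinaryHeap;

    // Simplified stand-in types for illustration; jj's real IndexEntry also
    // carries a reference into the index plus cached fields, which is what
    // makes it ~20 bytes heavier than a bare position.
    #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
    struct IndexPosition(u32);

    struct CompositeIndex {
        parents: Vec<Vec<IndexPosition>>, // parent adjacency per position
    }

    impl CompositeIndex {
        // Counterpart of entry_by_pos(): materialize entry data only when
        // the walk actually needs it.
        fn parent_positions(&self, pos: IndexPosition) -> &[IndexPosition] {
            &self.parents[pos.0 as usize]
        }
    }

    fn main() {
        // Tiny graph: position 0 is the root; 3 merges 1 and 2.
        let index = CompositeIndex {
            parents: vec![
                vec![],
                vec![IndexPosition(0)],
                vec![IndexPosition(0)],
                vec![IndexPosition(1), IndexPosition(2)],
            ],
        };
        // The heap stores only 4-byte positions; being a max-heap, it pops
        // the highest (newest) position first, like the ancestor RevWalk.
        let mut queue = BinaryHeap::from([IndexPosition(3)]);
        let mut seen = vec![false; 4];
        while let Some(pos) = queue.pop() {
            // Drop duplicates of the same position, loosely analogous to
            // skip_while_eq() in the patch.
            if std::mem::replace(&mut seen[pos.0 as usize], true) {
                continue;
            }
            println!("visit {}", pos.0);
            queue.extend(index.parent_positions(pos).iter().copied());
        }
    }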
Yuya Nishihara 2023-11-16 18:23:22 +09:00
parent 9be24db051
commit 32a7b33e56

@@ -1130,57 +1130,47 @@ impl From<&IndexEntry<'_>> for IndexPositionByGeneration {
 }
 
 trait RevWalkIndex<'a> {
-    type Entry: Clone + Ord + RevWalkIndexEntry<'a>;
+    type Position: Copy + Ord;
+    type AdjacentPositions: IntoIterator<Item = Self::Position>;
 
-    fn entry_by_pos(&self, pos: IndexPosition) -> Self::Entry;
-    fn adjacent_positions(&self, entry: &IndexEntry<'_>) -> SmallIndexPositionsVec;
-}
-
-trait RevWalkIndexEntry<'a> {
-    fn as_index_entry(&self) -> &IndexEntry<'a>;
-    fn into_index_entry(self) -> IndexEntry<'a>;
+    fn entry_by_pos(&self, pos: Self::Position) -> IndexEntry<'a>;
+    fn adjacent_positions(&self, entry: &IndexEntry<'_>) -> Self::AdjacentPositions;
 }
 
 impl<'a> RevWalkIndex<'a> for CompositeIndex<'a> {
-    type Entry = IndexEntryByPosition<'a>;
+    type Position = IndexPosition;
+    type AdjacentPositions = SmallIndexPositionsVec;
 
-    fn entry_by_pos(&self, pos: IndexPosition) -> Self::Entry {
-        IndexEntryByPosition(CompositeIndex::entry_by_pos(self, pos))
+    fn entry_by_pos(&self, pos: Self::Position) -> IndexEntry<'a> {
+        CompositeIndex::entry_by_pos(self, pos)
     }
 
-    fn adjacent_positions(&self, entry: &IndexEntry<'_>) -> SmallIndexPositionsVec {
+    fn adjacent_positions(&self, entry: &IndexEntry<'_>) -> Self::AdjacentPositions {
         entry.parent_positions()
     }
 }
 
-impl<'a> RevWalkIndexEntry<'a> for IndexEntryByPosition<'a> {
-    fn as_index_entry(&self) -> &IndexEntry<'a> {
-        &self.0
-    }
-
-    fn into_index_entry(self) -> IndexEntry<'a> {
-        self.0
-    }
-}
-
 #[derive(Clone)]
 struct RevWalkDescendantsIndex<'a> {
     index: CompositeIndex<'a>,
-    children_map: HashMap<IndexPosition, SmallIndexPositionsVec>,
+    children_map: HashMap<IndexPosition, DescendantIndexPositionsVec>,
 }
 
+// See SmallIndexPositionsVec for the array size.
+type DescendantIndexPositionsVec = SmallVec<[Reverse<IndexPosition>; 4]>;
+
 impl<'a> RevWalkDescendantsIndex<'a> {
     fn build<'b>(
         index: CompositeIndex<'a>,
         entries: impl IntoIterator<Item = IndexEntry<'b>>,
     ) -> Self {
         // For dense set, it's probably cheaper to use `Vec` instead of `HashMap`.
-        let mut children_map: HashMap<IndexPosition, SmallIndexPositionsVec> = HashMap::new();
+        let mut children_map: HashMap<IndexPosition, DescendantIndexPositionsVec> = HashMap::new();
         for entry in entries {
             children_map.entry(entry.position()).or_default(); // mark head node
             for parent_pos in entry.parent_positions() {
                 let parent = children_map.entry(parent_pos).or_default();
-                parent.push(entry.position());
+                parent.push(Reverse(entry.position()));
             }
         }
@@ -1196,30 +1186,21 @@ impl<'a> RevWalkDescendantsIndex<'a> {
 }
 
 impl<'a> RevWalkIndex<'a> for RevWalkDescendantsIndex<'a> {
-    type Entry = Reverse<IndexEntryByPosition<'a>>;
+    type Position = Reverse<IndexPosition>;
+    type AdjacentPositions = DescendantIndexPositionsVec;
 
-    fn entry_by_pos(&self, pos: IndexPosition) -> Self::Entry {
-        Reverse(IndexEntryByPosition(self.index.entry_by_pos(pos)))
+    fn entry_by_pos(&self, pos: Self::Position) -> IndexEntry<'a> {
+        self.index.entry_by_pos(pos.0)
     }
 
-    fn adjacent_positions(&self, entry: &IndexEntry<'_>) -> SmallIndexPositionsVec {
+    fn adjacent_positions(&self, entry: &IndexEntry<'_>) -> Self::AdjacentPositions {
         self.children_map[&entry.position()].clone()
     }
 }
 
-impl<'a> RevWalkIndexEntry<'a> for Reverse<IndexEntryByPosition<'a>> {
-    fn as_index_entry(&self) -> &IndexEntry<'a> {
-        self.0.as_index_entry()
-    }
-
-    fn into_index_entry(self) -> IndexEntry<'a> {
-        self.0.into_index_entry()
-    }
-}
-
 #[derive(Clone, Eq, PartialEq, Ord, PartialOrd)]
-struct RevWalkWorkItem<E, T> {
-    entry: E,
+struct RevWalkWorkItem<P, T> {
+    pos: P,
     state: RevWalkWorkItemState<T>,
 }
@@ -1230,21 +1211,14 @@ enum RevWalkWorkItemState<T> {
     Unwanted,
 }
 
-impl<E, T> RevWalkWorkItem<E, T> {
+impl<P, T> RevWalkWorkItem<P, T> {
     fn is_wanted(&self) -> bool {
         matches!(self.state, RevWalkWorkItemState::Wanted(_))
     }
 
-    fn map_entry<U>(self, f: impl FnOnce(E) -> U) -> RevWalkWorkItem<U, T> {
+    fn map_wanted<U>(self, f: impl FnOnce(T) -> U) -> RevWalkWorkItem<P, U> {
         RevWalkWorkItem {
-            entry: f(self.entry),
-            state: self.state,
-        }
-    }
-
-    fn map_wanted<U>(self, f: impl FnOnce(T) -> U) -> RevWalkWorkItem<E, U> {
-        RevWalkWorkItem {
-            entry: self.entry,
+            pos: self.pos,
             state: match self.state {
                 RevWalkWorkItemState::Wanted(t) => RevWalkWorkItemState::Wanted(f(t)),
                 RevWalkWorkItemState::Unwanted => RevWalkWorkItemState::Unwanted,
@@ -1256,7 +1230,7 @@ impl<E, T> RevWalkWorkItem<E, T> {
 #[derive(Clone)]
 struct RevWalkQueue<'a, I: RevWalkIndex<'a>, T> {
     index: I,
-    items: BinaryHeap<RevWalkWorkItem<I::Entry, T>>,
+    items: BinaryHeap<RevWalkWorkItem<I::Position, T>>,
     unwanted_count: usize,
 }
@@ -1281,18 +1255,14 @@ impl<'a, I: RevWalkIndex<'a>, T: Ord> RevWalkQueue<'a, I, T> {
         }
     }
 
-    fn push_wanted(&mut self, pos: IndexPosition, t: T) {
-        self.items.push(RevWalkWorkItem {
-            entry: self.index.entry_by_pos(pos),
-            state: RevWalkWorkItemState::Wanted(t),
-        });
+    fn push_wanted(&mut self, pos: I::Position, t: T) {
+        let state = RevWalkWorkItemState::Wanted(t);
+        self.items.push(RevWalkWorkItem { pos, state });
     }
 
-    fn push_unwanted(&mut self, pos: IndexPosition) {
-        self.items.push(RevWalkWorkItem {
-            entry: self.index.entry_by_pos(pos),
-            state: RevWalkWorkItemState::Unwanted,
-        });
+    fn push_unwanted(&mut self, pos: I::Position) {
+        let state = RevWalkWorkItemState::Unwanted;
+        self.items.push(RevWalkWorkItem { pos, state });
         self.unwanted_count += 1;
     }
@@ -1311,25 +1281,25 @@ impl<'a, I: RevWalkIndex<'a>, T: Ord> RevWalkQueue<'a, I, T> {
         }
     }
 
-    fn pop(&mut self) -> Option<RevWalkWorkItem<IndexEntry<'a>, T>> {
+    fn pop(&mut self) -> Option<RevWalkWorkItem<I::Position, T>> {
         if let Some(x) = self.items.pop() {
             self.unwanted_count -= !x.is_wanted() as usize;
-            Some(x.map_entry(RevWalkIndexEntry::into_index_entry))
+            Some(x)
         } else {
             None
         }
     }
 
-    fn pop_eq(&mut self, entry: &IndexEntry<'_>) -> Option<RevWalkWorkItem<IndexEntry<'a>, T>> {
+    fn pop_eq(&mut self, pos: &I::Position) -> Option<RevWalkWorkItem<I::Position, T>> {
         if let Some(x) = self.items.peek() {
-            (x.entry.as_index_entry() == entry).then(|| self.pop().unwrap())
+            (x.pos == *pos).then(|| self.pop().unwrap())
        } else {
             None
         }
     }
 
-    fn skip_while_eq(&mut self, entry: &IndexEntry<'_>) {
-        while self.pop_eq(entry).is_some() {
+    fn skip_while_eq(&mut self, pos: &I::Position) {
+        while self.pop_eq(pos).is_some() {
             continue;
         }
     }
@@ -1395,7 +1365,7 @@ impl<'a> RevWalk<'a> {
         for &pos in root_positions {
            // Do not add unreachable roots which shouldn't be visited
            if queue.index.contains_pos(pos) {
-                queue.push_wanted(pos, ());
+                queue.push_wanted(Reverse(pos), ());
            }
        }
        RevWalkDescendantsGenerationRange(RevWalkGenerationRangeImpl::new(queue, generation_range))
@@ -1418,16 +1388,18 @@ struct RevWalkImpl<'a, I: RevWalkIndex<'a>> {
 impl<'a, I: RevWalkIndex<'a>> RevWalkImpl<'a, I> {
     fn next(&mut self) -> Option<IndexEntry<'a>> {
         while let Some(item) = self.queue.pop() {
-            self.queue.skip_while_eq(&item.entry);
+            self.queue.skip_while_eq(&item.pos);
             if item.is_wanted() {
-                self.queue.push_wanted_adjacents(&item.entry, ());
-                return Some(item.entry);
+                let entry = self.queue.index.entry_by_pos(item.pos);
+                self.queue.push_wanted_adjacents(&entry, ());
+                return Some(entry);
             } else if self.queue.items.len() == self.queue.unwanted_count {
                 // No more wanted entries to walk
                 debug_assert!(!self.queue.items.iter().any(|x| x.is_wanted()));
                 return None;
             } else {
-                self.queue.push_unwanted_adjacents(&item.entry);
+                let entry = self.queue.index.entry_by_pos(item.pos);
+                self.queue.push_unwanted_adjacents(&entry);
             }
         }
@@ -1509,8 +1481,9 @@ impl<'a, I: RevWalkIndex<'a>> RevWalkGenerationRangeImpl<'a, I> {
     fn next(&mut self) -> Option<IndexEntry<'a>> {
         while let Some(item) = self.queue.pop() {
             if let RevWalkWorkItemState::Wanted(Reverse(mut pending_gen)) = item.state {
+                let entry = self.queue.index.entry_by_pos(item.pos);
                 let mut some_in_range = pending_gen.contains_end(self.generation_end);
-                while let Some(x) = self.queue.pop_eq(&item.entry) {
+                while let Some(x) = self.queue.pop_eq(&item.pos) {
                     // Merge overlapped ranges to reduce number of the queued items.
                     // For queries like `:(heads-)`, `gen.end` is close to `u32::MAX`, so
                     // ranges can be merged into one. If this is still slow, maybe we can add
@@ -1520,24 +1493,25 @@ impl<'a, I: RevWalkIndex<'a>> RevWalkGenerationRangeImpl<'a, I> {
                         pending_gen = if let Some(merged) = pending_gen.try_merge_end(gen) {
                             merged
                         } else {
-                            self.enqueue_wanted_adjacents(&item.entry, pending_gen);
+                            self.enqueue_wanted_adjacents(&entry, pending_gen);
                             gen
                         };
                     } else {
                         unreachable!("no more unwanted items of the same entry");
                     }
                 }
-                self.enqueue_wanted_adjacents(&item.entry, pending_gen);
+                self.enqueue_wanted_adjacents(&entry, pending_gen);
                 if some_in_range {
-                    return Some(item.entry);
+                    return Some(entry);
                 }
             } else if self.queue.items.len() == self.queue.unwanted_count {
                 // No more wanted entries to walk
                 debug_assert!(!self.queue.items.iter().any(|x| x.is_wanted()));
                 return None;
             } else {
-                self.queue.skip_while_eq(&item.entry);
-                self.queue.push_unwanted_adjacents(&item.entry);
+                let entry = self.queue.index.entry_by_pos(item.pos);
+                self.queue.skip_while_eq(&item.pos);
+                self.queue.push_unwanted_adjacents(&entry);
             }
         }
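
An aside on the Reverse<IndexPosition> position type chosen for the
descendants walk above (and why roots are pushed as `Reverse(pos)`):
BinaryHeap is a max-heap, so wrapping each position in Reverse makes the
queue yield the lowest (oldest) position first, which is the order a
descendants walk needs. A minimal standard-library sketch, not jj code:

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    fn main() {
        // Ancestor walk: a max-heap over bare positions pops the highest
        // (newest) position first.
        let mut ancestors = BinaryHeap::from([1u32, 3, 2]);
        assert_eq!(ancestors.pop(), Some(3));

        // Descendants walk: wrapping each position in Reverse flips the
        // ordering, so the lowest (oldest) position pops first. This is
        // the effect of Position = Reverse<IndexPosition> in the patch.
        let mut descendants = BinaryHeap::from([Reverse(1u32), Reverse(3), Reverse(2)]);
        assert_eq!(descendants.pop(), Some(Reverse(1)));
    }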