fix: make directly apply faster

Zixuan Chen 2022-11-17 23:17:36 +08:00
parent f5ae229ca3
commit e7b1148c8a
6 changed files with 67 additions and 19 deletions

View file

@@ -230,8 +230,7 @@ impl<'a, T: DagNode + 'a, D: Dag<Node = T>> Iterator for DagCausalIter<'a, D> {
            return None;
        }
-        let node_id = self.heap.pop().unwrap();
-        let node_id = node_id.id;
+        let node_id = self.heap.pop().unwrap().id;
        let target_span = self.target.get_mut(&node_id.client_id).unwrap();
        debug_assert_eq!(
            node_id.counter,

View file

@@ -187,6 +187,18 @@ impl LogStore {
        if are_frontiers_eq(&common_ancestors, &self.frontiers) {
            // we may apply changes directly into state
            let target_spans = next_vv.diff(&self.vv).left;
+            if target_spans.len() == 1 {
+                let (client_id, span) = target_spans.iter().next().unwrap();
+                for op in
+                    self.iter_ops_at_id_span(IdSpan::new(*client_id, span.start, span.end))
+                {
+                    let container = container_map.get_mut(&op.op().container).unwrap();
+                    container.update_state_directly(&op);
+                }
+                break 'apply;
+            }
+
+            // TODO: can reuse this path
            let causal_visit_path: Vec<_> =
                self.iter_causal(&common_ancestors, target_spans).collect();
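
For context, a rough sketch of the situation the new branch above targets: all incoming changes come from a single client and are causally ahead of the local frontiers, so ops can be written straight into container state instead of going through the causal visit path. The sketch uses only API calls that appear in the new test at the end of this commit (LoroCore, get_text, with_container, insert, import, export, vv); the test name is hypothetical, and that this exact sequence hits the fast path is an assumption rather than something the commit asserts.

// Hypothetical illustration: one remote peer editing strictly ahead of the
// local replica, so the imported changes cover a single client's id span and
// the common ancestors equal the local frontiers.
use loro_core::container::registry::ContainerWrapper;
use loro_core::LoroCore;

#[test]
fn single_peer_import_sketch() {
    let mut local = LoroCore::default();
    let mut remote = LoroCore::default();

    let text = remote.get_text("text");
    text.with_container(|text| {
        text.insert(&remote, 0, "hello");
    });

    // Everything new comes from `remote`'s single client, so `import` can apply
    // the ops directly to container state instead of walking the causal path.
    local.import(remote.export(local.vv()));
}
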
@@ -475,13 +487,8 @@ impl LogStore {
        }
    }

-    pub(crate) fn iter_ops_at_id_span(
-        &self,
-        id_span: IdSpan,
-        container: ContainerID,
-    ) -> iter::OpSpanIter<'_> {
-        let idx = self.get_container_idx(&container).unwrap();
-        iter::OpSpanIter::new(&self.changes, id_span, idx)
+    pub(crate) fn iter_ops_at_id_span(&self, id_span: IdSpan) -> iter::OpSpanIter<'_> {
+        iter::OpSpanIter::new(&self.changes, id_span)
    }

    #[inline(always)]

View file

@@ -46,7 +46,6 @@ pub struct OpSpanIter<'a> {
    changes: &'a [Change],
    change_index: usize,
    op_index: usize,
-    container: ContainerIdx,
    span: IdSpan,
}
@@ -54,7 +53,6 @@ impl<'a> OpSpanIter<'a> {
    pub fn new(
        changes: &'a FxHashMap<ClientID, RleVecWithIndex<Change, ChangeMergeCfg>>,
        target_span: IdSpan,
-        container: ContainerIdx,
    ) -> Self {
        let rle_changes = changes.get(&target_span.client_id).unwrap();
        let changes = rle_changes.vec();
@@ -65,7 +63,6 @@ impl<'a> OpSpanIter<'a> {
        Self {
            span: target_span,
-            container,
            changes,
            change_index,
            op_index: rle_changes[change_index]
@@ -94,14 +91,17 @@ impl<'a> Iterator for OpSpanIter<'a> {
                }

                self.op_index += 1;
-                if op.container != self.container {
-                    continue;
-                }
-                let start = (self.span.counter.min() - op.counter).max(0) as usize;
-                let end = ((self.span.counter.end() - op.counter) as usize).min(op.atom_len());
-                assert!(start < end, "{:?} {:#?}", self.span, op);
-                return Some(RichOp::new_by_change(change, op));
+                let op = RichOp::new_by_slice_on_change(
+                    change,
+                    op,
+                    self.span.counter.min(),
+                    self.span.counter.end(),
+                );
+                if op.atom_len() == 0 {
+                    return None;
+                } else {
+                    return Some(op);
+                }
            } else {
                self.op_index = 0;
                self.change_index += 1;
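
The counter-range clamping that the removed lines did by hand is presumably what RichOp::new_by_slice_on_change now handles, with an out-of-range op mapped to None instead of tripping the old assert. Below is a minimal self-contained sketch of that clamping; the function name clamp_to_span is hypothetical, and plain i32/usize stand in for the crate's own counter types.

/// Clamp an op that starts at counter `op_counter` and is `op_len` atoms long
/// to the target counter range [span_start, span_end). An empty range means
/// the op lies outside the span, which the iterator above maps to `None`.
fn clamp_to_span(op_counter: i32, op_len: usize, span_start: i32, span_end: i32) -> (usize, usize) {
    let start = ((span_start - op_counter).max(0) as usize).min(op_len);
    let end = ((span_end - op_counter).max(0) as usize).min(op_len);
    (start, end.max(start))
}

fn main() {
    // An op covering counters 10..20, sliced to the span 15..30, keeps atoms 5..10.
    assert_eq!(clamp_to_span(10, 10, 15, 30), (5, 10));
    // An op entirely outside the span yields an empty slice.
    assert_eq!(clamp_to_span(0, 5, 15, 30), (5, 5));
}
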

View file

@@ -1,5 +1,6 @@
use ctor::ctor;
+pub mod tests;
use loro_core::container::registry::ContainerWrapper;
use loro_core::{LoroCore, LoroValue};

View file

@@ -0,0 +1 @@
+pub mod task;

View file

@@ -0,0 +1,40 @@
+use std::io::Read;
+
+use flate2::read::GzDecoder;
+use loro_core::container::registry::ContainerWrapper;
+use loro_core::LoroCore;
+use serde_json::Value;
+
+const RAW_DATA: &[u8; 901823] = include_bytes!("../../benches/automerge-paper.json.gz");
+
+#[test]
+pub fn automerge_direct_sync() {
+    let mut d = GzDecoder::new(&RAW_DATA[..]);
+    let mut s = String::new();
+    d.read_to_string(&mut s).unwrap();
+    let json: Value = serde_json::from_str(&s).unwrap();
+    let txns = json.as_object().unwrap().get("txns");
+    let mut loro = LoroCore::default();
+    let mut loro_b = LoroCore::default();
+    for txn in txns.unwrap().as_array().unwrap() {
+        let text = loro.get_text("text");
+        text.with_container(|text| {
+            let patches = txn
+                .as_object()
+                .unwrap()
+                .get("patches")
+                .unwrap()
+                .as_array()
+                .unwrap();
+            for patch in patches {
+                let pos = patch[0].as_u64().unwrap() as usize;
+                let del_here = patch[1].as_u64().unwrap() as usize;
+                let ins_content = patch[2].as_str().unwrap();
+                text.delete(&loro, pos, del_here);
+                text.insert(&loro, pos, ins_content);
+            }
+        });
+        loro_b.import(loro.export(loro_b.vv()));
+    }
+}