Mirror of https://github.com/loro-dev/loro.git
feat: add exportJsonInIdSpan and make peer compression optional (#602)
This change makes the internal operations more accessible to application code.

* feat: add exportJsonInIdSpan and make peer compression optional
  - Introduced `export_json_updates_without_peer_compression` to allow exporting JSON updates without compressing peer IDs, so the output is easier for application code to process.
  - Updated the existing `export_json_updates` method to accept a `with_peer_compression` parameter, defaulting to `true`.
  - Refactored related code across the codebase to accommodate the new functionality while preserving backward compatibility.
* fix: slice error & add tests
* chore: changeset
Commit ac51ceb2f9 (parent 46bab49281)
14 changed files with 451 additions and 74 deletions
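For orientation, here is a minimal sketch of how the new API is called from JavaScript, based on the type declarations and tests added in this commit (the `loro-crdt` import path and the sample values are assumptions, not part of the diff):

```ts
import { LoroDoc } from "loro-crdt";

const doc = new LoroDoc();
doc.setPeerId("1");
doc.getText("text").insert(0, "Hello");
doc.commit();

// Existing callers are unaffected: peer compression stays on by default.
const compact = doc.exportJsonUpdates();

// Opt out of peer compression so application code can read peer IDs directly.
const readable = doc.exportJsonUpdates(undefined, undefined, false);

// New: export the readable changes covering one peer's counter range.
const changes = doc.exportJsonInIdSpan({ peer: "1", counter: 0, length: 5 });
```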
.changeset/popular-ghosts-travel.md (new file, +5)
@@ -0,0 +1,5 @@
+---
+"loro-crdt": minor
+---
+
+feat: add exportJsonInIdSpan and make peer compression optional

@@ -111,12 +111,12 @@ mod run {
         b.bench_function("B4_encode_json_update", |b| {
             ensure_ran();
             b.iter(|| {
-                let _ = loro.export_json_updates(&Default::default(), &loro.oplog_vv());
+                let _ = loro.export_json_updates(&Default::default(), &loro.oplog_vv(), true);
             })
         });
         b.bench_function("B4_decode_json_update", |b| {
             ensure_ran();
-            let json = loro.export_json_updates(&Default::default(), &loro.oplog_vv());
+            let json = loro.export_json_updates(&Default::default(), &loro.oplog_vv(), true);
             b.iter(|| {
                 let store2 = LoroDoc::default();
                 store2.import_json_updates(json.clone()).unwrap();

@@ -79,9 +79,12 @@ fn main() {
         output.len(),
     );
 
-    let json_updates =
-        serde_json::to_string(&loro.export_json_updates(&Default::default(), &loro.oplog_vv()))
-            .unwrap();
+    let json_updates = serde_json::to_string(&loro.export_json_updates(
+        &Default::default(),
+        &loro.oplog_vv(),
+        true,
+    ))
+    .unwrap();
     let output = miniz_oxide::deflate::compress_to_vec(json_updates.as_bytes(), 6);
     println!(
         "json updates size {} after compression {}",

@@ -23,9 +23,12 @@ fn log_size() {
     txn.commit().unwrap();
     let snapshot = loro.export_snapshot().unwrap();
     let updates = loro.export_from(&Default::default());
-    let json_updates =
-        serde_json::to_string(&loro.export_json_updates(&Default::default(), &loro.oplog_vv()))
-            .unwrap();
+    let json_updates = serde_json::to_string(&loro.export_json_updates(
+        &Default::default(),
+        &loro.oplog_vv(),
+        true,
+    ))
+    .unwrap();
     println!("\n");
     println!("Snapshot size={}", snapshot.len());
     println!("Updates size={}", updates.len());

@@ -19,8 +19,8 @@ use crate::{
 use either::Either;
 use json::{JsonOpContent, JsonSchema};
 use loro_common::{
-    ContainerID, ContainerType, HasCounterSpan, HasIdSpan, IdLp, LoroError, LoroResult, LoroValue,
-    PeerID, TreeID, ID,
+    ContainerID, ContainerType, HasCounterSpan, HasId, HasIdSpan, IdLp, IdSpan, LoroError,
+    LoroResult, LoroValue, PeerID, TreeID, ID,
 };
 use rle::{HasLength, RleVec, Sliceable};
 use std::sync::Arc;

@@ -47,23 +47,70 @@ pub(crate) fn export_json<'a, 'c: 'a>(
     oplog: &'c OpLog,
     start_vv: &VersionVector,
     end_vv: &VersionVector,
+    with_peer_compression: bool,
 ) -> JsonSchema {
     let actual_start_vv = refine_vv(start_vv, oplog);
     let actual_end_vv = refine_vv(end_vv, oplog);
 
     let frontiers = oplog.dag.vv_to_frontiers(&actual_start_vv);
 
-    let mut peer_register = ValueRegister::<PeerID>::new();
     let diff_changes = init_encode(oplog, &actual_start_vv, &actual_end_vv);
-    let changes = encode_changes(&diff_changes, &oplog.arena, &mut peer_register);
-    JsonSchema {
-        changes,
-        schema_version: SCHEMA_VERSION,
-        peers: peer_register.unwrap_vec(),
-        start_version: frontiers,
+    if with_peer_compression {
+        let mut peer_register = ValueRegister::<PeerID>::new();
+        let changes = encode_changes(&diff_changes, &oplog.arena, Some(&mut peer_register));
+        JsonSchema {
+            changes,
+            schema_version: SCHEMA_VERSION,
+            peers: Some(peer_register.unwrap_vec()),
+            start_version: frontiers,
+        }
+    } else {
+        let changes = encode_changes(&diff_changes, &oplog.arena, None);
+        JsonSchema {
+            changes,
+            schema_version: SCHEMA_VERSION,
+            peers: None,
+            start_version: frontiers,
+        }
     }
 }
 
+pub(crate) fn export_json_in_id_span(oplog: &OpLog, mut id_span: IdSpan) -> Vec<json::JsonChange> {
+    id_span.normalize_();
+    let end = oplog.vv().get(&id_span.peer).copied().unwrap_or(0);
+    if id_span.counter.start >= end {
+        return vec![];
+    }
+
+    id_span.counter.end = id_span.counter.end.min(end);
+    let mut diff_changes: Vec<Either<BlockChangeRef, Change>> = Vec::new();
+    while id_span.counter.end - id_span.counter.start > 0 {
+        let change: BlockChangeRef = oplog.get_change_at(id_span.id_start()).unwrap();
+        let ctr_end = change.ctr_end();
+        if change.id.counter >= id_span.counter.start && change.ctr_end() <= id_span.counter.end {
+            diff_changes.push(Either::Left(change));
+        } else {
+            let start = if change.id.counter < id_span.counter.start {
+                (id_span.counter.start - change.id.counter) as usize
+            } else {
+                0
+            };
+
+            let end = if change.ctr_end() > id_span.counter.end {
+                (id_span.counter.end - change.id.counter) as usize
+            } else {
+                change.atom_len()
+            };
+
+            diff_changes.push(Either::Right(change.slice(start, end)));
+        }
+
+        id_span.counter.start = ctr_end;
+    }
+
+    encode_changes(&diff_changes, &oplog.arena, None)
+}
+
 pub(crate) fn import_json(oplog: &mut OpLog, json: JsonSchema) -> LoroResult<ImportStatus> {
     let changes = decode_changes(json, &oplog.arena)?;
     let ImportChangesResult {

@@ -124,28 +171,38 @@ fn init_encode<'s, 'a: 's>(
     diff_changes
 }
 
-fn register_id(id: &ID, peer_register: &mut ValueRegister<PeerID>) -> ID {
-    let peer = peer_register.register(&id.peer);
+fn register_id(id: &ID, peer_register: Option<&mut ValueRegister<PeerID>>) -> ID {
+    let peer = match peer_register {
+        Some(peer_register) => peer_register.register(&id.peer) as PeerID,
+        None => id.peer,
+    };
     ID::new(peer as PeerID, id.counter)
 }
 
-fn register_idlp(idlp: &IdLp, peer_register: &mut ValueRegister<PeerID>) -> IdLp {
+fn register_idlp(idlp: &IdLp, peer_register: Option<&mut ValueRegister<PeerID>>) -> IdLp {
+    let peer = match peer_register {
+        Some(peer_register) => peer_register.register(&idlp.peer) as PeerID,
+        None => idlp.peer,
+    };
     IdLp {
-        peer: peer_register.register(&idlp.peer) as PeerID,
+        peer,
         lamport: idlp.lamport,
     }
 }
 
-fn register_tree_id(tree: &TreeID, peer_register: &mut ValueRegister<PeerID>) -> TreeID {
+fn register_tree_id(tree: &TreeID, peer_register: Option<&mut ValueRegister<PeerID>>) -> TreeID {
     TreeID {
-        peer: peer_register.register(&tree.peer) as PeerID,
+        peer: match peer_register {
+            Some(peer_register) => peer_register.register(&tree.peer) as PeerID,
+            None => tree.peer,
+        },
         counter: tree.counter,
     }
 }
 
 fn register_container_id(
     container: ContainerID,
-    peer_register: &mut ValueRegister<PeerID>,
+    peer_register: Option<&mut ValueRegister<PeerID>>,
 ) -> ContainerID {
     match container {
         ContainerID::Normal {

@@ -153,7 +210,10 @@ fn register_container_id(
             counter,
             container_type,
         } => ContainerID::Normal {
-            peer: peer_register.register(&peer) as PeerID,
+            peer: match peer_register {
+                Some(peer_register) => peer_register.register(&peer) as PeerID,
+                None => peer,
+            },
             counter,
             container_type,
         },

@@ -161,14 +221,14 @@
     }
 }
 
-fn convert_container_id(container: ContainerID, peers: &[PeerID]) -> ContainerID {
+fn convert_container_id(container: ContainerID, peers: &Option<Vec<PeerID>>) -> ContainerID {
     match container {
         ContainerID::Normal {
             peer,
             counter,
             container_type,
         } => ContainerID::Normal {
-            peer: peers[peer as usize],
+            peer: get_peer_from_peers(peers, peer),
             counter,
             container_type,
         },

@@ -176,23 +236,30 @@ fn convert_container_id(container: ContainerID, peers: &[PeerID]) -> ContainerID
     }
 }
 
-fn convert_id(id: &ID, peers: &[PeerID]) -> ID {
+pub(crate) fn get_peer_from_peers(peers: &Option<Vec<PeerID>>, peer: PeerID) -> PeerID {
+    match peers {
+        Some(peers) => peers[peer as usize],
+        None => peer,
+    }
+}
+
+fn convert_id(id: &ID, peers: &Option<Vec<PeerID>>) -> ID {
     ID {
-        peer: peers[id.peer as usize],
+        peer: get_peer_from_peers(peers, id.peer),
         counter: id.counter,
     }
 }
 
-fn convert_idlp(idlp: &IdLp, peers: &[PeerID]) -> IdLp {
+fn convert_idlp(idlp: &IdLp, peers: &Option<Vec<PeerID>>) -> IdLp {
     IdLp {
         lamport: idlp.lamport,
-        peer: peers[idlp.peer as usize],
+        peer: get_peer_from_peers(peers, idlp.peer),
     }
 }
 
-fn convert_tree_id(tree: &TreeID, peers: &[PeerID]) -> TreeID {
+fn convert_tree_id(tree: &TreeID, peers: &Option<Vec<PeerID>>) -> TreeID {
     TreeID {
-        peer: peers[tree.peer as usize],
+        peer: get_peer_from_peers(peers, tree.peer),
         counter: tree.counter,
     }
 }

@@ -200,7 +267,7 @@ fn convert_tree_id(tree: &TreeID, peers: &[PeerID]) -> TreeID {
 fn encode_changes(
     diff_changes: &[Either<BlockChangeRef, Change>],
     arena: &SharedArena,
-    peer_register: &mut ValueRegister<PeerID>,
+    mut peer_register: Option<&mut ValueRegister<PeerID>>,
 ) -> Vec<json::JsonChange> {
     let mut changes = Vec::with_capacity(diff_changes.len());
     for change in diff_changes.iter() {

@@ -217,7 +284,7 @@ fn encode_changes(
         {
             let mut container = arena.get_container_id(*container).unwrap();
             if container.is_normal() {
-                container = register_container_id(container, peer_register);
+                container = register_container_id(container, peer_register.as_deref_mut());
             }
             let op = match container.container_type() {
                 ContainerType::List => match content {

@@ -228,7 +295,10 @@ fn encode_changes(
                         values.iter_mut().for_each(|x| {
                             if let LoroValue::Container(id) = x {
                                 if id.is_normal() {
-                                    *id = register_container_id(id.clone(), peer_register);
+                                    *id = register_container_id(
+                                        id.clone(),
+                                        peer_register.as_deref_mut(),
+                                    );
                                 }
                             }
                         });

@@ -243,7 +313,7 @@ fn encode_changes(
                     }) => json::ListOp::Delete {
                         pos: *pos as i32,
                         len: *signed_len as i32,
-                        start_id: register_id(id_start, peer_register),
+                        start_id: register_id(id_start, peer_register.as_deref_mut()),
                     },
                     _ => unreachable!(),
                 }),

@@ -257,7 +327,10 @@ fn encode_changes(
                         values.iter_mut().for_each(|x| {
                             if let LoroValue::Container(id) = x {
                                 if id.is_normal() {
-                                    *id = register_container_id(id.clone(), peer_register);
+                                    *id = register_container_id(
+                                        id.clone(),
+                                        peer_register.as_deref_mut(),
+                                    );
                                 }
                             }
                         });

@@ -272,7 +345,7 @@ fn encode_changes(
                     }) => json::MovableListOp::Delete {
                         pos: *pos as i32,
                         len: *signed_len as i32,
-                        start_id: register_id(id_start, peer_register),
+                        start_id: register_id(id_start, peer_register.as_deref_mut()),
                     },
                     InnerListOp::Move {
                         from,

@@ -281,14 +354,14 @@ fn encode_changes(
                     } => json::MovableListOp::Move {
                         from: *from,
                         to: *to,
-                        elem_id: register_idlp(from_id, peer_register),
+                        elem_id: register_idlp(from_id, peer_register.as_deref_mut()),
                     },
                     InnerListOp::Set { elem_id, value } => {
                         let value = if let LoroValue::Container(id) = value {
                             if id.is_normal() {
                                 LoroValue::Container(register_container_id(
                                     id.clone(),
-                                    peer_register,
+                                    peer_register.as_deref_mut(),
                                 ))
                             } else {
                                 value.clone()

@@ -297,7 +370,7 @@ fn encode_changes(
                             value.clone()
                         };
                         json::MovableListOp::Set {
-                            elem_id: register_idlp(elem_id, peer_register),
+                            elem_id: register_idlp(elem_id, peer_register.as_deref_mut()),
                             value,
                         }
                     }

@@ -322,7 +395,7 @@ fn encode_changes(
                     }) => json::TextOp::Delete {
                         pos: *pos as i32,
                         len: *signed_len as i32,
-                        start_id: register_id(id_start, peer_register),
+                        start_id: register_id(id_start, peer_register.as_deref_mut()),
                     },
                     InnerListOp::StyleStart {
                         start,

@@ -349,7 +422,7 @@ fn encode_changes(
                             if id.is_normal() {
                                 LoroValue::Container(register_container_id(
                                     id.clone(),
-                                    peer_register,
+                                    peer_register.as_deref_mut(),
                                 ))
                             } else {
                                 v.clone()

@@ -378,8 +451,9 @@ fn encode_changes(
                         parent,
                         position,
                     } => json::TreeOp::Create {
-                        target: register_tree_id(target, peer_register),
-                        parent: parent.map(|p| register_tree_id(&p, peer_register)),
+                        target: register_tree_id(target, peer_register.as_deref_mut()),
+                        parent: parent
+                            .map(|p| register_tree_id(&p, peer_register.as_deref_mut())),
                         fractional_index: position.clone(),
                     },
                     TreeOp::Move {

@@ -387,12 +461,13 @@ fn encode_changes(
                         parent,
                         position,
                     } => json::TreeOp::Move {
-                        target: register_tree_id(target, peer_register),
-                        parent: parent.map(|p| register_tree_id(&p, peer_register)),
+                        target: register_tree_id(target, peer_register.as_deref_mut()),
+                        parent: parent
+                            .map(|p| register_tree_id(&p, peer_register.as_deref_mut())),
                         fractional_index: position.clone(),
                     },
                     TreeOp::Delete { target } => json::TreeOp::Delete {
-                        target: register_tree_id(target, peer_register),
+                        target: register_tree_id(target, peer_register.as_deref_mut()),
                     },
                 }),
                 _ => unreachable!(),

@@ -430,12 +505,12 @@ fn encode_changes(
             });
         }
         let c = json::JsonChange {
-            id: register_id(&change.id, peer_register),
+            id: register_id(&change.id, peer_register.as_deref_mut()),
             ops,
             deps: change
                 .deps
                 .iter()
-                .map(|id| register_id(&id, peer_register))
+                .map(|id| register_id(&id, peer_register.as_deref_mut()))
                 .collect(),
             lamport: change.lamport,
             timestamp: change.timestamp,

@@ -478,7 +553,7 @@ fn decode_changes(json: JsonSchema, arena: &SharedArena) -> LoroResult<Vec<Chang
     Ok(ans)
 }
 
-fn decode_op(op: json::JsonOp, arena: &SharedArena, peers: &[PeerID]) -> LoroResult<Op> {
+fn decode_op(op: json::JsonOp, arena: &SharedArena, peers: &Option<Vec<PeerID>>) -> LoroResult<Op> {
     let json::JsonOp {
         counter,
         container,

@@ -724,7 +799,7 @@ pub mod json {
     use serde::{Deserialize, Serialize};
     use std::ops::Range;
 
-    use super::redact_value;
+    use super::{get_peer_from_peers, redact_value};
 
     #[derive(Debug, Clone, Serialize, Deserialize)]
     pub struct JsonSchema {

@@ -732,7 +807,7 @@ pub mod json {
         #[serde(with = "self::serde_impl::frontiers")]
         pub start_version: Frontiers,
         #[serde(with = "self::serde_impl::peer_id")]
-        pub peers: Vec<PeerID>,
+        pub peers: Option<Vec<PeerID>>,
         pub changes: Vec<JsonChange>,
     }
 

@@ -1123,19 +1198,22 @@ pub mod json {
         use loro_common::PeerID;
         use serde::{Deserialize, Deserializer, Serializer};
 
-        pub fn serialize<S>(peers: &[PeerID], s: S) -> Result<S::Ok, S::Error>
+        pub fn serialize<S>(peers: &Option<Vec<PeerID>>, s: S) -> Result<S::Ok, S::Error>
         where
             S: Serializer,
         {
-            s.collect_seq(peers.iter().map(|x| x.to_string()))
+            match peers {
+                Some(peers) => s.collect_seq(peers.iter().map(|x| x.to_string())),
+                None => s.serialize_none(),
+            }
         }
 
-        pub fn deserialize<'de, 'a, D>(d: D) -> Result<Vec<PeerID>, D::Error>
+        pub fn deserialize<'de, 'a, D>(d: D) -> Result<Option<Vec<PeerID>>, D::Error>
         where
             D: Deserializer<'de>,
         {
-            let peers: Vec<String> = Deserialize::deserialize(d)?;
-            Ok(peers.into_iter().map(|x| x.parse().unwrap()).collect())
+            let peers: Option<Vec<String>> = Deserialize::deserialize(d)?;
+            Ok(peers.map(|x| x.into_iter().map(|x| x.parse().unwrap()).collect()))
         }
     }
 

@@ -1258,7 +1336,7 @@ pub mod json {
         let peers = json.peers.clone();
         let mut errors = Vec::new();
         for change in json.changes.iter_mut() {
-            let real_peer = peers[change.id.peer as usize];
+            let real_peer = get_peer_from_peers(&peers, change.id.peer);
            let real_id = ID::new(real_peer, change.id.counter);
            if !range.has_overlap_with(real_id.to_span(change.op_len())) {
                continue;

@@ -1400,11 +1478,13 @@ mod tests {
         let json = doc.export_json_updates(
             &VersionVector::from_iter(vec![(0, 1)]),
             &VersionVector::from_iter(vec![(0, 2)]),
+            true,
         );
         assert_eq!(json.changes[0].ops.len(), 1);
         let json = doc.export_json_updates(
             &VersionVector::from_iter(vec![(0, 0)]),
             &VersionVector::from_iter(vec![(0, 2)]),
+            true,
         );
         assert_eq!(json.changes[0].ops.len(), 2);
     }

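To make the effect of the optional `peers` table concrete, the sketch below contrasts the two output shapes implied by the `JsonSchema` changes above. The values and exact field layout are illustrative only:

```ts
// With peer compression (default): peer IDs are deduplicated into a `peers`
// table, serialized as strings, and referenced by index inside IDs.
const compressed = {
  schema_version: 1, // illustrative; the real value is SCHEMA_VERSION
  peers: ["2"],      // real peer IDs as strings
  changes: [{ id: "0@0" /* counter 0 of peers[0] */, ops: [] }],
  // start_version and the remaining fields are unchanged and omitted here
};

// Without peer compression: `peers` is serialized as none/null and every ID
// carries the real peer ID, which is easier for application code to process.
const uncompressed = {
  schema_version: 1,
  peers: null,
  changes: [{ id: "0@2" /* counter 0 of peer "2" */, ops: [] }],
};
```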
@@ -41,6 +41,7 @@ use crate::{
     event::{str_to_path, EventTriggerKind, Index, InternalDocDiff},
     handler::{Handler, MovableListHandler, TextHandler, TreeHandler, ValueOrHandler},
     id::PeerID,
+    json::JsonChange,
     op::InnerContent,
     oplog::{loro_dag::FrontiersNotIncluded, OpLog},
     state::DocState,

@@ -570,10 +571,25 @@ impl LoroDoc {
         &self,
         start_vv: &VersionVector,
         end_vv: &VersionVector,
+        with_peer_compression: bool,
     ) -> JsonSchema {
         self.commit_then_stop();
         let oplog = self.oplog.try_lock().unwrap();
-        let json = crate::encoding::json_schema::export_json(&oplog, start_vv, end_vv);
+        let json = crate::encoding::json_schema::export_json(
+            &oplog,
+            start_vv,
+            end_vv,
+            with_peer_compression,
+        );
         drop(oplog);
         self.renew_txn_if_auto_commit();
         json
+    }
+
+    pub fn export_json_in_id_span(&self, id_span: IdSpan) -> Vec<JsonChange> {
+        self.commit_then_stop();
+        let oplog = self.oplog.try_lock().unwrap();
+        let json = crate::encoding::json_schema::export_json_in_id_span(&oplog, id_span);
+        drop(oplog);
+        self.renew_txn_if_auto_commit();
+        json

@@ -702,7 +702,7 @@ mod test {
     println!("Snapshot bytes {:?}", dev_utils::ByteSize(bytes.length()));
     // assert!(bytes.len() < 30);
 
-    let json = doc.export_json_updates(&Default::default(), &doc.oplog_vv());
+    let json = doc.export_json_updates(&Default::default(), &doc.oplog_vv(), true);
     let json_string = serde_json::to_string(&json.changes).unwrap();
     println!(
         "JSON string bytes {:?}",

@@ -947,7 +947,7 @@ fn counter() {
     counter.increment(1.).unwrap();
     counter.increment(2.).unwrap();
     counter.decrement(1.).unwrap();
-    let json = doc.export_json_updates(&Default::default(), &doc.oplog_vv());
+    let json = doc.export_json_updates(&Default::default(), &doc.oplog_vv(), true);
     let doc2 = LoroDoc::new_auto_commit();
     doc2.import_json_updates(json).unwrap();
 }

@@ -6,12 +6,12 @@ use loro_internal::encoding::{ImportBlobMetadata, ImportStatus};
 use loro_internal::event::Diff;
 use loro_internal::handler::{Handler, ValueOrHandler};
 use loro_internal::version::VersionRange;
-use loro_internal::{CounterSpan, ListDiffItem, LoroDoc, LoroValue};
+use loro_internal::{Counter, CounterSpan, IdSpan, ListDiffItem, LoroDoc, LoroValue};
 use wasm_bindgen::JsValue;
 
 use crate::{
-    frontiers_to_ids, Container, Cursor, JsContainer, JsImportBlobMetadata, LoroCounter, LoroList,
-    LoroMap, LoroMovableList, LoroText, LoroTree, VersionVector,
+    frontiers_to_ids, Container, Cursor, JsContainer, JsIdSpan, JsImportBlobMetadata, LoroCounter,
+    LoroList, LoroMap, LoroMovableList, LoroText, LoroTree, VersionVector,
 };
 use wasm_bindgen::__rt::IntoJsResult;
 use wasm_bindgen::convert::RefFromWasmAbi;

@@ -79,6 +79,22 @@ pub(crate) fn js_to_container(js: JsContainer) -> Result<Container, JsValue> {
     Ok(container)
 }
 
+pub(crate) fn js_to_id_span(js: JsIdSpan) -> Result<IdSpan, JsValue> {
+    let value: JsValue = js.into();
+    let peer = Reflect::get(&value, &JsValue::from_str("peer"))?
+        .as_string()
+        .unwrap()
+        .parse::<u64>()
+        .unwrap();
+    let counter = Reflect::get(&value, &JsValue::from_str("counter"))?
+        .as_f64()
+        .unwrap() as Counter;
+    let length = Reflect::get(&value, &JsValue::from_str("length"))?
+        .as_f64()
+        .unwrap() as Counter;
+    Ok(IdSpan::new(peer, counter, counter + length))
+}
+
 pub(crate) fn js_to_version_vector(
     js: JsValue,
 ) -> Result<wasm_bindgen::__rt::Ref<'static, VersionVector>, JsValue> {

@@ -4,7 +4,9 @@
 #![allow(clippy::doc_lazy_continuation)]
 // #![warn(missing_docs)]
 
-use convert::{import_status_to_js_value, js_to_version_vector, resolved_diff_to_js};
+use convert::{
+    import_status_to_js_value, js_to_id_span, js_to_version_vector, resolved_diff_to_js,
+};
 use js_sys::{Array, Object, Promise, Reflect, Uint8Array};
 use loro_internal::{
     change::Lamport,

@@ -206,6 +208,8 @@ extern "C" {
     pub type JsLoroTreeValue;
     #[wasm_bindgen(typescript_type = "Record<string, ContainerID>")]
     pub type JsLoroRootShallowValue;
+    #[wasm_bindgen(typescript_type = "{ peer: PeerID, counter: number, length: number }")]
+    pub type JsIdSpan;
 }
 
 mod observer {

@@ -1222,6 +1226,7 @@ impl LoroDoc {
         &self,
         start_vv: JsValue,
         end_vv: JsValue,
+        with_peer_compression: Option<bool>,
     ) -> JsResult<JsJsonSchema> {
         let mut json_start_vv: &InternalVersionVector = &Default::default();
         let temp_start_vv: Option<wasm_bindgen::__rt::Ref<'static, VersionVector>>;

@@ -1235,7 +1240,11 @@ impl LoroDoc {
             temp_end_vv = Some(js_to_version_vector(end_vv)?);
             json_end_vv = &temp_end_vv.as_ref().unwrap().0;
         }
-        let json_schema = self.0.export_json_updates(json_start_vv, json_end_vv);
+        let json_schema = self.0.export_json_updates(
+            json_start_vv,
+            json_end_vv,
+            with_peer_compression.unwrap_or(true),
+        );
         let s = serde_wasm_bindgen::Serializer::new().serialize_maps_as_objects(true);
         let v = json_schema
             .serialize(&s)

@@ -1243,6 +1252,17 @@ impl LoroDoc {
         Ok(v.into())
     }
 
+    #[wasm_bindgen(js_name = "exportJsonInIdSpan", skip_typescript)]
+    pub fn exportJsonInIdSpan(&self, idSpan: JsIdSpan) -> JsResult<JsValue> {
+        let id_span = js_to_id_span(idSpan)?;
+        let json = self.0.export_json_in_id_span(id_span);
+        let s = serde_wasm_bindgen::Serializer::new().serialize_maps_as_objects(true);
+        let v = json
+            .serialize(&s)
+            .map_err(std::convert::Into::<JsValue>::into)?;
+        Ok(v)
+    }
+
     /// Import updates from the JSON format.
     ///
     /// only supports backward compatibility but not forward compatibility.

@@ -5599,9 +5619,19 @@ interface LoroDoc<T extends Record<string, Container> = Record<string, Container
    *
    * @param start - The start version vector.
    * @param end - The end version vector.
+   * @param withPeerCompression - Whether to compress the peer IDs in the updates. Defaults to true. If you want to process the operations in application code, set this to false.
    * @returns The updates in the given range.
    */
-  exportJsonUpdates(start?: VersionVector, end?: VersionVector): JsonSchema;
+  exportJsonUpdates(start?: VersionVector, end?: VersionVector, withPeerCompression?: boolean): JsonSchema;
+  /**
+   * Export the readable [`Change`]s in the given [`IdSpan`].
+   *
+   * The peers are not compressed in the returned changes.
+   *
+   * @param idSpan - The id span to export.
+   * @returns The changes in the given id span.
+   */
+  exportJsonInIdSpan(idSpan: IdSpan): JsonChange[];
 }
 interface LoroList<T = unknown> {
   new(): LoroList<T>;

|
|||
doc.attach();
|
||||
expect(doc.isDetached()).toBe(false);
|
||||
})
|
||||
|
||||
it("export json in id span #602", () => {
|
||||
const doc = new LoroDoc();
|
||||
doc.setPeerId("1");
|
||||
doc.getText("text").insert(0, "Hello");
|
||||
doc.commit();
|
||||
{
|
||||
const changes = doc.exportJsonInIdSpan({ peer: "1", counter: 0, length: 1 });
|
||||
expect(changes).toStrictEqual([{
|
||||
id: "0@1",
|
||||
timestamp: expect.any(Number),
|
||||
deps: [],
|
||||
lamport: 0,
|
||||
msg: undefined,
|
||||
ops: [{
|
||||
container: "cid:root-text:Text",
|
||||
counter: 0,
|
||||
content: {
|
||||
type: "insert",
|
||||
pos: 0,
|
||||
text: "H"
|
||||
}
|
||||
}]
|
||||
}]);
|
||||
}
|
||||
{
|
||||
const changes = doc.exportJsonInIdSpan({ peer: "2", counter: 0, length: 1 });
|
||||
expect(changes).toStrictEqual([]);
|
||||
}
|
||||
})
|
||||
|
|
|
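Following the test above, a short sketch of the intended round trip: export without peer compression, inspect or transform the changes in application code, then import them elsewhere. It assumes the existing `importJsonUpdates` method accepts the uncompressed schema, which the updated decode path (`get_peer_from_peers`) is written to handle:

```ts
import { LoroDoc } from "loro-crdt";

const source = new LoroDoc();
source.setPeerId("1");
source.getText("text").insert(0, "Hello");
source.commit();

// Peer IDs stay inline, so filtering or logging individual ops is simple.
const json = source.exportJsonUpdates(undefined, undefined, false);
console.log(json.changes.map((c) => c.id));

const target = new LoroDoc();
target.importJsonUpdates(json);
```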
@@ -442,7 +442,25 @@ impl LoroDoc {
         start_vv: &VersionVector,
         end_vv: &VersionVector,
     ) -> JsonSchema {
-        self.doc.export_json_updates(start_vv, end_vv)
+        self.doc.export_json_updates(start_vv, end_vv, true)
     }
 
+    /// Export the current state with json-string format of the document, without peer compression.
+    ///
+    /// Compared to [`export_json_updates`], this method does not compress the peer IDs in the updates.
+    /// So the operations are easier to be processed by application code.
+    #[inline]
+    pub fn export_json_updates_without_peer_compression(
+        &self,
+        start_vv: &VersionVector,
+        end_vv: &VersionVector,
+    ) -> JsonSchema {
+        self.doc.export_json_updates(start_vv, end_vv, false)
+    }
+
+    /// Export the readable [`Change`]s in the given [`IdSpan`]
+    pub fn export_json_in_id_span(&self, id_span: IdSpan) -> Vec<JsonChange> {
+        self.doc.export_json_in_id_span(id_span)
+    }
+
     /// Export all the ops not included in the given `VersionVector`

@@ -100,10 +100,12 @@ fn allow_editing_on_detached_mode_when_detached_editing_is_enabled() {
         start_version: Frontiers(
             [],
         ),
-        peers: [
-            1,
-            2,
-        ],
+        peers: Some(
+            [
+                1,
+                2,
+            ],
+        ),
         changes: [
             JsonChange {
                 id: 0@0,

@@ -2479,3 +2479,177 @@ fn travel_before_commit() -> Result<(), Box<dyn std::error::Error>> {
     })?;
     Ok(())
 }
+
+#[test]
+fn test_export_json_in_id_span() -> LoroResult<()> {
+    let doc = LoroDoc::new();
+    doc.set_peer_id(1)?;
+
+    // Test list operations
+    let list = doc.get_list("list");
+    list.insert(0, 1)?;
+    doc.set_next_commit_message("list");
+    doc.commit();
+
+    // Test map operations
+    let map = doc.get_map("map");
+    map.insert("key1", "value1")?;
+    doc.set_next_commit_message("map");
+    doc.commit();
+
+    // Test text operations
+    let text = doc.get_text("text");
+    text.insert(0, "H")?;
+    doc.set_next_commit_message("text");
+    doc.commit();
+
+    // Export changes for list (first change)
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 0, 1));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 0);
+    assert!(!changes[0].ops.is_empty());
+
+    // Export changes for map (second change)
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 1, 2));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 1);
+    assert!(!changes[0].ops.is_empty());
+
+    // Export changes for text (third change)
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 2, 3));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 2);
+    assert!(!changes[0].ops.is_empty());
+
+    // Export multiple changes
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 0, 3));
+    assert_eq!(changes.len(), 3);
+    assert_eq!(changes[0].id.counter, 0);
+    assert_eq!(changes[1].id.counter, 1);
+    assert_eq!(changes[2].id.counter, 2);
+
+    // Test with multiple peers
+    let doc2 = LoroDoc::new();
+    doc2.set_peer_id(2)?;
+    doc2.get_list("list").insert(0, 3)?;
+    doc2.commit();
+    doc.import(&doc2.export_snapshot())?;
+
+    let changes = doc.export_json_in_id_span(IdSpan::new(2, 0, 1));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 2);
+    assert_eq!(changes[0].id.counter, 0);
+
+    // Test empty span
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 0, 0));
+    assert_eq!(changes.len(), 0);
+
+    // Test concurrent operations
+    let doc1 = LoroDoc::new();
+    doc1.set_peer_id(1)?;
+    let doc2 = LoroDoc::new();
+    doc2.set_peer_id(2)?;
+
+    // Make concurrent changes
+    doc1.get_text("text").insert(0, "Hello")?;
+    doc2.get_text("text").insert(0, "World")?;
+    doc1.commit();
+    doc2.commit();
+
+    // Sync the documents
+    doc1.import(&doc2.export_snapshot())?;
+    doc2.import(&doc1.export_snapshot())?;
+
+    // Export changes from both peers
+    let changes1 = doc1.export_json_in_id_span(IdSpan::new(1, 0, 1));
+    let changes2 = doc1.export_json_in_id_span(IdSpan::new(2, 0, 1));
+    assert_eq!(changes1.len(), 1);
+    assert_eq!(changes2.len(), 1);
+    assert_eq!(changes1[0].id.peer, 1);
+    assert_eq!(changes2[0].id.peer, 2);
+
+    // Verify that the changes can be imported back
+    let doc3 = LoroDoc::new();
+    doc3.import(&doc1.export_snapshot())?;
+    assert_eq!(
+        doc3.get_text("text").to_string(),
+        doc1.get_text("text").to_string()
+    );
+
+    Ok(())
+}
+
+#[test]
+fn test_export_json_in_id_span_with_complex_operations() -> LoroResult<()> {
+    let doc = LoroDoc::new();
+    doc.set_peer_id(1)?;
+
+    // Test nested container operations
+    let map = doc.get_map("root");
+    let list = map.insert_container("list", LoroList::new())?;
+    list.insert(0, 1)?;
+    let text = list.insert_container(1, LoroText::new())?;
+    text.insert(0, "Hello")?;
+    doc.commit();
+
+    // Export the changes
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 0, 1));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 0);
+    assert!(!changes[0].ops.is_empty());
+
+    // Test tree operations
+    let tree = doc.get_tree("tree");
+    let root = tree.create(None)?;
+    let child1 = tree.create(None)?;
+    let child2 = tree.create(None)?;
+    tree.mov(child1, root)?;
+    tree.mov(child2, root)?;
+    doc.commit();
+
+    // Export tree changes
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 1, 2));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 1);
+    assert!(!changes[0].ops.is_empty());
+
+    // Test rich text operations with multiple attributes
+    let text = doc.get_text("richtext");
+    text.insert(0, "Hello World")?;
+    text.mark(0..5, "bold", true)?;
+    text.mark(6..11, "italic", true)?;
+    doc.commit();
+
+    // Export rich text changes
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 2, 3));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 2);
+    assert!(!changes[0].ops.is_empty());
+
+    // Test movable list operations
+    let movable_list = doc.get_movable_list("movable");
+    movable_list.insert(0, 1)?;
+    movable_list.insert(1, 2)?;
+    movable_list.mov(0, 1)?;
+    doc.commit();
+
+    // Export movable list changes
+    let changes = doc.export_json_in_id_span(IdSpan::new(1, 3, 4));
+    assert_eq!(changes.len(), 1);
+    assert_eq!(changes[0].id.peer, 1);
+    assert_eq!(changes[0].id.counter, 3);
+    assert!(!changes[0].ops.is_empty());
+
+    // Verify that all changes can be imported back
+    let doc2 = LoroDoc::new();
+    doc2.import(&doc.export_snapshot())?;
+    assert_eq!(doc2.get_deep_value(), doc.get_deep_value());
+
+    Ok(())
+}