diff --git a/Cargo.lock b/Cargo.lock
index 10893df3d0..7df97ed637 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4858,6 +4858,7 @@ dependencies = [
  "rand 0.8.3",
  "smallvec",
  "sum_tree",
+ "util",
 ]
 
 [[package]]
@@ -5382,8 +5383,10 @@ name = "util"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "clock",
  "futures",
  "log",
+ "rand 0.8.3",
  "serde_json",
  "surf",
  "tempdir",
diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs
index e1b919e5fe..2e7e08d0a5 100644
--- a/crates/gpui/src/app.rs
+++ b/crates/gpui/src/app.rs
@@ -2660,6 +2660,8 @@ impl ModelHandle {
         loop {
             {
                 let cx = cx.borrow();
+                let executor = cx.foreground();
+                let cx = cx.as_ref();
                 if predicate(
                     handle
@@ -2670,10 +2672,10 @@ impl ModelHandle {
                 ) {
                     break;
                 }
-            }
-
-            if cx.borrow_mut().foreground().would_park() {
-                panic!("parked while waiting on condition");
+                if executor.parking_forbidden() && executor.would_park() {
+                    panic!("parked while waiting on condition");
+                }
             }
 
             rx.recv()
diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs
index 3283ab7553..6b20f56f9e 100644
--- a/crates/gpui/src/executor.rs
+++ b/crates/gpui/src/executor.rs
@@ -260,7 +260,8 @@ impl Deterministic {
 
 impl DeterministicState {
     fn would_park(&self) -> bool {
-        self.scheduled_from_foreground.is_empty()
+        self.forbid_parking
+            && self.scheduled_from_foreground.is_empty()
             && self.scheduled_from_background.is_empty()
             && self.spawned_from_foreground.is_empty()
     }
@@ -438,6 +439,13 @@ impl Foreground {
         *any_value.downcast().unwrap()
     }
 
+    pub fn parking_forbidden(&self) -> bool {
+        match self {
+            Self::Deterministic(executor) => executor.state.lock().forbid_parking,
+            _ => panic!("this method can only be called on a deterministic executor"),
+        }
+    }
+
     pub fn would_park(&self) -> bool {
         match self {
             Self::Deterministic(executor) => executor.state.lock().would_park(),
diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs
index aedbd9381a..25d76ab6a0 100644
--- a/crates/language/src/buffer.rs
+++ b/crates/language/src/buffer.rs
@@ -65,8 +65,8 @@ pub struct Buffer {
     syntax_tree: Mutex<Option<SyntaxTree>>,
     parsing_in_background: bool,
     parse_count: usize,
-    remote_selections: TreeMap<ReplicaId, Arc<[Selection<Anchor>]>>,
     diagnostics: DiagnosticSet,
+    remote_selections: TreeMap<ReplicaId, SelectionSet>,
     selections_update_count: usize,
     diagnostics_update_count: usize,
     language_server: Option<LanguageServerState>,
@@ -79,14 +79,20 @@ pub struct BufferSnapshot {
     text: text::BufferSnapshot,
     tree: Option<Tree>,
     diagnostics: DiagnosticSet,
-    remote_selections: TreeMap<ReplicaId, Arc<[Selection<Anchor>]>>,
     diagnostics_update_count: usize,
+    remote_selections: TreeMap<ReplicaId, SelectionSet>,
     selections_update_count: usize,
     is_parsing: bool,
     language: Option<Arc<Language>>,
     parse_count: usize,
 }
 
+#[derive(Clone, Debug)]
+struct SelectionSet {
+    selections: Arc<[Selection<Anchor>]>,
+    lamport_timestamp: clock::Lamport,
+}
+
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct GroupId {
     source: Arc<str>,
@@ -131,10 +137,6 @@ pub enum Operation {
         selections: Arc<[Selection<Anchor>]>,
         lamport_timestamp: clock::Lamport,
     },
-    RemoveSelections {
-        replica_id: ReplicaId,
-        lamport_timestamp: clock::Lamport,
-    },
 }
 
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -286,18 +288,37 @@ impl Buffer {
         file: Option<Box<dyn File>>,
         cx: &mut ModelContext<Self>,
     ) -> Result<Self> {
-        let mut buffer =
-            text::Buffer::new(replica_id, message.id, History::new(message.content.into()));
-        let ops = message
-            .history
-            .into_iter()
-            .map(|op| text::Operation::Edit(proto::deserialize_edit_operation(op)));
-        buffer.apply_ops(ops)?;
+        let fragments_len = message.fragments.len();
+        let buffer = TextBuffer::from_parts(
+            replica_id,
+            message.id,
+            &message.visible_text,
+            &message.deleted_text,
+            message
+                .undo_map
+                .into_iter()
+                .map(proto::deserialize_undo_map_entry),
+            message
+                .fragments
+                .into_iter()
+                .enumerate()
+                .map(|(i, fragment)| {
+                    proto::deserialize_buffer_fragment(fragment, i, fragments_len)
+                }),
+            message.lamport_timestamp,
+            From::from(message.version),
+        );
         let mut this = Self::build(buffer, file);
         for selection_set in message.selections {
             this.remote_selections.insert(
                 selection_set.replica_id as ReplicaId,
-                proto::deserialize_selections(selection_set.selections),
+                SelectionSet {
+                    selections: proto::deserialize_selections(selection_set.selections),
+                    lamport_timestamp: clock::Lamport {
+                        replica_id: selection_set.replica_id as ReplicaId,
+                        value: selection_set.lamport_timestamp,
+                    },
+                },
             );
         }
         let snapshot = this.snapshot();
@@ -307,27 +328,53 @@ impl Buffer {
             cx,
         );
 
+        let deferred_ops = message
+            .deferred_operations
+            .into_iter()
+            .map(proto::deserialize_operation)
+            .collect::<Result<Vec<_>>>()?;
+        this.apply_ops(deferred_ops, cx)?;
+
         Ok(this)
     }
 
     pub fn to_proto(&self) -> proto::Buffer {
         proto::Buffer {
             id: self.remote_id(),
-            content: self.text.base_text().to_string(),
-            history: self
+            visible_text: self.text.text(),
+            deleted_text: self.text.deleted_text(),
+            undo_map: self
                 .text
-                .history()
-                .map(proto::serialize_edit_operation)
+                .undo_history()
+                .map(proto::serialize_undo_map_entry)
+                .collect(),
+            version: From::from(&self.version),
+            lamport_timestamp: self.lamport_clock.value,
+            fragments: self
+                .text
+                .fragments()
+                .map(proto::serialize_buffer_fragment)
                 .collect(),
             selections: self
                 .remote_selections
                 .iter()
-                .map(|(replica_id, selections)| proto::SelectionSet {
+                .map(|(replica_id, set)| proto::SelectionSet {
                     replica_id: *replica_id as u32,
-                    selections: proto::serialize_selections(selections),
+                    selections: proto::serialize_selections(&set.selections),
+                    lamport_timestamp: set.lamport_timestamp.value,
                 })
                 .collect(),
             diagnostics: proto::serialize_diagnostics(self.diagnostics.iter()),
+            deferred_operations: self
+                .deferred_ops
+                .iter()
+                .map(proto::serialize_operation)
+                .chain(
+                    self.text
+                        .deferred_ops()
+                        .map(|op| proto::serialize_operation(&Operation::Buffer(op.clone()))),
+                )
+                .collect(),
         }
     }
 
@@ -1081,6 +1128,13 @@ impl Buffer {
         cx: &mut ModelContext<Self>,
     ) {
         let lamport_timestamp = self.text.lamport_clock.tick();
+        self.remote_selections.insert(
+            self.text.replica_id(),
+            SelectionSet {
+                selections: selections.clone(),
+                lamport_timestamp,
+            },
+        );
         self.send_operation(
             Operation::UpdateSelections {
                 replica_id: self.text.replica_id(),
@@ -1092,14 +1146,7 @@
     }
 
     pub fn remove_active_selections(&mut self, cx: &mut ModelContext<Self>) {
-        let lamport_timestamp = self.text.lamport_clock.tick();
-        self.send_operation(
-            Operation::RemoveSelections {
-                replica_id: self.text.replica_id(),
-                lamport_timestamp,
-            },
-            cx,
-        );
+        self.set_active_selections(Arc::from([]), cx);
     }
 
     fn update_language_server(&mut self) {
@@ -1287,6 +1334,7 @@ impl Buffer {
             })
             .collect::<Vec<_>>();
         self.text.apply_ops(buffer_ops)?;
+        self.deferred_ops.insert(deferred_ops);
         self.flush_deferred_ops(cx);
         self.did_edit(&old_version, was_dirty, cx);
         // Notify independently of whether the buffer was edited as the operations could include a
@@ -1322,7 +1370,6 @@
             Operation::UpdateSelections { selections, .. } => selections
                 .iter()
                 .all(|s| self.can_resolve(&s.start) && self.can_resolve(&s.end)),
-            Operation::RemoveSelections { .. } => true,
         }
     }
@@ -1346,15 +1393,19 @@ impl Buffer {
                 selections,
                 lamport_timestamp,
             } => {
-                self.remote_selections.insert(replica_id, selections);
-                self.text.lamport_clock.observe(lamport_timestamp);
-                self.selections_update_count += 1;
-            }
-            Operation::RemoveSelections {
-                replica_id,
-                lamport_timestamp,
-            } => {
-                self.remote_selections.remove(&replica_id);
+                if let Some(set) = self.remote_selections.get(&replica_id) {
+                    if set.lamport_timestamp > lamport_timestamp {
+                        return;
+                    }
+                }
+
+                self.remote_selections.insert(
+                    replica_id,
+                    SelectionSet {
+                        selections,
+                        lamport_timestamp,
+                    },
+                );
                 self.text.lamport_clock.observe(lamport_timestamp);
                 self.selections_update_count += 1;
             }
@@ -1448,6 +1499,10 @@ impl Buffer {
 
 #[cfg(any(test, feature = "test-support"))]
 impl Buffer {
+    pub fn set_group_interval(&mut self, group_interval: Duration) {
+        self.text.set_group_interval(group_interval);
+    }
+
     pub fn randomly_edit(
         &mut self,
         rng: &mut T,
@@ -1456,9 +1511,38 @@
     ) where
        T: rand::Rng,
    {
-        self.start_transaction();
-        self.text.randomly_edit(rng, old_range_count);
-        self.end_transaction(cx);
+        let mut old_ranges: Vec<Range<usize>> = Vec::new();
+        for _ in 0..old_range_count {
+            let last_end = old_ranges.last().map_or(0, |last_range| last_range.end + 1);
+            if last_end > self.len() {
+                break;
+            }
+            old_ranges.push(self.text.random_byte_range(last_end, rng));
+        }
+        let new_text_len = rng.gen_range(0..10);
+        let new_text: String = crate::random_char_iter::RandomCharIter::new(&mut *rng)
+            .take(new_text_len)
+            .collect();
+        log::info!(
+            "mutating buffer {} at {:?}: {:?}",
+            self.replica_id(),
+            old_ranges,
+            new_text
+        );
+        self.edit(old_ranges.iter().cloned(), new_text.as_str(), cx);
+    }
+
+    pub fn randomly_undo_redo(&mut self, rng: &mut impl rand::Rng, cx: &mut ModelContext<Self>) {
+        let was_dirty = self.is_dirty();
+        let old_version = self.version.clone();
+
+        let ops = self.text.randomly_undo_redo(rng);
+        if !ops.is_empty() {
+            for op in ops {
+                self.send_operation(Operation::Buffer(op), cx);
+                self.did_edit(&old_version, was_dirty, cx);
+            }
+        }
+    }
 }
 
@@ -1711,20 +1795,30 @@ impl BufferSnapshot {
     {
         self.remote_selections
             .iter()
-            .filter(|(replica_id, _)| **replica_id != self.text.replica_id())
-            .map(move |(replica_id, selections)| {
-                let start_ix = match selections
-                    .binary_search_by(|probe| probe.end.cmp(&range.start, self).unwrap())
-                {
+            .filter(|(replica_id, set)| {
+                **replica_id != self.text.replica_id() && !set.selections.is_empty()
+            })
+            .map(move |(replica_id, set)| {
+                let start_ix = match set.selections.binary_search_by(|probe| {
+                    probe
+                        .end
+                        .cmp(&range.start, self)
+                        .unwrap()
+                        .then(Ordering::Greater)
+                }) {
                     Ok(ix) | Err(ix) => ix,
                 };
-                let end_ix = match selections
-                    .binary_search_by(|probe| probe.start.cmp(&range.end, self).unwrap())
-                {
+                let end_ix = match set.selections.binary_search_by(|probe| {
+                    probe
+                        .start
+                        .cmp(&range.end, self)
+                        .unwrap()
+                        .then(Ordering::Less)
+                }) {
                     Ok(ix) | Err(ix) => ix,
                 };
-                (*replica_id, selections[start_ix..end_ix].iter())
+                (*replica_id, set.selections[start_ix..end_ix].iter())
             })
     }
 
@@ -2007,9 +2101,6 @@ impl operation_queue::Operation for Operation {
             }
             | Operation::UpdateSelections {
                 lamport_timestamp, ..
-            }
-            | Operation::RemoveSelections {
-                lamport_timestamp, ..
             } => *lamport_timestamp,
         }
     }
diff --git a/crates/language/src/proto.rs b/crates/language/src/proto.rs
index 200a687052..771d8b7fd3 100644
--- a/crates/language/src/proto.rs
+++ b/crates/language/src/proto.rs
@@ -1,6 +1,7 @@
 use crate::{diagnostic_set::DiagnosticEntry, Diagnostic, Operation};
 use anyhow::{anyhow, Result};
 use clock::ReplicaId;
+use collections::HashSet;
 use lsp::DiagnosticSeverity;
 use rpc::proto;
 use std::sync::Arc;
@@ -32,7 +33,7 @@ pub fn serialize_operation(operation: &Operation) -> proto::Operation {
                 counts: undo
                     .counts
                     .iter()
-                    .map(|(edit_id, count)| proto::operation::UndoCount {
+                    .map(|(edit_id, count)| proto::UndoCount {
                         replica_id: edit_id.replica_id as u32,
                         local_timestamp: edit_id.value,
                         count: *count,
@@ -49,13 +50,6 @@
             lamport_timestamp: lamport_timestamp.value,
             selections: serialize_selections(selections),
         }),
-        Operation::RemoveSelections {
-            replica_id,
-            lamport_timestamp,
-        } => proto::operation::Variant::RemoveSelections(proto::operation::RemoveSelections {
-            replica_id: *replica_id as u32,
-            lamport_timestamp: lamport_timestamp.value,
-        }),
         Operation::UpdateDiagnostics {
             diagnostics,
             lamport_timestamp,
@@ -87,6 +81,43 @@
     }
 }
 
+pub fn serialize_undo_map_entry(
+    (edit_id, counts): (&clock::Local, &[(clock::Local, u32)]),
+) -> proto::UndoMapEntry {
+    proto::UndoMapEntry {
+        replica_id: edit_id.replica_id as u32,
+        local_timestamp: edit_id.value,
+        counts: counts
+            .iter()
+            .map(|(undo_id, count)| proto::UndoCount {
+                replica_id: undo_id.replica_id as u32,
+                local_timestamp: undo_id.value,
+                count: *count,
+            })
+            .collect(),
+    }
+}
+
+pub fn serialize_buffer_fragment(fragment: &text::Fragment) -> proto::BufferFragment {
+    proto::BufferFragment {
+        replica_id: fragment.insertion_timestamp.replica_id as u32,
+        local_timestamp: fragment.insertion_timestamp.local,
+        lamport_timestamp: fragment.insertion_timestamp.lamport,
+        insertion_offset: fragment.insertion_offset as u32,
+        len: fragment.len as u32,
+        visible: fragment.visible,
+        deletions: fragment
+            .deletions
+            .iter()
+            .map(|clock| proto::VectorClockEntry {
+                replica_id: clock.replica_id as u32,
+                timestamp: clock.value,
+            })
+            .collect(),
+        max_undos: From::from(&fragment.max_undos),
+    }
+}
+
 pub fn serialize_selections(selections: &Arc<[Selection<Anchor>]>) -> Vec<proto::Selection> {
     selections
         .iter()
@@ -200,13 +231,6 @@ pub fn deserialize_operation(message: proto::Operation) -> Result {
                 selections: Arc::from(selections),
             }
         }
-        proto::operation::Variant::RemoveSelections(message) => Operation::RemoveSelections {
-            replica_id: message.replica_id as ReplicaId,
-            lamport_timestamp: clock::Lamport {
-                replica_id: message.replica_id as ReplicaId,
-                value: message.lamport_timestamp,
-            },
-        },
         proto::operation::Variant::UpdateDiagnostics(message) => Operation::UpdateDiagnostics {
             diagnostics: deserialize_diagnostics(message.diagnostics),
             lamport_timestamp: clock::Lamport {
@@ -236,6 +260,53 @@ pub fn deserialize_edit_operation(edit: proto::operation::Edit) -> EditOperation {
     }
 }
 
+pub fn deserialize_undo_map_entry(
+    entry: proto::UndoMapEntry,
+) -> (clock::Local, Vec<(clock::Local, u32)>) {
+    (
+        clock::Local {
+            replica_id: entry.replica_id as u16,
+            value: entry.local_timestamp,
+        },
+        entry
+            .counts
+            .into_iter()
+            .map(|undo_count| {
+                (
+                    clock::Local {
+                        replica_id: undo_count.replica_id as u16,
+                        value: undo_count.local_timestamp,
+                    },
+                    undo_count.count,
+                )
+            })
+            .collect(),
+    )
+}
+
+pub fn deserialize_buffer_fragment(
+    message: proto::BufferFragment,
+    ix: usize,
+    count: usize,
+) -> Fragment {
+    Fragment {
+        id: locator::Locator::from_index(ix, count),
+        insertion_timestamp: InsertionTimestamp {
+            replica_id: message.replica_id as ReplicaId,
+            local: message.local_timestamp,
+            lamport: message.lamport_timestamp,
+        },
+        insertion_offset: message.insertion_offset as usize,
+        len: message.len as usize,
+        visible: message.visible,
+        deletions: HashSet::from_iter(message.deletions.into_iter().map(|entry| clock::Local {
+            replica_id: entry.replica_id as ReplicaId,
+            value: entry.timestamp,
+        })),
+        max_undos: From::from(message.max_undos),
+    }
+}
+
 pub fn deserialize_selections(selections: Vec<proto::Selection>) -> Arc<[Selection<Anchor>]> {
     Arc::from(
         selections
diff --git a/crates/language/src/tests.rs b/crates/language/src/tests.rs
index e94ff781f3..b634524eb6 100644
--- a/crates/language/src/tests.rs
+++ b/crates/language/src/tests.rs
@@ -1,13 +1,18 @@
 use super::*;
+use clock::ReplicaId;
+use collections::BTreeMap;
 use gpui::{ModelHandle, MutableAppContext};
+use rand::prelude::*;
 use std::{
     cell::RefCell,
+    env,
     iter::FromIterator,
     ops::Range,
     rc::Rc,
     time::{Duration, Instant},
 };
 use unindent::Unindent as _;
+use util::test::Network;
 
 #[cfg(test)]
 #[ctor::ctor]
@@ -758,6 +763,193 @@ async fn test_empty_diagnostic_ranges(mut cx: gpui::TestAppContext) {
     });
 }
 
+#[gpui::test]
+fn test_serialization(cx: &mut gpui::MutableAppContext) {
+    let mut now = Instant::now();
+
+    let buffer1 = cx.add_model(|cx| {
+        let mut buffer = Buffer::new(0, "abc", cx);
+        buffer.edit([3..3], "D", cx);
+
+        now += Duration::from_secs(1);
+        buffer.start_transaction_at(now);
+        buffer.edit([4..4], "E", cx);
+        buffer.end_transaction_at(now, cx);
+        assert_eq!(buffer.text(), "abcDE");
+
+        buffer.undo(cx);
+        assert_eq!(buffer.text(), "abcD");
+
+        buffer.edit([4..4], "F", cx);
+        assert_eq!(buffer.text(), "abcDF");
+        buffer
+    });
+    assert_eq!(buffer1.read(cx).text(), "abcDF");
+
+    let message = buffer1.read(cx).to_proto();
+    let buffer2 = cx.add_model(|cx| Buffer::from_proto(1, message, None, cx).unwrap());
+    assert_eq!(buffer2.read(cx).text(), "abcDF");
+}
+
+#[gpui::test(iterations = 100)]
+fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) {
+    let min_peers = env::var("MIN_PEERS")
+        .map(|i| i.parse().expect("invalid `MIN_PEERS` variable"))
+        .unwrap_or(1);
+    let max_peers = env::var("MAX_PEERS")
+        .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
+        .unwrap_or(5);
+    let operations = env::var("OPERATIONS")
+        .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
+        .unwrap_or(10);
+
+    let base_text_len = rng.gen_range(0..10);
+    let base_text = RandomCharIter::new(&mut rng)
+        .take(base_text_len)
+        .collect::<String>();
+    let mut replica_ids = Vec::new();
+    let mut buffers = Vec::new();
+    let mut network = Network::new(rng.clone());
+
+    for i in 0..rng.gen_range(min_peers..=max_peers) {
+        let buffer = cx.add_model(|cx| {
+            let mut buffer = Buffer::new(i as ReplicaId, base_text.as_str(), cx);
+            buffer.set_group_interval(Duration::from_millis(rng.gen_range(0..=200)));
+            buffer
+        });
+        buffers.push(buffer);
+        replica_ids.push(i as ReplicaId);
+        network.add_peer(i as ReplicaId);
+        log::info!("Adding initial peer with replica id {}", i);
+    }
+
+    log::info!("initial text: {:?}", base_text);
+
+    let mut now = Instant::now();
+    let mut mutation_count = operations;
+    let mut active_selections = BTreeMap::default();
+    loop {
+        let replica_index = rng.gen_range(0..replica_ids.len());
+        let replica_id = replica_ids[replica_index];
+        let buffer = &mut buffers[replica_index];
+        let mut new_buffer = None;
+        match rng.gen_range(0..100) {
+            0..=29 if mutation_count != 0 => {
+                buffer.update(cx, |buffer, cx| {
+                    buffer.start_transaction_at(now);
+                    buffer.randomly_edit(&mut rng, 5, cx);
+                    buffer.end_transaction_at(now, cx);
+                    log::info!("buffer {} text: {:?}", buffer.replica_id(), buffer.text());
+                });
+                mutation_count -= 1;
+            }
+            30..=39 if mutation_count != 0 => {
+                buffer.update(cx, |buffer, cx| {
+                    let mut selections = Vec::new();
+                    for id in 0..rng.gen_range(1..=5) {
+                        let range = buffer.random_byte_range(0, &mut rng);
+                        selections.push(Selection {
+                            id,
+                            start: buffer.anchor_before(range.start),
+                            end: buffer.anchor_before(range.end),
+                            reversed: false,
+                            goal: SelectionGoal::None,
+                        });
+                    }
+                    let selections: Arc<[Selection<Anchor>]> = selections.into();
+                    log::info!(
+                        "peer {} setting active selections: {:?}",
+                        replica_id,
+                        selections
+                    );
+                    active_selections.insert(replica_id, selections.clone());
+                    buffer.set_active_selections(selections, cx);
+                });
+                mutation_count -= 1;
+            }
+            40..=49 if replica_ids.len() < max_peers => {
+                let old_buffer = buffer.read(cx).to_proto();
+                let new_replica_id = replica_ids.len() as ReplicaId;
+                log::info!(
+                    "Adding new replica {} (replicating from {})",
+                    new_replica_id,
+                    replica_id
+                );
+                new_buffer = Some(cx.add_model(|cx| {
+                    let mut new_buffer =
+                        Buffer::from_proto(new_replica_id, old_buffer, None, cx).unwrap();
+                    new_buffer.set_group_interval(Duration::from_millis(rng.gen_range(0..=200)));
+                    new_buffer
+                }));
+                replica_ids.push(new_replica_id);
+                network.replicate(replica_id, new_replica_id);
+            }
+            50..=69 if mutation_count != 0 => {
+                buffer.update(cx, |buffer, cx| {
+                    buffer.randomly_undo_redo(&mut rng, cx);
+                    log::info!("buffer {} text: {:?}", buffer.replica_id(), buffer.text());
+                });
+                mutation_count -= 1;
+            }
+            70..=99 if network.has_unreceived(replica_id) => {
+                let ops = network
+                    .receive(replica_id)
+                    .into_iter()
+                    .map(|op| proto::deserialize_operation(op).unwrap());
+                if ops.len() > 0 {
+                    log::info!(
+                        "peer {} applying {} ops from the network.",
+                        replica_id,
+                        ops.len()
+                    );
+                    buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx).unwrap());
+                }
+            }
+            _ => {}
+        }
+
+        buffer.update(cx, |buffer, _| {
+            let ops = buffer
+                .operations
+                .drain(..)
+                .map(|op| proto::serialize_operation(&op))
+                .collect();
+            network.broadcast(buffer.replica_id(), ops);
+        });
+        now += Duration::from_millis(rng.gen_range(0..=200));
+        buffers.extend(new_buffer);
+
+        if mutation_count == 0 && network.is_idle() {
+            break;
+        }
+    }
+
+    let first_buffer = buffers[0].read(cx);
+    for buffer in &buffers[1..] {
+        let buffer = buffer.read(cx);
+        assert_eq!(
+            buffer.text(),
+            first_buffer.text(),
+            "Replica {} text != Replica 0 text",
+            buffer.replica_id()
+        );
+    }
+
+    for buffer in &buffers {
+        let buffer = buffer.read(cx).snapshot();
+        let expected_remote_selections = active_selections
+            .iter()
+            .filter(|(replica_id, _)| **replica_id != buffer.replica_id())
+            .map(|(replica_id, selections)| (*replica_id, selections.iter().collect::<Vec<_>>()))
+            .collect::<BTreeMap<_, _>>();
+        let actual_remote_selections = buffer
+            .remote_selections_in_range(Anchor::min()..Anchor::max())
+            .map(|(replica_id, selections)| (replica_id, selections.collect::<Vec<_>>()))
+            .collect::<BTreeMap<_, _>>();
+        assert_eq!(actual_remote_selections, expected_remote_selections);
+    }
+}
+
 fn chunks_with_diagnostics<T: ToOffset + ToPoint>(
     buffer: &Buffer,
     range: Range<T>,
diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto
index 9ec5f90719..71ceb4d9ac 100644
--- a/crates/rpc/proto/zed.proto
+++ b/crates/rpc/proto/zed.proto
@@ -270,15 +270,32 @@ message Entry {
 
 message Buffer {
     uint64 id = 1;
-    string content = 2;
-    repeated Operation.Edit history = 3;
-    repeated SelectionSet selections = 4;
-    repeated Diagnostic diagnostics = 5;
+    string visible_text = 2;
+    string deleted_text = 3;
+    repeated BufferFragment fragments = 4;
+    repeated UndoMapEntry undo_map = 5;
+    repeated VectorClockEntry version = 6;
+    repeated SelectionSet selections = 7;
+    repeated Diagnostic diagnostics = 8;
+    uint32 lamport_timestamp = 9;
+    repeated Operation deferred_operations = 10;
+}
+
+message BufferFragment {
+    uint32 replica_id = 1;
+    uint32 local_timestamp = 2;
+    uint32 lamport_timestamp = 3;
+    uint32 insertion_offset = 4;
+    uint32 len = 5;
+    bool visible = 6;
+    repeated VectorClockEntry deletions = 7;
+    repeated VectorClockEntry max_undos = 8;
 }
 
 message SelectionSet {
     uint32 replica_id = 1;
     repeated Selection selections = 2;
+    uint32 lamport_timestamp = 3;
 }
 
 message Selection {
@@ -331,8 +348,7 @@ message Operation {
         Edit edit = 1;
         Undo undo = 2;
         UpdateSelections update_selections = 3;
-        RemoveSelections remove_selections = 4;
-        UpdateDiagnostics update_diagnostics = 5;
+        UpdateDiagnostics update_diagnostics = 4;
     }
 
     message Edit {
@@ -353,22 +369,23 @@
         repeated UndoCount counts = 6;
     }
 
-    message UndoCount {
-        uint32 replica_id = 1;
-        uint32 local_timestamp = 2;
-        uint32 count = 3;
-    }
-
     message UpdateSelections {
         uint32 replica_id = 1;
         uint32 lamport_timestamp = 3;
         repeated Selection selections = 4;
     }
+}
 
-    message RemoveSelections {
-        uint32 replica_id = 1;
-        uint32 lamport_timestamp = 3;
-    }
+message UndoMapEntry {
+    uint32 replica_id = 1;
+    uint32 local_timestamp = 2;
+    repeated UndoCount counts = 3;
+}
+
+message UndoCount {
+    uint32 replica_id = 1;
+    uint32 local_timestamp = 2;
+    uint32 count = 3;
 }
 
 message VectorClockEntry {
diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs
index bd5d1c384f..091a0c1555 100644
--- a/crates/rpc/src/peer.rs
+++ b/crates/rpc/src/peer.rs
@@ -398,10 +398,8 @@ mod tests {
             proto::OpenBufferResponse {
                 buffer: Some(proto::Buffer {
                     id: 101,
-                    content: "path/one content".to_string(),
-                    history: vec![],
-                    selections: vec![],
-                    diagnostics: vec![],
+                    visible_text: "path/one content".to_string(),
+                    ..Default::default()
                 }),
             }
         );
@@ -421,10 +419,8 @@
             proto::OpenBufferResponse {
                 buffer: Some(proto::Buffer {
                     id: 102,
-                    content: "path/two content".to_string(),
-                    history: vec![],
-                    selections: vec![],
-                    diagnostics: vec![],
+                    visible_text: "path/two content".to_string(),
+                    ..Default::default()
                 }),
             }
         );
@@ -452,10 +448,8 @@
             proto::OpenBufferResponse {
                 buffer: Some(proto::Buffer {
                     id: 101,
-                    content: "path/one content".to_string(),
-                    history: vec![],
-                    selections: vec![],
-                    diagnostics: vec![],
+                    visible_text: "path/one content".to_string(),
+                    ..Default::default()
                 }),
             }
         }
@@ -464,10 +458,8 @@
             proto::OpenBufferResponse {
                 buffer: Some(proto::Buffer {
                     id: 102,
-                    content: "path/two content".to_string(),
-                    history: vec![],
-                    selections: vec![],
-                    diagnostics: vec![],
+                    visible_text: "path/two content".to_string(),
+                    ..Default::default()
                 }),
             }
         }
diff --git a/crates/sum_tree/src/tree_map.rs b/crates/sum_tree/src/tree_map.rs
index 76eb78476c..f50c233d05 100644
--- a/crates/sum_tree/src/tree_map.rs
+++ b/crates/sum_tree/src/tree_map.rs
@@ -23,10 +23,13 @@ pub struct MapKeyRef<'a, K>(Option<&'a K>);
 impl TreeMap {
     pub fn get<'a>(&self, key: &'a K) -> Option<&V> {
         let mut cursor = self.0.cursor::<MapKeyRef<'_, K>>();
-        let key = MapKeyRef(Some(key));
-        cursor.seek(&key, Bias::Left, &());
-        if key.cmp(cursor.start(), &()) == Ordering::Equal {
-            Some(&cursor.item().unwrap().value)
+        cursor.seek(&MapKeyRef(Some(key)), Bias::Left, &());
+        if let Some(item) = cursor.item() {
+            if *key == item.key().0 {
+                Some(&item.value)
+            } else {
+                None
+            }
         } else {
             None
         }
@@ -129,24 +132,32 @@ mod tests {
         assert_eq!(map.iter().collect::<Vec<_>>(), vec![]);
 
         map.insert(3, "c");
+        assert_eq!(map.get(&3), Some(&"c"));
        assert_eq!(map.iter().collect::<Vec<_>>(), vec![(&3, &"c")]);
 
         map.insert(1, "a");
+        assert_eq!(map.get(&1), Some(&"a"));
        assert_eq!(map.iter().collect::<Vec<_>>(), vec![(&1, &"a"), (&3, &"c")]);
 
         map.insert(2, "b");
+        assert_eq!(map.get(&2), Some(&"b"));
+        assert_eq!(map.get(&1), Some(&"a"));
+        assert_eq!(map.get(&3), Some(&"c"));
         assert_eq!(
            map.iter().collect::<Vec<_>>(),
             vec![(&1, &"a"), (&2, &"b"), (&3, &"c")]
         );
 
         map.remove(&2);
+        assert_eq!(map.get(&2), None);
        assert_eq!(map.iter().collect::<Vec<_>>(), vec![(&1, &"a"), (&3, &"c")]);
 
         map.remove(&3);
+        assert_eq!(map.get(&3), None);
        assert_eq!(map.iter().collect::<Vec<_>>(), vec![(&1, &"a")]);
 
         map.remove(&1);
+        assert_eq!(map.get(&1), None);
        assert_eq!(map.iter().collect::<Vec<_>>(), vec![]);
     }
 }
diff --git a/crates/text/Cargo.toml b/crates/text/Cargo.toml
index f4b7d7453f..edc1ca7846 100644
--- a/crates/text/Cargo.toml
+++ b/crates/text/Cargo.toml
@@ -24,6 +24,7 @@ smallvec = { version = "1.6", features = ["union"] }
 [dev-dependencies]
 collections = { path = "../collections", features = ["test-support"] }
 gpui = { path = "../gpui", features = ["test-support"] }
+util = { path = "../util", features = ["test-support"] }
 ctor = "0.1"
 env_logger = "0.8"
 rand = "0.8.3"
diff --git a/crates/text/src/locator.rs b/crates/text/src/locator.rs
index e4feaf99ac..ddd3663e73 100644
--- a/crates/text/src/locator.rs
+++ b/crates/text/src/locator.rs
@@ -19,6 +19,11 @@ impl Locator {
         Self(smallvec![u64::MAX])
     }
 
+    pub fn from_index(ix: usize, count: usize) -> Self {
+        let id = ((ix as u128 * u64::MAX as u128) / count as u128) as u64;
+        Self(smallvec![id])
+    }
+
     pub fn assign(&mut self, other: &Self) {
         self.0.resize(other.0.len(), 0);
         self.0.copy_from_slice(&other.0);
diff --git a/crates/text/src/operation_queue.rs b/crates/text/src/operation_queue.rs
index ef99faf3e2..a3f5b9b6bd 100644
--- a/crates/text/src/operation_queue.rs
+++ b/crates/text/src/operation_queue.rs
@@ -53,7 +53,7 @@ impl OperationQueue {
     }
 
     pub fn iter(&self) -> impl Iterator<Item = &T> {
-        self.0.cursor::<()>().map(|i| &i.0)
+        self.0.iter().map(|i| &i.0)
     }
 }
diff --git a/crates/text/src/tests.rs b/crates/text/src/tests.rs
index 55163436c5..e1ffc928c0 100644
--- a/crates/text/src/tests.rs
+++ b/crates/text/src/tests.rs
@@ -7,6 +7,7 @@ use std::{
     iter::Iterator,
     time::{Duration, Instant},
 };
+use util::test::Network;
 
 #[cfg(test)]
 #[ctor::ctor]
@@ -602,18 +603,6 @@ fn test_random_concurrent_edits(mut rng: StdRng) {
     }
 }
 
-#[derive(Clone)]
-struct Envelope<T: Clone> {
-    message: T,
-    sender: ReplicaId,
-}
-
-struct Network<T: Clone, R: rand::Rng> {
-    inboxes: std::collections::BTreeMap<ReplicaId, Vec<Envelope<T>>>,
-    all_messages: Vec<T>,
-    rng: R,
-}
-
 impl Buffer {
     fn check_invariants(&self) {
         // Ensure every fragment is ordered by locator in the fragment tree and corresponds
@@ -646,69 +635,3 @@ impl Buffer {
         }
     }
 }
-
-impl<T: Clone, R: rand::Rng> Network<T, R> {
-    fn new(rng: R) -> Self {
-        Network {
-            inboxes: Default::default(),
-            all_messages: Vec::new(),
-            rng,
-        }
-    }
-
-    fn add_peer(&mut self, id: ReplicaId) {
-        self.inboxes.insert(id, Vec::new());
-    }
-
-    fn is_idle(&self) -> bool {
-        self.inboxes.values().all(|i| i.is_empty())
-    }
-
-    fn broadcast(&mut self, sender: ReplicaId, messages: Vec<T>) {
-        for (replica, inbox) in self.inboxes.iter_mut() {
-            if *replica != sender {
-                for message in &messages {
-                    let min_index = inbox
-                        .iter()
-                        .enumerate()
-                        .rev()
-                        .find_map(|(index, envelope)| {
-                            if sender == envelope.sender {
-                                Some(index + 1)
-                            } else {
-                                None
-                            }
-                        })
-                        .unwrap_or(0);
-
-                    // Insert one or more duplicates of this message *after* the previous
-                    // message delivered by this replica.
-                    for _ in 0..self.rng.gen_range(1..4) {
-                        let insertion_index = self.rng.gen_range(min_index..inbox.len() + 1);
-                        inbox.insert(
-                            insertion_index,
-                            Envelope {
-                                message: message.clone(),
-                                sender,
-                            },
-                        );
-                    }
-                }
-            }
-        }
-        self.all_messages.extend(messages);
-    }
-
-    fn has_unreceived(&self, receiver: ReplicaId) -> bool {
-        !self.inboxes[&receiver].is_empty()
-    }
-
-    fn receive(&mut self, receiver: ReplicaId) -> Vec<T> {
-        let inbox = self.inboxes.get_mut(&receiver).unwrap();
-        let count = self.rng.gen_range(0..inbox.len() + 1);
-        inbox
-            .drain(0..count)
-            .map(|envelope| envelope.message)
-            .collect()
-    }
-}
diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs
index 5debb2f18d..9db7591f22 100644
--- a/crates/text/src/text.rs
+++ b/crates/text/src/text.rs
@@ -42,7 +42,6 @@ pub type TransactionId = usize;
 
 pub struct Buffer {
     snapshot: BufferSnapshot,
-    last_edit: clock::Local,
     history: History,
     deferred_ops: OperationQueue<Operation>,
     deferred_replicas: HashSet<ReplicaId>,
@@ -384,14 +383,14 @@ impl InsertionTimestamp {
 }
 
 #[derive(Eq, PartialEq, Clone, Debug)]
-struct Fragment {
-    id: Locator,
-    insertion_timestamp: InsertionTimestamp,
-    insertion_offset: usize,
-    len: usize,
-    visible: bool,
-    deletions: HashSet<clock::Local>,
-    max_undos: clock::Global,
+pub struct Fragment {
+    pub id: Locator,
+    pub insertion_timestamp: InsertionTimestamp,
+    pub insertion_offset: usize,
+    pub len: usize,
+    pub visible: bool,
+    pub deletions: HashSet<clock::Local>,
+    pub max_undos: clock::Global,
 }
 
 #[derive(Eq, PartialEq, Clone, Debug)]
@@ -496,7 +495,6 @@ impl Buffer {
                 version,
                 undo_map: Default::default(),
             },
-            last_edit: clock::Local::default(),
             history,
             deferred_ops: OperationQueue::new(),
             deferred_replicas: HashSet::default(),
@@ -508,6 +506,56 @@ impl Buffer {
         }
     }
 
+    pub fn from_parts(
+        replica_id: u16,
+        remote_id: u64,
+        visible_text: &str,
+        deleted_text: &str,
+        undo_map: impl Iterator<Item = (clock::Local, Vec<(clock::Local, u32)>)>,
+        fragments: impl ExactSizeIterator<Item = Fragment>,
+        lamport_timestamp: u32,
+        version: clock::Global,
+    ) -> Self {
+        let visible_text = visible_text.into();
+        let deleted_text = deleted_text.into();
+        let fragments = SumTree::from_iter(fragments, &None);
+        let mut insertions = fragments
+            .iter()
+            .map(|fragment| InsertionFragment {
+                timestamp: fragment.insertion_timestamp.local(),
+                split_offset: fragment.insertion_offset,
+                fragment_id: fragment.id.clone(),
+            })
+            .collect::<Vec<_>>();
+        insertions.sort_unstable_by_key(|i| (i.timestamp, i.split_offset));
+        Self {
+            remote_id,
+            replica_id,
+
+            history: History::new("".into()),
+            deferred_ops: OperationQueue::new(),
+            deferred_replicas: Default::default(),
+            local_clock: clock::Local {
+                replica_id,
+                value: version.get(replica_id) + 1,
+            },
+            lamport_clock: clock::Lamport {
+                replica_id,
+                value: lamport_timestamp,
+            },
+            subscriptions: Default::default(),
+            snapshot: BufferSnapshot {
+                replica_id,
+                visible_text,
+                deleted_text,
+                undo_map: UndoMap(undo_map.collect()),
+                fragments,
+                insertions: SumTree::from_iter(insertions, &()),
+                version,
+            },
+        }
+    }
+
     pub fn version(&self) -> clock::Global {
         self.version.clone()
     }
@@ -557,7 +605,6 @@ impl Buffer {
 
         self.history.push(edit.clone());
         self.history.push_undo(edit.timestamp.local());
-        self.last_edit = edit.timestamp.local();
         self.snapshot.version.observe(edit.timestamp.local());
         self.end_transaction();
         edit
@@ -1054,6 +1101,10 @@ impl Buffer {
         Ok(())
     }
 
+    pub fn deferred_ops(&self) -> impl Iterator<Item = &Operation> {
+        self.deferred_ops.iter()
+    }
+
     fn flush_deferred_ops(&mut self) -> Result<()> {
         self.deferred_replicas.clear();
         let mut deferred_ops = Vec::new();
@@ -1120,6 +1171,13 @@ impl Buffer {
         self.history.ops.values()
     }
 
+    pub fn undo_history(&self) -> impl Iterator<Item = (&clock::Local, &[(clock::Local, u32)])> {
+        self.undo_map
+            .0
+            .iter()
+            .map(|(edit_id, undo_counts)| (edit_id, undo_counts.as_slice()))
+    }
+
     pub fn undo(&mut self) -> Option<(TransactionId, Operation)> {
         if let Some(transaction) = self.history.pop_undo().cloned() {
             let transaction_id = transaction.id;
@@ -1186,7 +1244,11 @@ impl Buffer {
-    fn random_byte_range(&mut self, start_offset: usize, rng: &mut impl rand::Rng) -> Range<usize> {
+    pub fn set_group_interval(&mut self, group_interval: Duration) {
+        self.history.group_interval = group_interval;
+    }
+
+    pub fn random_byte_range(&self, start_offset: usize, rng: &mut impl rand::Rng) -> Range<usize> {
         let end = self.clip_offset(rng.gen_range(start_offset..=self.len()), Bias::Right);
         let start = self.clip_offset(rng.gen_range(start_offset..=end), Bias::Right);
         start..end
@@ -1288,7 +1350,15 @@ impl BufferSnapshot {
     }
 
     pub fn text(&self) -> String {
-        self.text_for_range(0..self.len()).collect()
+        self.visible_text.to_string()
+    }
+
+    pub fn deleted_text(&self) -> String {
+        self.deleted_text.to_string()
+    }
+
+    pub fn fragments(&self) -> impl Iterator<Item = &Fragment> {
+        self.fragments.iter()
     }
 
     pub fn text_summary(&self) -> TextSummary {
diff --git a/crates/util/Cargo.toml b/crates/util/Cargo.toml
index 419a99af2b..4f193a8718 100644
--- a/crates/util/Cargo.toml
+++ b/crates/util/Cargo.toml
@@ -4,12 +4,16 @@ version = "0.1.0"
 edition = "2018"
 
 [features]
-test-support = ["serde_json", "tempdir"]
+test-support = ["clock", "rand", "serde_json", "tempdir"]
 
 [dependencies]
+clock = { path = "../clock", optional = true }
 anyhow = "1.0.38"
 futures = "0.3"
 log = "0.4"
+rand = { version = "0.8", optional = true }
 surf = "2.2"
 tempdir = { version = "0.3.7", optional = true }
-serde_json = { version = "1.0.64", features = ["preserve_order"], optional = true }
+serde_json = { version = "1.0.64", features = [
+    "preserve_order"
+], optional = true }
diff --git a/crates/util/src/test.rs b/crates/util/src/test.rs
index 71b847df69..ce81921fff 100644
--- a/crates/util/src/test.rs
+++ b/crates/util/src/test.rs
@@ -1,6 +1,90 @@
+use clock::ReplicaId;
 use std::path::{Path, PathBuf};
 use tempdir::TempDir;
 
+#[derive(Clone)]
+struct Envelope<T: Clone> {
+    message: T,
+    sender: ReplicaId,
+}
+
+pub struct Network<T: Clone, R: rand::Rng> {
+    inboxes: std::collections::BTreeMap<ReplicaId, Vec<Envelope<T>>>,
+    all_messages: Vec<T>,
+    rng: R,
+}
+
+impl<T: Clone, R: rand::Rng> Network<T, R> {
+    pub fn new(rng: R) -> Self {
+        Network {
+            inboxes: Default::default(),
+            all_messages: Vec::new(),
+            rng,
+        }
+    }
+
+    pub fn add_peer(&mut self, id: ReplicaId) {
+        self.inboxes.insert(id, Vec::new());
+    }
+
+    pub fn replicate(&mut self, old_replica_id: ReplicaId, new_replica_id: ReplicaId) {
+        self.inboxes
+            .insert(new_replica_id, self.inboxes[&old_replica_id].clone());
+    }
+
+    pub fn is_idle(&self) -> bool {
+        self.inboxes.values().all(|i| i.is_empty())
+    }
+
+    pub fn broadcast(&mut self, sender: ReplicaId, messages: Vec<T>) {
+        for (replica, inbox) in self.inboxes.iter_mut() {
+            if *replica != sender {
+                for message in &messages {
+                    let min_index = inbox
+                        .iter()
+                        .enumerate()
+                        .rev()
+                        .find_map(|(index, envelope)| {
+                            if sender == envelope.sender {
+                                Some(index + 1)
+                            } else {
+                                None
+                            }
+                        })
+                        .unwrap_or(0);
+
+                    // Insert one or more duplicates of this message *after* the previous
+                    // message delivered by this replica.
+                    for _ in 0..self.rng.gen_range(1..4) {
+                        let insertion_index = self.rng.gen_range(min_index..inbox.len() + 1);
+                        inbox.insert(
+                            insertion_index,
+                            Envelope {
+                                message: message.clone(),
+                                sender,
+                            },
+                        );
+                    }
+                }
+            }
+        }
+        self.all_messages.extend(messages);
+    }
+
+    pub fn has_unreceived(&self, receiver: ReplicaId) -> bool {
+        !self.inboxes[&receiver].is_empty()
+    }
+
+    pub fn receive(&mut self, receiver: ReplicaId) -> Vec<T> {
+        let inbox = self.inboxes.get_mut(&receiver).unwrap();
+        let count = self.rng.gen_range(0..inbox.len() + 1);
+        inbox
+            .drain(0..count)
+            .map(|envelope| envelope.message)
+            .collect()
+    }
+}
+
 pub fn temp_tree(tree: serde_json::Value) -> TempDir {
     let dir = TempDir::new("").unwrap();
     write_tree(dir.path(), tree);