From 40aa9741ec9309cdf7b050e3f6c4c74c80bc1f4e Mon Sep 17 00:00:00 2001
From: Martin von Zweigbergk
Date: Sat, 19 Mar 2022 22:55:32 -0700
Subject: [PATCH] op_heads_store: move lock and updating of heads on disk to
 new type (#111)

We had a few lines of similar code where we added a new head of the
operation log and then removed the old heads. By moving that code into
a new type, we prepare for further refactorings.
---
 lib/src/op_heads_store.rs | 39 +++++++++++++++++++++++----------------
 lib/src/transaction.rs    |  3 ++-
 2 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/lib/src/op_heads_store.rs b/lib/src/op_heads_store.rs
index a0709267a..0f481f73b 100644
--- a/lib/src/op_heads_store.rs
+++ b/lib/src/op_heads_store.rs
@@ -40,6 +40,20 @@ pub enum OpHeadResolutionError {
     NoHeads,
 }
 
+pub struct LockedOpHeads {
+    store: Arc<OpHeadsStore>,
+    _lock: FileLock,
+}
+
+impl LockedOpHeads {
+    pub fn finish(self, new_op: &Operation) {
+        self.store.add_op_head(new_op.id());
+        for old_id in new_op.parent_ids() {
+            self.store.remove_op_head(old_id);
+        }
+    }
+}
+
 impl OpHeadsStore {
     pub fn init(
         dir: PathBuf,
@@ -66,7 +80,7 @@ impl OpHeadsStore {
         OpHeadsStore { dir }
     }
 
-    pub fn add_op_head(&self, id: &OperationId) {
+    fn add_op_head(&self, id: &OperationId) {
         std::fs::write(self.dir.join(id.hex()), "").unwrap();
     }
 
@@ -90,20 +104,16 @@ impl OpHeadsStore {
         op_heads
     }
 
-    fn lock(&self) -> FileLock {
-        FileLock::lock(self.dir.join("lock"))
-    }
-
-    pub fn update_op_heads(&self, op: &Operation) {
-        let _op_heads_lock = self.lock();
-        self.add_op_head(op.id());
-        for old_parent_id in op.parent_ids() {
-            self.remove_op_head(old_parent_id);
+    pub fn lock(self: &Arc<Self>) -> LockedOpHeads {
+        let lock = FileLock::lock(self.dir.join("lock"));
+        LockedOpHeads {
+            store: self.clone(),
+            _lock: lock,
         }
     }
 
     pub fn get_single_op_head(
-        &self,
+        self: &Arc<Self>,
         repo_loader: &RepoLoader,
     ) -> Result<Operation, OpHeadResolutionError> {
         let mut op_heads = self.get_op_heads();
@@ -128,7 +138,7 @@
         // Note that the locking isn't necessary for correctness; we take the lock
         // only to avoid other concurrent processes from doing the same work (and
         // producing another set of divergent heads).
-        let _lock = self.lock();
+        let locked_op_heads = self.lock();
        let op_head_ids = self.get_op_heads();
 
         if op_head_ids.is_empty() {
@@ -158,10 +168,7 @@
 
         let merged_repo = merge_op_heads(repo_loader, op_heads)?.leave_unpublished();
         let merge_operation = merged_repo.operation().clone();
-        self.add_op_head(merge_operation.id());
-        for old_op_head_id in merge_operation.parent_ids() {
-            self.remove_op_head(old_op_head_id);
-        }
+        locked_op_heads.finish(&merge_operation);
         // TODO: Change the return type include the repo if we have it (as we do here)
         Ok(merge_operation)
     }
diff --git a/lib/src/transaction.rs b/lib/src/transaction.rs
index 98600d273..b1abec09b 100644
--- a/lib/src/transaction.rs
+++ b/lib/src/transaction.rs
@@ -135,7 +135,8 @@ impl UnpublishedOperation {
         let data = self.data.take().unwrap();
         self.repo_loader
             .op_heads_store()
-            .update_op_heads(&data.operation);
+            .lock()
+            .finish(&data.operation);
         let repo = self
             .repo_loader
             .create_from(data.operation, data.view, data.index);
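
Note (not part of the patch): a minimal, self-contained Rust sketch of the
lock-guard pattern the patch introduces, where lock() on an Arc'd store
returns a guard and finish() consumes it so heads can only be rewritten while
the lock is held. ToyStore, ToyLock, ToyLockedHeads, and the string heads are
hypothetical stand-ins for OpHeadsStore, FileLock, LockedOpHeads, and
operation IDs; this is not jj code.

    use std::sync::{Arc, Mutex};

    // Stand-in for FileLock: held for the lifetime of the guard, released on drop.
    struct ToyLock;

    struct ToyStore {
        heads: Mutex<Vec<String>>,
    }

    struct ToyLockedHeads {
        store: Arc<ToyStore>,
        _lock: ToyLock,
    }

    impl ToyStore {
        // Mirrors OpHeadsStore::lock(self: &Arc<Self>) -> LockedOpHeads.
        fn lock(self: &Arc<Self>) -> ToyLockedHeads {
            ToyLockedHeads {
                store: self.clone(),
                _lock: ToyLock,
            }
        }
    }

    impl ToyLockedHeads {
        // Mirrors LockedOpHeads::finish(): add the new head, remove the heads it
        // replaces, then release the lock by consuming `self`.
        fn finish(self, new_head: &str, old_heads: &[&str]) {
            let mut heads = self.store.heads.lock().unwrap();
            heads.retain(|h| !old_heads.contains(&h.as_str()));
            heads.push(new_head.to_owned());
        }
    }

    fn main() {
        let store = Arc::new(ToyStore {
            heads: Mutex::new(vec!["old".to_owned()]),
        });
        // Caller-side flow after the patch: store.lock().finish(...),
        // analogous to op_heads_store().lock().finish(&operation).
        store.lock().finish("new", &["old"]);
        assert_eq!(*store.heads.lock().unwrap(), vec!["new".to_owned()]);
    }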