From ac9fb1832d595892f041536f710e3a8c7becffa7 Mon Sep 17 00:00:00 2001
From: Martin von Zweigbergk
Date: Sat, 13 Mar 2021 09:12:05 -0800
Subject: [PATCH] OpHeadsStore: move logic for merging repos to MutableRepo

This adds `MutableRepo::merge()`, which applies the difference between
two `ReadonlyRepo`s to itself. That results in much simpler code than
the current code in `merge_op_heads()`. It also lets us write `undo`
using the new function.

Finally -- and this is the actual reason I did it now -- it prepares
for using the index when enforcing view invariants.
---
 lib/src/op_heads_store.rs | 54 +++++++++------------------------
 lib/src/repo.rs           | 50 +++++++++++++++++++++++++-----------
 lib/src/transaction.rs    |  9 ++++++-
 lib/src/view.rs           |  4 +++
 4 files changed, 59 insertions(+), 58 deletions(-)

diff --git a/lib/src/op_heads_store.rs b/lib/src/op_heads_store.rs
index 1877e0128..87738b075 100644
--- a/lib/src/op_heads_store.rs
+++ b/lib/src/op_heads_store.rs
@@ -13,14 +13,11 @@
 // limitations under the License.
 
 use crate::dag_walk;
-use crate::index::MutableIndex;
-
 use crate::lock::FileLock;
 use crate::op_store;
 use crate::op_store::{OpStore, OperationId, OperationMetadata};
 use crate::operation::Operation;
-
-use crate::view;
+use crate::transaction::UnpublishedOperation;
 use std::path::PathBuf;
 use std::sync::Arc;
 
@@ -116,6 +113,7 @@ impl OpHeadsStore {
         }
 
         let op_store = repo_loader.op_store();
+
         if op_heads.len() == 1 {
             let operation_id = op_heads.pop().unwrap();
             let operation = op_store.read_operation(&operation_id).unwrap();
@@ -158,7 +156,7 @@ impl OpHeadsStore {
             return Ok(op_heads.pop().unwrap());
         }
 
-        let merge_operation = merge_op_heads(repo_loader, op_heads)?;
+        let merge_operation = merge_op_heads(repo_loader, op_heads)?.leave_unpublished();
         self.add_op_head(merge_operation.id());
         for old_op_head_id in merge_operation.parent_ids() {
             self.remove_op_head(old_op_head_id);
@@ -187,20 +185,13 @@ impl OpHeadsStore {
 fn merge_op_heads(
     repo_loader: &RepoLoader,
     mut op_heads: Vec<Operation>,
-) -> Result<Operation, OpHeadResolutionError> {
+) -> Result<UnpublishedOperation, OpHeadResolutionError> {
     op_heads.sort_by_key(|op| op.store_operation().metadata.end_time.timestamp.clone());
-    let first_op_head = op_heads[0].clone();
-    let op_store = repo_loader.op_store();
-    let mut merged_view = op_store.read_view(first_op_head.view().id()).unwrap();
-
+    let base_repo = repo_loader.load_at(&op_heads[0]).unwrap();
+    let mut tx = base_repo.start_transaction("resolve concurrent operations");
+    let merged_repo = tx.mut_repo();
     let neighbors_fn = |op: &Operation| op.parents();
-    let store = repo_loader.store();
-    let index_store = repo_loader.index_store();
-    let base_index = index_store.get_index_at_op(&first_op_head, store);
-    let mut index = MutableIndex::incremental(base_index);
     for (i, other_op_head) in op_heads.iter().enumerate().skip(1) {
-        let other_index = index_store.get_index_at_op(other_op_head, store);
-        index.merge_in(&other_index);
         let ancestor_op = dag_walk::closest_common_node(
             op_heads[0..i].to_vec(),
             vec![other_op_head.clone()],
@@ -208,35 +199,14 @@ fn merge_op_heads(
         &|op: &Operation| op.id().clone(),
         )
         .unwrap();
-        merged_view = view::merge_views(
-            store,
-            &merged_view,
-            ancestor_op.view().store_view(),
-            other_op_head.view().store_view(),
-        );
+        let base_repo = repo_loader.load_at(&ancestor_op).unwrap();
+        let other_repo = repo_loader.load_at(&other_op_head).unwrap();
+        merged_repo.merge(&base_repo, &other_repo);
     }
-    let merged_index = index_store.write_index(index).unwrap();
-    let merged_view_id = op_store.write_view(&merged_view).unwrap();
-    let operation_metadata = OperationMetadata::new(
-        "resolve concurrent operations".to_string(),
-        Timestamp::now(),
-    );
     let op_parent_ids = op_heads.iter().map(|op| op.id().clone()).collect();
-    let merge_operation = op_store::Operation {
-        view_id: merged_view_id,
-        parents: op_parent_ids,
-        metadata: operation_metadata,
-    };
-    let merge_operation_id = op_store.write_operation(&merge_operation).unwrap();
-    index_store
-        .associate_file_with_operation(merged_index.as_ref(), &merge_operation_id)
-        .unwrap();
+    tx.set_parents(op_parent_ids);
     // TODO: We already have the resulting View in this case but Operation cannot
     // keep it. Teach Operation to have a cached View so the caller won't have
     // to re-read it from the store (by calling Operation::view())?
-    Ok(Operation::new(
-        op_store.clone(),
-        merge_operation_id,
-        merge_operation,
-    ))
+    Ok(tx.write())
 }
diff --git a/lib/src/repo.rs b/lib/src/repo.rs
index 7e6057350..f0a875c19 100644
--- a/lib/src/repo.rs
+++ b/lib/src/repo.rs
@@ -40,7 +40,7 @@ use crate::store;
 use crate::store::{CommitId, Store, StoreError};
 use crate::store_wrapper::StoreWrapper;
 use crate::transaction::Transaction;
-use crate::view::{MutableView, ReadonlyView, ViewRef};
+use crate::view::{merge_views, MutableView, ReadonlyView, ViewRef};
 use crate::working_copy::WorkingCopy;
 
 #[derive(Debug, Error, PartialEq, Eq)]
@@ -259,9 +259,7 @@ impl ReadonlyRepo {
         RepoLoader::init(user_settings, wc_path)?.load_at_head()
     }
 
-    pub fn loader(
-        &self
-    ) -> RepoLoader {
+    pub fn loader(&self) -> RepoLoader {
         RepoLoader {
             wc_path: self.wc_path.clone(),
             repo_path: self.repo_path.clone(),
@@ -403,7 +401,10 @@ pub struct RepoLoader {
 }
 
 impl RepoLoader {
-    pub fn init(user_settings: &UserSettings, wc_path: PathBuf) -> Result<RepoLoader, RepoLoadError> {
+    pub fn init(
+        user_settings: &UserSettings,
+        wc_path: PathBuf,
+    ) -> Result<RepoLoader, RepoLoadError> {
         let repo_path = wc_path.join(".jj");
         // TODO: Check if ancestor directory has a .jj/
         if !repo_path.is_dir() {
@@ -458,19 +459,19 @@ impl RepoLoader {
         &self.op_store
     }
 
-    pub fn load_at_head(self) -> Result<Arc<ReadonlyRepo>, RepoLoadError> {
+    pub fn load_at_head(&self) -> Result<Arc<ReadonlyRepo>, RepoLoadError> {
         let op = self.op_heads_store.get_single_op_head(&self).unwrap();
         let view = ReadonlyView::new(self.store.clone(), op.view().take_store_view());
         self._finish_load(op.id().clone(), view)
     }
 
-    pub fn load_at(self, op: &Operation) -> Result<Arc<ReadonlyRepo>, RepoLoadError> {
+    pub fn load_at(&self, op: &Operation) -> Result<Arc<ReadonlyRepo>, RepoLoadError> {
         let view = ReadonlyView::new(self.store.clone(), op.view().take_store_view());
         self._finish_load(op.id().clone(), view)
     }
 
     fn _finish_load(
-        self,
+        &self,
         op_id: OperationId,
         view: ReadonlyView,
     ) -> Result<Arc<ReadonlyRepo>, RepoLoadError> {
@@ -480,14 +481,14 @@ impl RepoLoader {
             self.repo_path.join("working_copy"),
         );
         let repo = ReadonlyRepo {
-            repo_path: self.repo_path,
-            wc_path: self.wc_path,
-            store: self.store,
-            op_store: self.op_store,
-            op_heads_store: self.op_heads_store,
+            repo_path: self.repo_path.clone(),
+            wc_path: self.wc_path.clone(),
+            store: self.store.clone(),
+            op_store: self.op_store.clone(),
+            op_heads_store: self.op_heads_store.clone(),
             op_id,
-            settings: self.repo_settings,
-            index_store: self.index_store,
+            settings: self.repo_settings.clone(),
+            index_store: self.index_store.clone(),
             index: Mutex::new(None),
             working_copy: Arc::new(Mutex::new(working_copy)),
             view,
@@ -682,4 +683,23 @@ impl<'r> MutableRepo<'r> {
         self.view.set_view(data);
         self.evolution.as_mut().unwrap().invalidate();
     }
+
+    pub fn merge(&mut self, base_repo: &ReadonlyRepo, other_repo: &ReadonlyRepo) {
+        // First, merge the index, so we can take advantage of a valid index when
+        // merging the view. Merging in base_repo's index isn't typically
+        // necessary, but it can be if base_repo is ahead of either self or other_repo
+        // (e.g. because we're undoing an operation that hasn't been published).
+        self.index.merge_in(&base_repo.index());
+        self.index.merge_in(&other_repo.index());
+
+        let merged_view = merge_views(
+            self.store(),
+            self.view.store_view(),
+            base_repo.view.store_view(),
+            other_repo.view.store_view(),
+        );
+        self.view.set_view(merged_view);
+
+        self.evolution.as_mut().unwrap().invalidate();
+    }
 }
diff --git a/lib/src/transaction.rs b/lib/src/transaction.rs
index 9c328539b..e97db961d 100644
--- a/lib/src/transaction.rs
+++ b/lib/src/transaction.rs
@@ -17,6 +17,7 @@ use crate::evolution::MutableEvolution;
 use crate::index::MutableIndex;
 use crate::op_heads_store::OpHeadsStore;
 use crate::op_store;
+use crate::op_store::OperationId;
 use crate::op_store::OperationMetadata;
 use crate::operation::Operation;
 use crate::repo::{MutableRepo, ReadonlyRepo, RepoRef};
@@ -29,6 +30,7 @@ use std::sync::Arc;
 
 pub struct Transaction<'r> {
     repo: Option<Arc<MutableRepo<'r>>>,
+    parents: Vec<OperationId>,
     description: String,
     start_time: Timestamp,
     closed: bool,
@@ -36,8 +38,10 @@ pub struct Transaction<'r> {
 
 impl<'r> Transaction<'r> {
     pub fn new(mut_repo: Arc<MutableRepo<'r>>, description: &str) -> Transaction<'r> {
+        let parents = vec![mut_repo.base_repo().op_id().clone()];
         Transaction {
             repo: Some(mut_repo),
+            parents,
             description: description.to_owned(),
             start_time: Timestamp::now(),
             closed: false,
@@ -48,6 +52,9 @@ impl<'r> Transaction<'r> {
         self.repo.as_ref().unwrap().base_repo()
     }
 
+    pub fn set_parents(&mut self, parents: Vec<OperationId>) {
+        self.parents = parents;
+    }
     pub fn store(&self) -> &Arc<StoreWrapper> {
         self.repo.as_ref().unwrap().store()
     }
@@ -144,7 +151,7 @@ impl<'r> Transaction<'r> {
             OperationMetadata::new(self.description.clone(), self.start_time.clone());
         let store_operation = op_store::Operation {
             view_id,
-            parents: vec![base_repo.op_id().clone()],
+            parents: self.parents.clone(),
             metadata: operation_metadata,
         };
         let new_op_id = base_repo
diff --git a/lib/src/view.rs b/lib/src/view.rs
index ec1b8b10f..c32ebcbb3 100644
--- a/lib/src/view.rs
+++ b/lib/src/view.rs
@@ -212,6 +212,10 @@ impl ReadonlyView {
     pub fn git_refs(&self) -> &BTreeMap<String, CommitId> {
         &self.data.git_refs
     }
+
+    pub fn store_view(&self) -> &op_store::View {
+        &self.data
+    }
 }
 
 impl MutableView {
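
Usage sketch (not part of the patch): the commit message says `undo` can now be written on top of `MutableRepo::merge()`. Roughly, that means loading the repo at the operation being undone and at that operation's parent, then applying the difference between them to a transaction's mutable repo. The helper below is only an illustration; the function name, the single-parent assumption, and the `tx.commit()` call are assumptions, while `loader()`, `load_at()`, `start_transaction()`, `mut_repo()`, and `merge()` are the APIs shown in the diff above.

// Illustrative sketch only, not part of this patch.
use std::sync::Arc;

use crate::operation::Operation;
use crate::repo::{ReadonlyRepo, RepoLoadError};

fn undo_operation(repo: &Arc<ReadonlyRepo>, bad_op: &Operation) -> Result<(), RepoLoadError> {
    let mut tx = repo.start_transaction("undo operation");
    let loader = repo.loader();
    // The repo as it looked with the unwanted operation applied...
    let bad_repo = loader.load_at(bad_op)?;
    // ...and as it looked just before that operation (this sketch assumes a single parent).
    let parent_ops = bad_op.parents();
    let parent_repo = loader.load_at(&parent_ops[0])?;
    // Applying the difference from bad_repo to parent_repo onto the current view
    // backs the operation out; merge() also takes care of merging the indexes.
    tx.mut_repo().merge(&bad_repo, &parent_repo);
    // `commit()` is assumed here; the patch itself only shows `write()`/`leave_unpublished()`.
    tx.commit();
    Ok(())
}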