repo: make resolution of concurrent operations a separate step (#111)

This is yet another refactoring step to allow the UI layer to inspect
and control how concurrent operations are resolved.
Martin von Zweigbergk, 2022-03-20 11:43:36 -07:00 (committed by Martin von Zweigbergk)
parent e384f97b20
commit a38dd9d693
4 changed files with 49 additions and 15 deletions
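
After this change, loading a repo at the head operation returns a RepoAtHead value, and the caller decides when the concurrent operation heads get merged instead of that happening implicitly inside load_at_head(). Below is a minimal sketch, not part of this commit, of how a UI-layer caller might use the new API; it assumes the library is imported as jujutsu_lib and the warning text is purely illustrative, while the CLI in this commit (last hunk below) simply calls .resolve():

use std::sync::Arc;

use jujutsu_lib::repo::{ReadonlyRepo, RepoAtHead, RepoLoader};

// Hypothetical UI-layer helper (not in this commit): report that concurrent
// operations were found before falling back to the default resolution.
fn load_head_with_notice(loader: &RepoLoader) -> Arc<ReadonlyRepo> {
    match loader.load_at_head() {
        // Only one operation head: nothing to resolve.
        RepoAtHead::Single(repo) => repo,
        // Several concurrent operation heads: tell the user, then merge them
        // the same way ReadonlyRepo::load()/reload() do via resolve().
        RepoAtHead::Unresolved(unresolved) => {
            eprintln!("Concurrent operations detected; merging operation heads.");
            unresolved.resolve()
        }
    }
}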

@@ -28,7 +28,7 @@ use crate::commit_builder::CommitBuilder;
 use crate::dag_walk::{closest_common_node, topo_order_reverse};
 use crate::index::{IndexRef, MutableIndex, ReadonlyIndex};
 use crate::index_store::IndexStore;
-use crate::op_heads_store::{OpHeads, OpHeadsStore};
+use crate::op_heads_store::{LockedOpHeads, OpHeads, OpHeadsStore};
 use crate::op_store::{BranchTarget, OpStore, OperationId, RefTarget, WorkspaceId};
 use crate::operation::Operation;
 use crate::rewrite::DescendantRebaser;
@@ -192,7 +192,9 @@ impl ReadonlyRepo {
     }

     pub fn load(user_settings: &UserSettings, repo_path: PathBuf) -> Arc<ReadonlyRepo> {
-        RepoLoader::init(user_settings, repo_path).load_at_head()
+        RepoLoader::init(user_settings, repo_path)
+            .load_at_head()
+            .resolve()
     }

     pub fn loader(&self) -> RepoLoader {
@@ -277,7 +279,7 @@ impl ReadonlyRepo {
     }

     pub fn reload(&self) -> Arc<ReadonlyRepo> {
-        self.loader().load_at_head()
+        self.loader().load_at_head().resolve()
     }

     pub fn reload_at(&self, operation: &Operation) -> Arc<ReadonlyRepo> {
@@ -285,6 +287,38 @@ impl ReadonlyRepo {
     }
 }

+pub enum RepoAtHead {
+    Single(Arc<ReadonlyRepo>),
+    Unresolved(Box<UnresolvedHeadRepo>),
+}
+
+impl RepoAtHead {
+    pub fn resolve(self) -> Arc<ReadonlyRepo> {
+        match self {
+            RepoAtHead::Single(repo) => repo,
+            RepoAtHead::Unresolved(unresolved) => unresolved.resolve(),
+        }
+    }
+}
+
+pub struct UnresolvedHeadRepo {
+    repo_loader: RepoLoader,
+    locked_op_heads: LockedOpHeads,
+    op_heads: Vec<Operation>,
+}
+
+impl UnresolvedHeadRepo {
+    pub fn resolve(self) -> Arc<ReadonlyRepo> {
+        let merged_repo = self
+            .repo_loader
+            .merge_op_heads(self.op_heads)
+            .leave_unpublished();
+        self.locked_op_heads.finish(merged_repo.operation());
+        merged_repo
+    }
+}
+
+#[derive(Clone)]
 pub struct RepoLoader {
     repo_path: PathBuf,
     repo_settings: RepoSettings,
@@ -331,21 +365,21 @@ impl RepoLoader {
         &self.op_heads_store
     }

-    pub fn load_at_head(&self) -> Arc<ReadonlyRepo> {
+    pub fn load_at_head(&self) -> RepoAtHead {
         let op_heads = self.op_heads_store.get_heads(self).unwrap();
         match op_heads {
             OpHeads::Single(op) => {
                 let view = View::new(op.view().take_store_view());
-                self._finish_load(op, view)
+                RepoAtHead::Single(self._finish_load(op, view))
             }
             OpHeads::Unresolved {
                 locked_op_heads,
                 op_heads,
-            } => {
-                let merged_repo = self.merge_op_heads(op_heads).leave_unpublished();
-                locked_op_heads.finish(merged_repo.operation());
-                merged_repo
-            }
+            } => RepoAtHead::Unresolved(Box::new(UnresolvedHeadRepo {
+                repo_loader: self.clone(),
+                locked_op_heads,
+                op_heads,
+            })),
         }
     }

@@ -109,7 +109,7 @@ fn test_bad_locking_children(use_git: bool) {
     let machine1_root = TempDir::new().unwrap().into_path();
     copy_directory(workspace_root, &machine1_root);
     let machine1_workspace = Workspace::load(&settings, machine1_root.clone()).unwrap();
-    let machine1_repo = machine1_workspace.repo_loader().load_at_head();
+    let machine1_repo = machine1_workspace.repo_loader().load_at_head().resolve();
     let mut machine1_tx = machine1_repo.start_transaction("test");
     let child1 = testutils::create_random_commit(&settings, &machine1_repo)
         .set_parents(vec![initial.id().clone()])
@@ -120,7 +120,7 @@ fn test_bad_locking_children(use_git: bool) {
     let machine2_root = TempDir::new().unwrap().into_path();
     copy_directory(workspace_root, &machine2_root);
     let machine2_workspace = Workspace::load(&settings, machine2_root.clone()).unwrap();
-    let machine2_repo = machine2_workspace.repo_loader().load_at_head();
+    let machine2_repo = machine2_workspace.repo_loader().load_at_head().resolve();
     let mut machine2_tx = machine2_repo.start_transaction("test");
     let child2 = testutils::create_random_commit(&settings, &machine2_repo)
         .set_parents(vec![initial.id().clone()])
@@ -132,7 +132,7 @@ fn test_bad_locking_children(use_git: bool) {
     let merged_path = TempDir::new().unwrap().into_path();
     merge_directories(&machine1_root, workspace_root, &machine2_root, &merged_path);
     let merged_workspace = Workspace::load(&settings, merged_path).unwrap();
-    let merged_repo = merged_workspace.repo_loader().load_at_head();
+    let merged_repo = merged_workspace.repo_loader().load_at_head().resolve();
     assert!(merged_repo.view().heads().contains(child1.id()));
     assert!(merged_repo.view().heads().contains(child2.id()));
     let op_id = merged_repo.op_id().clone();

@@ -34,7 +34,7 @@ fn test_load_at_operation(use_git: bool) {
     // If we load the repo at head, we should not see the commit since it was
     // removed
     let loader = RepoLoader::init(&settings, repo.repo_path().clone());
-    let head_repo = loader.load_at_head();
+    let head_repo = loader.load_at_head().resolve();
     assert!(!head_repo.view().heads().contains(commit.id()));

     // If we load the repo at the previous operation, we should see the commit since

@@ -211,7 +211,7 @@ jj init --git-repo=.";
         let repo_loader = workspace.repo_loader();
         let op_str = &self.args.at_operation;
         let repo = if op_str == "@" {
-            repo_loader.load_at_head()
+            repo_loader.load_at_head().resolve()
         } else {
             let op = resolve_single_op_from_store(
                 repo_loader.op_store(),