repo: simplify and clarify name of base_op_head_id() functions

Martin von Zweigbergk, 2021-03-10 15:22:04 -08:00
parent 9ee521d9d3
commit 48d7903925
6 changed files with 50 additions and 56 deletions
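
The rename itself is mechanical: ReadonlyView::base_op_head_id() becomes op_id(), MutableView::base_op_head_id() becomes base_op_id(), and ViewRef::base_op_head_id()/base_op_head() become base_op_id()/base_op(). As a quick reference, here is a minimal, self-contained Rust sketch of the naming scheme after this commit. It uses only the accessor names visible in the hunks below; the struct fields and main() are illustrative assumptions, not the real implementation.

// Toy model of the post-rename accessors; the real types live in the view
// module and carry much more state.
#[derive(Clone, Debug, PartialEq)]
struct OperationId(Vec<u8>);

// A ReadonlyView exposes the operation it was loaded at as op_id()
// (previously base_op_head_id()).
struct ReadonlyView {
    op_id: OperationId,
}

impl ReadonlyView {
    fn op_id(&self) -> &OperationId {
        &self.op_id
    }
}

// A MutableView exposes the operation it is based on as base_op_id()
// (previously base_op_head_id()).
struct MutableView {
    base_op_id: OperationId,
}

impl MutableView {
    fn base_op_id(&self) -> &OperationId {
        &self.base_op_id
    }
}

fn main() {
    let readonly = ReadonlyView { op_id: OperationId(vec![1, 2, 3]) };
    let mutable = MutableView { base_op_id: readonly.op_id().clone() };
    assert_eq!(readonly.op_id(), mutable.base_op_id());
}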


@@ -277,7 +277,7 @@ impl ReadonlyRepo {
pub fn index(&self) -> Arc<ReadonlyIndex> {
let mut locked_index = self.index.lock().unwrap();
if locked_index.is_none() {
- let op_id = self.view.base_op_head_id().clone();
+ let op_id = self.view.op_id().clone();
let op = self.view.op_store().read_operation(&op_id).unwrap();
let op = Operation::new(self.view.op_store().clone(), op_id, op);
locked_index.replace(self.index_store.get_index_at_op(&op, self.store.as_ref()));


@@ -73,10 +73,10 @@ impl<'a> ViewRef<'a> {
}
}
- pub fn base_op_head_id(&self) -> &'a OperationId {
+ pub fn base_op_id(&self) -> &'a OperationId {
match self {
- ViewRef::Readonly(view) => view.base_op_head_id(),
- ViewRef::Mutable(view) => view.base_op_head_id(),
+ ViewRef::Readonly(view) => view.op_id(),
+ ViewRef::Mutable(view) => view.base_op_id(),
}
}
@@ -85,8 +85,8 @@ impl<'a> ViewRef<'a> {
Ok(Operation::new(self.op_store().clone(), id.clone(), data))
}
- pub fn base_op_head(&self) -> Operation {
- self.get_operation(self.base_op_head_id()).unwrap()
+ pub fn base_op(&self) -> Operation {
+ self.get_operation(self.base_op_id()).unwrap()
}
}
@@ -103,7 +103,7 @@ pub struct MutableView {
store: Arc<StoreWrapper>,
path: PathBuf,
op_store: Arc<dyn OpStore>,
- base_op_head_id: OperationId,
+ base_op_id: OperationId,
data: op_store::View,
}
@@ -465,7 +465,7 @@ impl ReadonlyView {
store: self.store.clone(),
path: self.path.clone(),
op_store: self.op_store.clone(),
- base_op_head_id: self.op_id.clone(),
+ base_op_id: self.op_id.clone(),
data: self.data.clone(),
}
}
@@ -494,7 +494,7 @@ impl ReadonlyView {
self.op_store.clone()
}
- pub fn base_op_head_id(&self) -> &OperationId {
+ pub fn op_id(&self) -> &OperationId {
&self.op_id
}
}
@@ -524,8 +524,8 @@ impl MutableView {
self.op_store.clone()
}
- pub fn base_op_head_id(&self) -> &OperationId {
- &self.base_op_head_id
+ pub fn base_op_id(&self) -> &OperationId {
+ &self.base_op_id
}
pub fn set_checkout(&mut self, id: CommitId) {
@@ -573,11 +573,11 @@ impl MutableView {
operation_metadata.start_time = operation_start_time;
let operation = op_store::Operation {
view_id,
- parents: vec![self.base_op_head_id.clone()],
+ parents: vec![self.base_op_id.clone()],
metadata: operation_metadata,
};
- let new_op_head_id = self.op_store.write_operation(&operation).unwrap();
- Operation::new(self.op_store.clone(), new_op_head_id, operation)
+ let new_op_id = self.op_store.write_operation(&operation).unwrap();
+ Operation::new(self.op_store.clone(), new_op_id, operation)
}
pub fn update_op_heads(&self, op: &Operation) {


@@ -130,13 +130,13 @@ fn test_bad_locking_children(use_git: bool) {
let merged_repo = ReadonlyRepo::load(&settings, merged_path).unwrap();
assert!(merged_repo.view().heads().contains(child1.id()));
assert!(merged_repo.view().heads().contains(child2.id()));
- let op_head_id = merged_repo.view().base_op_head_id().clone();
- let op_head = merged_repo
+ let op_id = merged_repo.view().op_id().clone();
+ let op = merged_repo
.view()
.op_store()
- .read_operation(&op_head_id)
+ .read_operation(&op_id)
.unwrap();
- assert_eq!(op_head.parents.len(), 2);
+ assert_eq!(op.parents.len(), 2);
}
#[test_case(false ; "local store")]
@@ -164,14 +164,14 @@ fn test_bad_locking_interrupted(use_git: bool) {
testutils::create_random_commit(&settings, &repo)
.set_parents(vec![initial.id().clone()])
.write_to_transaction(&mut tx);
- let op_head_id = tx.commit().id().clone();
+ let op_id = tx.commit().id().clone();
copy_directory(&backup_path, &op_heads_dir);
// Reload the repo and check that only the new head is present.
let reloaded_repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()).unwrap();
- assert_eq!(reloaded_repo.view().base_op_head_id(), &op_head_id);
+ assert_eq!(reloaded_repo.view().op_id(), &op_id);
// Reload once more to make sure that the view/op_heads/ directory was updated
// correctly.
let reloaded_repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()).unwrap();
- assert_eq!(reloaded_repo.view().base_op_head_id(), &op_head_id);
+ assert_eq!(reloaded_repo.view().op_id(), &op_id);
}


@@ -23,11 +23,11 @@ use test_case::test_case;
fn count_non_merge_operations(repo: &ReadonlyRepo) -> u32 {
let view = repo.view();
let op_store = view.op_store();
- let op_head_id = view.base_op_head_id().clone();
+ let op_id = view.op_id().clone();
let mut num_ops = 0;
for op_id in dag_walk::bfs(
- vec![op_head_id],
+ vec![op_id],
Box::new(|op_id| op_id.clone()),
Box::new(|op_id| op_store.read_operation(&op_id).unwrap().parents),
) {


@@ -36,30 +36,27 @@ fn test_consecutive_operations(use_git: bool) {
let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
let op_heads_dir = repo.repo_path().join("view").join("op_heads");
- let op_head_id0 = repo.view().base_op_head_id().clone();
- assert_eq!(
- list_dir(&op_heads_dir),
- vec![repo.view().base_op_head_id().hex()]
- );
+ let op_id0 = repo.view().op_id().clone();
+ assert_eq!(list_dir(&op_heads_dir), vec![repo.view().op_id().hex()]);
let mut tx1 = repo.start_transaction("transaction 1");
testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx1);
- let op_head_id1 = tx1.commit().id().clone();
- assert_ne!(op_head_id1, op_head_id0);
- assert_eq!(list_dir(&op_heads_dir), vec![op_head_id1.hex()]);
+ let op_id1 = tx1.commit().id().clone();
+ assert_ne!(op_id1, op_id0);
+ assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);
Arc::get_mut(&mut repo).unwrap().reload();
let mut tx2 = repo.start_transaction("transaction 2");
testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx2);
- let op_head_id2 = tx2.commit().id().clone();
- assert_ne!(op_head_id2, op_head_id0);
- assert_ne!(op_head_id2, op_head_id1);
- assert_eq!(list_dir(&op_heads_dir), vec![op_head_id2.hex()]);
+ let op_id2 = tx2.commit().id().clone();
+ assert_ne!(op_id2, op_id0);
+ assert_ne!(op_id2, op_id1);
+ assert_eq!(list_dir(&op_heads_dir), vec![op_id2.hex()]);
// Reloading the repo makes no difference (there are no conflicting operations
// to resolve).
Arc::get_mut(&mut repo).unwrap().reload();
- assert_eq!(list_dir(&op_heads_dir), vec![op_head_id2.hex()]);
+ assert_eq!(list_dir(&op_heads_dir), vec![op_id2.hex()]);
}
#[test_case(false ; "local store")]
@@ -71,38 +71,35 @@ fn test_concurrent_operations(use_git: bool) {
let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
let op_heads_dir = repo.repo_path().join("view").join("op_heads");
- let op_head_id0 = repo.view().base_op_head_id().clone();
- assert_eq!(
- list_dir(&op_heads_dir),
- vec![repo.view().base_op_head_id().hex()]
- );
+ let op_id0 = repo.view().op_id().clone();
+ assert_eq!(list_dir(&op_heads_dir), vec![repo.view().op_id().hex()]);
let mut tx1 = repo.start_transaction("transaction 1");
testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx1);
- let op_head_id1 = tx1.commit().id().clone();
- assert_ne!(op_head_id1, op_head_id0);
- assert_eq!(list_dir(&op_heads_dir), vec![op_head_id1.hex()]);
+ let op_id1 = tx1.commit().id().clone();
+ assert_ne!(op_id1, op_id0);
+ assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);
// After both transactions have committed, we should have two op-heads on disk,
// since they were run in parallel.
let mut tx2 = repo.start_transaction("transaction 2");
testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx2);
- let op_head_id2 = tx2.commit().id().clone();
- assert_ne!(op_head_id2, op_head_id0);
- assert_ne!(op_head_id2, op_head_id1);
+ let op_id2 = tx2.commit().id().clone();
+ assert_ne!(op_id2, op_id0);
+ assert_ne!(op_id2, op_id1);
let mut actual_heads_on_disk = list_dir(&op_heads_dir);
actual_heads_on_disk.sort();
- let mut expected_heads_on_disk = vec![op_head_id1.hex(), op_head_id2.hex()];
+ let mut expected_heads_on_disk = vec![op_id1.hex(), op_id2.hex()];
expected_heads_on_disk.sort();
assert_eq!(actual_heads_on_disk, expected_heads_on_disk);
// Reloading the repo causes the operations to be merged
Arc::get_mut(&mut repo).unwrap().reload();
- let merged_op_head_id = repo.view().base_op_head_id().clone();
- assert_ne!(merged_op_head_id, op_head_id0);
- assert_ne!(merged_op_head_id, op_head_id1);
- assert_ne!(merged_op_head_id, op_head_id2);
- assert_eq!(list_dir(&op_heads_dir), vec![merged_op_head_id.hex()]);
+ let merged_op_id = repo.view().op_id().clone();
+ assert_ne!(merged_op_id, op_id0);
+ assert_ne!(merged_op_id, op_id1);
+ assert_ne!(merged_op_id, op_id2);
+ assert_eq!(list_dir(&op_heads_dir), vec![merged_op_id.hex()]);
}
fn assert_heads(repo: RepoRef, expected: Vec<&CommitId>) {
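
The assertions and comments in the tests above describe the op-heads behavior being exercised: committing a transaction replaces its base operation's head file in view/op_heads/ with the new operation's, two transactions committed in parallel from the same base therefore leave two head files, and reloading the repo merges them into a single head. Below is a rough, self-contained Rust sketch of that behavior; OpHeads, commit(), and reload() are invented names that model the tests' assertions, not the repository's actual API.

use std::collections::BTreeSet;

// Toy stand-in for the set of operation-head files in view/op_heads/.
#[derive(Default)]
struct OpHeads(BTreeSet<String>);

impl OpHeads {
    // Committing a transaction removes its base head (if still present) and
    // records the new operation as a head.
    fn commit(&mut self, base: &str, new: &str) {
        self.0.remove(base);
        self.0.insert(new.to_string());
    }

    // Reloading merges multiple heads into a single new merge operation.
    fn reload(&mut self) -> String {
        if self.0.len() > 1 {
            let parents: Vec<String> = self.0.iter().cloned().collect();
            let merged = format!("merge({})", parents.join(","));
            self.0.clear();
            self.0.insert(merged);
        }
        self.0.iter().next().unwrap().clone()
    }
}

fn main() {
    let mut heads = OpHeads::default();
    heads.0.insert("op0".to_string());
    // Two transactions started from op0 and committed without reloading in
    // between, as in test_concurrent_operations:
    heads.commit("op0", "op1");
    heads.commit("op0", "op2");
    assert_eq!(heads.0.len(), 2); // two op heads on disk
    let merged = heads.reload();
    assert_eq!(heads.0.len(), 1); // the reload merged them
    assert_ne!(merged, "op1");
    assert_ne!(merged, "op2");
}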


@@ -205,7 +205,7 @@ fn op_arg<'a, 'b>() -> Arg<'a, 'b> {
fn resolve_single_op(repo: &ReadonlyRepo, op_str: &str) -> Result<Operation, CommandError> {
let view = repo.view();
if op_str == "@" {
- Ok(view.as_view_ref().base_op_head())
+ Ok(view.as_view_ref().base_op())
} else {
resolve_single_op_from_store(&repo.view().op_store(), op_str)
}
@@ -1905,7 +1905,7 @@ fn cmd_op_log(
) -> Result<(), CommandError> {
let repo = get_repo(ui, &matches)?;
let view = repo.view();
- let head_op = view.as_view_ref().base_op_head();
+ let head_op = view.as_view_ref().base_op();
let mut styler = ui.styler();
let mut styler = styler.as_mut();
struct OpTemplate;
@@ -1993,7 +1993,7 @@ fn cmd_op_undo(
let view = repo.view();
let parent_view = parent_ops[0].view();
let bad_view = bad_op.view();
- let current_view = view.as_view_ref().base_op_head().view();
+ let current_view = view.as_view_ref().base_op().view();
merge_views(
repo.store(),
current_view.store_view(),