ok/jj
forked from mirrors/jj

index: update in memory and on disk while resolving operation conflicts

Updating the index on disk means that readers won't have to calculate
the state. Updating it in memory means that we can take advantage of
it while resolving conflicts. We will do that soon.
Martin von Zweigbergk 2021-03-06 10:37:57 -08:00
parent 779db67f8f
commit 1e623bd019
3 changed files with 76 additions and 12 deletions
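
For orientation, the heart of this change is in merge_op_heads (last file below): while resolving concurrent operation heads, it now also builds a merged index in memory and writes it to disk. The following is a condensed sketch of that flow, pieced together only from the call sites visible in this diff; surrounding setup is elided, the loop is simplified, and the unwrap()-based error handling is kept as in the diff.

    // Start from the index as of the first op head, then fold in the others.
    let base_index = index_store.get_index_at_op(&first_op_head, store);
    let mut index = MutableIndex::incremental(base_index);
    for other_op_head in &op_heads[1..] {
        let other_index = index_store.get_index_at_op(other_op_head, store);
        // In-memory merge, so the conflict resolution itself can use the index.
        index.merge_in(&other_index);
    }
    // Persist the merged index and tie it to the new "resolve concurrent
    // operations" operation, so later readers don't have to recalculate it.
    let merged_index = index_store.write_index(index).unwrap();
    index_store
        .associate_file_with_operation(merged_index.as_ref(), &merge_operation_id)
        .unwrap();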


@@ -427,6 +427,37 @@ impl MutableIndex {
}
}
pub fn merge_in(&mut self, other: &Arc<ReadonlyIndex>) {
let mut maybe_own_ancestor = self.parent_file.clone();
let mut maybe_other_ancestor = Some(other.clone());
let mut files_to_add = vec![];
loop {
if maybe_other_ancestor.is_none() {
break;
}
let other_ancestor = maybe_other_ancestor.as_ref().unwrap();
if maybe_own_ancestor.is_none() {
files_to_add.push(other_ancestor.clone());
maybe_other_ancestor = other_ancestor.parent_file.clone();
continue;
}
let own_ancestor = maybe_own_ancestor.as_ref().unwrap();
if own_ancestor.name == other_ancestor.name {
break;
}
if own_ancestor.num_commits() < other_ancestor.num_commits() {
files_to_add.push(other_ancestor.clone());
maybe_other_ancestor = other_ancestor.parent_file.clone();
} else {
maybe_own_ancestor = own_ancestor.parent_file.clone();
}
}
for file in files_to_add.iter().rev() {
self.add_commits_from(file.as_ref());
}
}
fn serialize(self) -> Vec<u8> {
assert_eq!(self.graph.len(), self.lookup.len());
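
The merge_in method added above walks the two indexes' chains of parent files: it descends whichever chain is longer (by commit count) until it either reaches a file both chains share or exhausts its own chain, collecting the files that exist only on the other side, and then replays those files oldest-first via add_commits_from. Below is a minimal, self-contained sketch of just that walk, using a hypothetical stand-in type instead of the real ReadonlyIndex; the names and fields here are illustrative, not the crate's API.

    use std::sync::Arc;

    // Hypothetical stand-in for an on-disk index file with a parent chain.
    struct IndexFile {
        name: String,
        num_commits: u32,
        parent_file: Option<Arc<IndexFile>>,
    }

    // Collect the files reachable from `other` that `own` does not already
    // have, mirroring the loop in MutableIndex::merge_in above.
    fn files_only_in_other(
        own: Option<Arc<IndexFile>>,
        other: Option<Arc<IndexFile>>,
    ) -> Vec<Arc<IndexFile>> {
        let mut maybe_own = own;
        let mut maybe_other = other;
        let mut files_to_add = vec![];
        while let Some(other_ancestor) = maybe_other.clone() {
            match maybe_own.clone() {
                // Our own chain is exhausted: the rest of the other chain is new.
                None => {
                    files_to_add.push(other_ancestor.clone());
                    maybe_other = other_ancestor.parent_file.clone();
                }
                Some(own_ancestor) => {
                    if own_ancestor.name == other_ancestor.name {
                        // Reached a file both chains share; nothing older differs.
                        break;
                    }
                    if own_ancestor.num_commits < other_ancestor.num_commits {
                        // The other chain is ahead, so its tip cannot be ours yet.
                        files_to_add.push(other_ancestor.clone());
                        maybe_other = other_ancestor.parent_file.clone();
                    } else {
                        maybe_own = own_ancestor.parent_file.clone();
                    }
                }
            }
        }
        // Oldest first, matching the .rev() used when calling add_commits_from.
        files_to_add.reverse();
        files_to_add
    }

    fn main() {
        // Two chains that share a common base file "a".
        let a = Arc::new(IndexFile { name: "a".into(), num_commits: 10, parent_file: None });
        let b = Arc::new(IndexFile { name: "b".into(), num_commits: 15, parent_file: Some(a.clone()) });
        let c = Arc::new(IndexFile { name: "c".into(), num_commits: 12, parent_file: Some(a) });
        // Merging b's index into c's only needs the commits recorded in "b".
        assert_eq!(files_only_in_other(Some(c), Some(b)).len(), 1);
    }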


@@ -100,7 +100,7 @@ pub struct ReadonlyRepo {
wc_path: PathBuf,
store: Arc<StoreWrapper>,
settings: RepoSettings,
index_store: IndexStore,
index_store: Arc<IndexStore>,
index: Mutex<Option<Arc<ReadonlyIndex>>>,
working_copy: Arc<Mutex<WorkingCopy>>,
view: ReadonlyView,
@@ -198,16 +198,18 @@ impl ReadonlyRepo {
std::fs::create_dir(repo_path.join("op_store")).unwrap();
let op_store: Arc<dyn OpStore> = Arc::new(SimpleOpStore::init(repo_path.join("op_store")));
fs::create_dir(repo_path.join("index")).unwrap();
let index_store = Arc::new(IndexStore::init(repo_path.join("index")));
let view = ReadonlyView::init(
store.clone(),
op_store,
index_store.clone(),
repo_path.join("view"),
checkout_commit.id().clone(),
);
fs::create_dir(repo_path.join("index")).unwrap();
let index_store = IndexStore::init(repo_path.join("index"));
let repo = ReadonlyRepo {
repo_path,
wc_path,
@@ -300,7 +302,7 @@ impl ReadonlyRepo {
&self.store
}
pub fn index_store(&self) -> &IndexStore {
pub fn index_store(&self) -> &Arc<IndexStore> {
&self.index_store
}
@@ -347,7 +349,7 @@ pub struct RepoLoader {
repo_settings: RepoSettings,
store: Arc<StoreWrapper>,
op_store: Arc<dyn OpStore>,
index_store: IndexStore,
index_store: Arc<IndexStore>,
}
impl RepoLoader {
@@ -360,7 +362,7 @@ impl RepoLoader {
let store = RepoLoader::load_store(&repo_path);
let repo_settings = user_settings.with_repo(&repo_path).unwrap();
let op_store: Arc<dyn OpStore> = Arc::new(SimpleOpStore::load(repo_path.join("op_store")));
let index_store = IndexStore::load(repo_path.join("index"));
let index_store = Arc::new(IndexStore::load(repo_path.join("index")));
Ok(RepoLoader {
wc_path,
repo_path,
@@ -400,6 +402,7 @@ impl RepoLoader {
let view = ReadonlyView::load(
self.store.clone(),
self.op_store.clone(),
self.index_store.clone(),
self.repo_path.join("view"),
);
self._finish_load(view)
@@ -409,6 +412,7 @@ impl RepoLoader {
let view = ReadonlyView::load_at(
self.store.clone(),
self.op_store.clone(),
self.index_store.clone(),
self.repo_path.join("view"),
op,
);


@@ -22,6 +22,8 @@ use thiserror::Error;
use crate::commit::Commit;
use crate::dag_walk;
use crate::index::MutableIndex;
use crate::index_store::IndexStore;
use crate::lock::FileLock;
use crate::op_store;
use crate::op_store::{OpStore, OpStoreResult, OperationId, OperationMetadata};
@@ -93,6 +95,7 @@ pub struct ReadonlyView {
path: PathBuf,
op_store: Arc<dyn OpStore>,
op_id: OperationId,
index_store: Arc<IndexStore>,
data: op_store::View,
}
@@ -250,6 +253,7 @@ pub fn merge_views(
fn get_single_op_head(
store: &StoreWrapper,
op_store: &Arc<dyn OpStore>,
index_store: &Arc<IndexStore>,
op_heads_dir: &Path,
) -> Result<(OperationId, op_store::Operation, op_store::View), OpHeadResolutionError> {
let mut op_heads = get_op_heads(&op_heads_dir);
@@ -289,7 +293,7 @@ fn get_single_op_head(
}
let (merge_operation_id, merge_operation, merged_view) =
merge_op_heads(store, op_store, &op_heads)?;
merge_op_heads(store, op_store, index_store, &op_heads)?;
add_op_head(&op_heads_dir, &merge_operation_id);
for old_op_head_id in op_heads {
// The merged one will be in the input to the merge if it's a "fast-forward"
@@ -304,6 +308,7 @@ fn get_single_op_head(
fn merge_op_heads(
store: &StoreWrapper,
op_store: &Arc<dyn OpStore>,
index_store: &Arc<IndexStore>,
op_head_ids: &[OperationId],
) -> Result<(OperationId, op_store::Operation, op_store::View), OpHeadResolutionError> {
let op_heads: Vec<_> = op_head_ids
@@ -332,7 +337,11 @@ fn merge_op_heads(
));
}
let base_index = index_store.get_index_at_op(&first_op_head, store);
let mut index = MutableIndex::incremental(base_index);
for (i, other_op_head) in op_heads.iter().enumerate().skip(1) {
let other_index = index_store.get_index_at_op(other_op_head, store);
index.merge_in(&other_index);
let ancestor_op = dag_walk::closest_common_node(
op_heads[0..i].to_vec(),
vec![other_op_head.clone()],
@@ -347,6 +356,7 @@ fn merge_op_heads(
other_op_head.view().store_view(),
);
}
let merged_index = index_store.write_index(index).unwrap();
let merged_view_id = op_store.write_view(&merged_view).unwrap();
let operation_metadata = OperationMetadata::new("resolve concurrent operations".to_string());
let op_parent_ids = op_heads.iter().map(|op| op.id().clone()).collect();
@@ -356,6 +366,10 @@ fn merge_op_heads(
metadata: operation_metadata,
};
let merge_operation_id = op_store.write_operation(&merge_operation).unwrap();
// TODO: Like in Transaction::commit(), there's a race here.
index_store
.associate_file_with_operation(merged_index.as_ref(), &merge_operation_id)
.unwrap();
Ok((merge_operation_id, merge_operation, merged_view))
}
@@ -363,6 +377,7 @@ impl ReadonlyView {
pub fn init(
store: Arc<StoreWrapper>,
op_store: Arc<dyn OpStore>,
index_store: Arc<IndexStore>,
path: PathBuf,
checkout: CommitId,
) -> Self {
@@ -386,19 +401,26 @@ impl ReadonlyView {
path,
op_store,
op_id: init_operation_id,
index_store,
data: root_view,
}
}
pub fn load(store: Arc<StoreWrapper>, op_store: Arc<dyn OpStore>, path: PathBuf) -> Self {
pub fn load(
store: Arc<StoreWrapper>,
op_store: Arc<dyn OpStore>,
index_store: Arc<IndexStore>,
path: PathBuf,
) -> Self {
let op_heads_dir = path.join("op_heads");
let (op_id, _operation, view) =
get_single_op_head(&store, &op_store, &op_heads_dir).unwrap();
get_single_op_head(&store, &op_store, &index_store, &op_heads_dir).unwrap();
ReadonlyView {
store,
path,
op_store,
op_id,
index_store,
data: view,
}
}
@@ -406,6 +428,7 @@ impl ReadonlyView {
pub fn load_at(
store: Arc<StoreWrapper>,
op_store: Arc<dyn OpStore>,
index_store: Arc<IndexStore>,
path: PathBuf,
operation: &Operation,
) -> Self {
@@ -414,14 +437,20 @@ impl ReadonlyView {
path,
op_store,
op_id: operation.id().clone(),
index_store,
data: operation.view().take_store_view(),
}
}
pub fn reload(&mut self) -> OperationId {
let op_heads_dir = self.path.join("op_heads");
let (op_id, _operation, view) =
get_single_op_head(&self.store, &self.op_store, &op_heads_dir).unwrap();
let (op_id, _operation, view) = get_single_op_head(
&self.store,
&self.op_store,
&self.index_store,
&op_heads_dir,
)
.unwrap();
self.op_id = op_id;
self.data = view;
self.op_id.clone()