operation: propagate OpStoreError from parents()
We need to .collect_vec() the parents iterator into a temporary buffer since the borrowed iterator can't be returned back to the dag_walk functions. Another option is to clone op_store and the parent ids to remove the &self lifetime from the iterator, but that would also create a temporary Vec.
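To illustrate the constraint, here is a minimal, self-contained sketch with toy types (illustrative names, not jj's actual signatures): a dag_walk-style function is generic over `I: IntoIterator<Item = T>`, so the neighbors closure cannot return an iterator that borrows its `&T` argument, and collecting into a Vec sidesteps that.

use itertools::Itertools as _; // for collect_vec()

#[derive(Clone)]
struct Node {
    id: u32,
    parent_ids: Vec<u32>,
}

impl Node {
    // Like Operation::parents(): the returned iterator borrows `self`.
    fn parents(&self) -> impl Iterator<Item = Node> + '_ {
        self.parent_ids
            .iter()
            .map(|&id| Node { id, parent_ids: vec![] })
    }
}

// A dag_walk-style walker: the neighbors callback must return a type
// that is independent of the lifetime of its `&T` argument.
fn visit_all<T, I>(start: T, mut neighbors: impl FnMut(&T) -> I)
where
    I: IntoIterator<Item = T>,
{
    let mut stack = vec![start];
    while let Some(node) = stack.pop() {
        // `neighbors(&node)` must own its items; the borrow of `node`
        // ends as soon as this call returns.
        stack.extend(neighbors(&node));
    }
}

fn main() {
    let root = Node { id: 0, parent_ids: vec![1, 2] };
    // `|n: &Node| n.parents()` would not compile: the iterator borrows
    // `n`, which only lives for the duration of the closure call.
    visit_all(root, |n: &Node| n.parents().collect_vec());
}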
parent 3f5e5994eb
commit 8e143541a5
6 changed files with 58 additions and 52 deletions
@@ -1767,12 +1767,12 @@ pub fn check_stale_working_copy(
         wc_operation_data,
     );
     let repo_operation = repo.operation();
-    let maybe_ancestor_op = dag_walk::closest_common_node(
-        [wc_operation.clone()],
-        [repo_operation.clone()],
+    let maybe_ancestor_op = dag_walk::closest_common_node_ok(
+        [Ok(wc_operation.clone())],
+        [Ok(repo_operation.clone())],
         |op: &Operation| op.id().clone(),
-        |op: &Operation| op.parents(),
-    );
+        |op: &Operation| op.parents().collect_vec(),
+    )?;
     if let Some(ancestor_op) = maybe_ancestor_op {
         if ancestor_op.id() == repo_operation.id() {
             // The working copy was updated since we loaded the repo. The repo must be
@@ -1911,15 +1911,19 @@ fn resolve_single_op(
             .map_err(OpHeadResolutionError::Err),
     }?;
     for _ in op_postfix.chars() {
-        operation = match operation.parents().as_slice() {
-            [op] => Ok(op.clone()),
-            [] => Err(user_error(format!(
+        let mut parent_ops = operation.parents();
+        let Some(op) = parent_ops.next().transpose()? else {
+            return Err(user_error(format!(
                 r#"The "{op_str}" expression resolved to no operations"#
-            ))),
-            [_, _, ..] => Err(user_error(format!(
+            )));
+        };
+        if parent_ops.next().is_some() {
+            return Err(user_error(format!(
                 r#"The "{op_str}" expression resolved to more than one operation"#
-            ))),
-        }?;
+            )));
+        }
+        drop(parent_ops);
+        operation = op;
     }
     Ok(operation)
 }
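The rewritten loop leans on Option::transpose: `parent_ops.next()` yields `Option<Result<..>>`, and transpose() flips it into `Result<Option<..>>` so `?` can propagate store errors while let-else handles the empty case. A standalone sketch of that pattern with standard-library types (not jj's error types):

use std::io::{Error, ErrorKind};

fn first_parent(
    mut parents: impl Iterator<Item = Result<String, Error>>,
) -> Result<String, Error> {
    // Option<Result<T, E>> -> Result<Option<T>, E>, so `?` applies.
    let Some(parent) = parents.next().transpose()? else {
        return Err(Error::new(ErrorKind::NotFound, "no parents"));
    };
    Ok(parent)
}

fn main() -> Result<(), Error> {
    let parents = vec![Ok("op1".to_string())].into_iter();
    assert_eq!(first_parent(parents)?, "op1");
    Ok(())
}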
@@ -116,6 +116,7 @@ fn cmd_op_log(
     let mut graph = get_graphlog(command.settings(), formatter.raw());
     let default_node_symbol = graph.default_node_symbol().to_owned();
     for op in iter {
+        let op = op?;
         let mut edges = vec![];
         for id in op.parent_ids() {
             edges.push(Edge::direct(id.clone()));
@@ -146,6 +147,7 @@ fn cmd_op_log(
         }
     } else {
         for op in iter {
+            let op = op?;
             with_content_format.write(formatter, |formatter| {
                 formatter.with_label("op_log", |formatter| template.format(&op, formatter))
             })?;
@@ -190,19 +192,19 @@ pub fn cmd_op_undo(
 ) -> Result<(), CommandError> {
     let mut workspace_command = command.workspace_helper(ui)?;
     let bad_op = workspace_command.resolve_single_op(&args.operation)?;
-    let parent_ops = bad_op.parents();
-    if parent_ops.len() > 1 {
-        return Err(user_error("Cannot undo a merge operation"));
-    }
-    if parent_ops.is_empty() {
+    let mut parent_ops = bad_op.parents();
+    let Some(parent_op) = parent_ops.next().transpose()? else {
         return Err(user_error("Cannot undo repo initialization"));
-    }
+    };
+    if parent_ops.next().is_some() {
+        return Err(user_error("Cannot undo a merge operation"));
+    }
 
     let mut tx =
         workspace_command.start_transaction(&format!("undo operation {}", bad_op.id().hex()));
     let repo_loader = tx.base_repo().loader();
     let bad_repo = repo_loader.load_at(&bad_op)?;
-    let parent_repo = repo_loader.load_at(&parent_ops[0])?;
+    let parent_repo = repo_loader.load_at(&parent_op)?;
     tx.mut_repo().merge(&bad_repo, &parent_repo);
     let new_view = view_with_desired_portions_restored(
         tx.repo().view().store_view(),
@@ -115,11 +115,12 @@ impl DefaultIndexStore {
         let change_id_length = store.change_id_length();
         let mut new_heads = view.heads().clone();
         let mut parent_op_id: Option<OperationId> = None;
-        for op in dag_walk::dfs(
-            vec![operation.clone()],
+        for op in dag_walk::dfs_ok(
+            [Ok(operation.clone())],
             |op: &Operation| op.id().clone(),
-            |op: &Operation| op.parents(),
+            |op: &Operation| op.parents().collect_vec(),
         ) {
+            let op = op?;
             if operations_dir.join(op.id().hex()).is_file() {
                 if parent_op_id.is_none() {
                     parent_op_id = Some(op.id().clone())
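The `_ok` dag_walk variants accept Results as roots and neighbors and yield Results, so the caller can `?` inside the loop as the hunk above does. A hedged sketch of the general shape (illustrative only, not jj's actual implementation):

use std::collections::HashSet;
use std::hash::Hash;

// A fallible DFS in the spirit of dag_walk::dfs_ok: the walk yields
// Results and stops once an error surfaces.
fn dfs_ok<T, E>(
    roots: impl IntoIterator<Item = Result<T, E>>,
    mut neighbors: impl FnMut(&T) -> Vec<Result<T, E>>,
) -> Vec<Result<T, E>>
where
    T: Clone + Hash + Eq,
{
    let mut visited: HashSet<T> = HashSet::new();
    let mut stack: Vec<Result<T, E>> = roots.into_iter().collect();
    let mut output = Vec::new();
    while let Some(item) = stack.pop() {
        match item {
            Ok(node) if visited.insert(node.clone()) => {
                stack.extend(neighbors(&node));
                output.push(Ok(node));
            }
            Ok(_) => {} // already visited
            Err(err) => {
                output.push(Err(err)); // surface the error to the caller
                break;
            }
        }
    }
    output
}

fn main() {
    let out: Vec<Result<u32, String>> =
        dfs_ok([Ok(1)], |&n| if n < 3 { vec![Ok(n + 1)] } else { vec![] });
    assert_eq!(out.iter().filter(|r| r.is_ok()).count(), 3);
}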
@@ -22,7 +22,7 @@ use itertools::Itertools;
 use thiserror::Error;
 
 use crate::dag_walk;
-use crate::op_store::{OpStore, OpStoreError, OperationId};
+use crate::op_store::{OpStore, OpStoreError, OpStoreResult, OperationId};
 use crate::operation::Operation;
 
 #[derive(Debug, Error)]
@@ -54,20 +54,21 @@ pub trait OpHeadsStore: Send + Sync + Debug {
     /// Removes operations in the input that are ancestors of other operations
     /// in the input. The ancestors are removed both from the list and from
     /// storage.
-    fn handle_ancestor_ops(&self, op_heads: Vec<Operation>) -> Vec<Operation> {
+    // TODO: maybe introduce OpHeadsStoreError?
+    fn handle_ancestor_ops(&self, op_heads: Vec<Operation>) -> OpStoreResult<Vec<Operation>> {
         let op_head_ids_before: HashSet<_> = op_heads.iter().map(|op| op.id().clone()).collect();
         // Remove ancestors so we don't create merge operation with an operation and its
         // ancestor
-        let op_heads = dag_walk::heads(
-            op_heads,
+        let op_heads = dag_walk::heads_ok(
+            op_heads.into_iter().map(Ok),
             |op: &Operation| op.id().clone(),
-            |op: &Operation| op.parents(),
-        );
+            |op: &Operation| op.parents().collect_vec(),
+        )?;
         let op_head_ids_after: HashSet<_> = op_heads.iter().map(|op| op.id().clone()).collect();
         for removed_op_head in op_head_ids_before.difference(&op_head_ids_after) {
             self.remove_op_head(removed_op_head);
         }
-        op_heads.into_iter().collect()
+        Ok(op_heads.into_iter().collect())
     }
 }
 
@@ -121,7 +122,7 @@ pub fn resolve_op_heads<E>(
             Ok(Operation::new(op_store.clone(), op_id.clone(), data))
         })
         .try_collect()?;
-    let mut op_heads = op_heads_store.handle_ancestor_ops(op_heads);
+    let mut op_heads = op_heads_store.handle_ancestor_ops(op_heads)?;
 
     // Return without creating a merge operation
     if op_heads.len() == 1 {
@@ -20,6 +20,8 @@ use std::fmt::{Debug, Error, Formatter};
 use std::hash::{Hash, Hasher};
 use std::sync::Arc;
 
+use itertools::Itertools as _;
+
 use crate::backend::CommitId;
 use crate::op_store::{OpStore, OpStoreResult, OperationId, ViewId};
 use crate::{dag_walk, op_store};
@@ -80,17 +82,12 @@ impl Operation {
         &self.data.parents
     }
 
-    pub fn parents(&self) -> Vec<Operation> {
-        let mut parents = Vec::new();
-        for parent_id in &self.data.parents {
-            let data = self.op_store.read_operation(parent_id).unwrap();
-            parents.push(Operation::new(
-                self.op_store.clone(),
-                parent_id.clone(),
-                data,
-            ));
-        }
-        parents
+    pub fn parents(&self) -> impl ExactSizeIterator<Item = OpStoreResult<Operation>> + '_ {
+        let op_store = &self.op_store;
+        self.data.parents.iter().map(|parent_id| {
+            let data = op_store.read_operation(parent_id)?;
+            Ok(Operation::new(op_store.clone(), parent_id.clone(), data))
+        })
     }
 
     pub fn view(&self) -> OpStoreResult<View> {
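With parents() now returning an iterator of OpStoreResult<Operation>, callers that still want all parents at once can collect into a Result. A tiny standalone sketch of that standard-library behavior:

// Collecting Iterator<Item = Result<T, E>> into Result<Vec<T>, E>
// short-circuits on the first error.
fn main() {
    let ok: Result<Vec<u32>, String> = vec![Ok(1), Ok(2)].into_iter().collect();
    assert_eq!(ok, Ok(vec![1, 2]));

    let err: Result<Vec<u32>, String> =
        vec![Ok(1), Err("store error".to_string())].into_iter().collect();
    assert_eq!(err, Err("store error".to_string()));
}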
@@ -189,13 +186,13 @@ impl PartialOrd for OperationByEndTime {
 }
 
 /// Walks `head_op` and its ancestors in reverse topological order.
-pub fn walk_ancestors(head_op: &Operation) -> impl Iterator<Item = Operation> {
+pub fn walk_ancestors(head_op: &Operation) -> impl Iterator<Item = OpStoreResult<Operation>> {
     // Lazily load operations based on timestamp-based heuristic. This works so long
     // as the operation history is mostly linear.
-    dag_walk::topo_order_reverse_lazy(
-        vec![OperationByEndTime(head_op.clone())],
+    dag_walk::topo_order_reverse_lazy_ok(
+        [Ok(OperationByEndTime(head_op.clone()))],
         |OperationByEndTime(op)| op.id().clone(),
-        |OperationByEndTime(op)| op.parents().into_iter().map(OperationByEndTime),
+        |OperationByEndTime(op)| op.parents().map_ok(OperationByEndTime).collect_vec(),
     )
-    .map(|OperationByEndTime(op)| op)
+    .map_ok(|OperationByEndTime(op)| op)
 }
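walk_ancestors now threads Results through the walk with itertools adapters: map_ok applies a function to Ok values and forwards Err values unchanged. A tiny standalone demonstration:

use itertools::Itertools as _;

fn main() {
    let results: Vec<Result<i32, &str>> = vec![Ok(1), Err("boom"), Ok(2)];
    let mapped: Vec<Result<i32, &str>> =
        results.into_iter().map_ok(|n| n * 10).collect();
    assert_eq!(mapped, vec![Ok(10), Err("boom"), Ok(20)]);
}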
@@ -16,15 +16,16 @@
 
 use std::sync::Arc;
 
+use itertools::Itertools as _;
+
 use crate::backend::Timestamp;
-use crate::dag_walk::closest_common_node;
 use crate::index::ReadonlyIndex;
-use crate::op_store;
 use crate::op_store::OperationMetadata;
 use crate::operation::Operation;
 use crate::repo::{MutableRepo, ReadonlyRepo, Repo, RepoLoader, RepoLoaderError};
 use crate::settings::UserSettings;
 use crate::view::View;
+use crate::{dag_walk, op_store};
 
 pub struct Transaction {
     mut_repo: MutableRepo,
@@ -71,12 +72,12 @@ impl Transaction {
     }
 
     pub fn merge_operation(&mut self, other_op: Operation) -> Result<(), RepoLoaderError> {
-        let ancestor_op = closest_common_node(
-            self.parent_ops.clone(),
-            vec![other_op.clone()],
+        let ancestor_op = dag_walk::closest_common_node_ok(
+            self.parent_ops.iter().cloned().map(Ok),
+            [Ok(other_op.clone())],
             |op: &Operation| op.id().clone(),
-            |op: &Operation| op.parents(),
-        )
+            |op: &Operation| op.parents().collect_vec(),
+        )?
         .unwrap();
         let repo_loader = self.base_repo().loader();
         let base_repo = repo_loader.load_at(&ancestor_op)?;