2022-11-26 23:57:50 +00:00
|
|
|
// Copyright 2020 The Jujutsu Authors
|
2020-12-12 08:00:42 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2023-07-10 15:17:00 +00:00
|
|
|
#![allow(missing_docs)]
|
|
|
|
|
2020-12-12 08:00:42 +00:00
|
|
|
use std::fmt::Debug;
|
2023-03-30 20:11:18 +00:00
|
|
|
use std::fs;
|
|
|
|
use std::io::{ErrorKind, Write};
|
|
|
|
use std::path::{Path, PathBuf};
|
2020-12-12 08:00:42 +00:00
|
|
|
|
2023-03-30 20:11:18 +00:00
|
|
|
use prost::Message;
|
|
|
|
use tempfile::{NamedTempFile, PersistError};
|
2022-11-02 16:51:25 +00:00
|
|
|
|
2023-03-30 20:11:18 +00:00
|
|
|
use crate::backend::{CommitId, MillisSinceEpoch, ObjectId, Timestamp};
|
|
|
|
use crate::content_hash::blake2b_hash;
|
|
|
|
use crate::file_util::persist_content_addressed_temp_file;
|
|
|
|
use crate::op_store::{
|
|
|
|
BranchTarget, OpStore, OpStoreError, OpStoreResult, Operation, OperationId, OperationMetadata,
|
2023-07-12 22:20:44 +00:00
|
|
|
RefTarget, RefTargetMap, View, ViewId, WorkspaceId,
|
2023-03-30 20:11:18 +00:00
|
|
|
};
|
2022-11-02 16:51:25 +00:00
|
|
|
|
|
|
|
impl From<std::io::Error> for OpStoreError {
|
|
|
|
fn from(err: std::io::Error) -> Self {
|
2023-07-26 17:27:43 +00:00
|
|
|
OpStoreError::Other(err.into())
|
2022-11-02 16:51:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl From<PersistError> for OpStoreError {
    /// Wraps temp-file persistence failures as the generic `Other` store
    /// error.
    fn from(err: PersistError) -> Self {
        Self::Other(err.into())
    }
}
|
|
|
|
|
2023-03-30 20:11:18 +00:00
|
|
|
impl From<prost::DecodeError> for OpStoreError {
    /// Wraps protobuf decoding failures as the generic `Other` store error.
    fn from(err: prost::DecodeError) -> Self {
        Self::Other(err.into())
    }
}
|
|
|
|
|
2020-12-12 08:00:42 +00:00
|
|
|
/// An `OpStore` implementation that stores views and operations as
/// protobuf-serialized files under "views" and "operations" subdirectories
/// of `path`, one file per object, named by the hex object id.
#[derive(Debug)]
pub struct SimpleOpStore {
    // Root directory of the store (contains "views" and "operations").
    path: PathBuf,
}
|
|
|
|
|
2020-12-12 08:00:42 +00:00
|
|
|
impl SimpleOpStore {
|
2023-04-11 03:40:03 +00:00
|
|
|
/// Creates an empty OpStore, panics if it already exists
|
2022-12-14 19:10:42 +00:00
|
|
|
pub fn init(store_path: &Path) -> Self {
|
2023-03-30 20:11:18 +00:00
|
|
|
fs::create_dir(store_path.join("views")).unwrap();
|
|
|
|
fs::create_dir(store_path.join("operations")).unwrap();
|
|
|
|
SimpleOpStore {
|
|
|
|
path: store_path.to_owned(),
|
|
|
|
}
|
2020-12-12 08:00:42 +00:00
|
|
|
}
|
|
|
|
|
2023-04-11 03:40:03 +00:00
|
|
|
/// Load an existing OpStore
|
2022-12-14 19:10:42 +00:00
|
|
|
pub fn load(store_path: &Path) -> Self {
|
2023-03-30 20:11:18 +00:00
|
|
|
SimpleOpStore {
|
|
|
|
path: store_path.to_path_buf(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn view_path(&self, id: &ViewId) -> PathBuf {
|
|
|
|
self.path.join("views").join(id.hex())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn operation_path(&self, id: &OperationId) -> PathBuf {
|
|
|
|
self.path.join("operations").join(id.hex())
|
2020-12-12 08:00:42 +00:00
|
|
|
}
|
2022-11-02 16:51:25 +00:00
|
|
|
}
|
|
|
|
|
2020-12-12 08:00:42 +00:00
|
|
|
impl OpStore for SimpleOpStore {
|
2022-12-14 18:22:12 +00:00
|
|
|
fn name(&self) -> &str {
|
|
|
|
"simple_op_store"
|
|
|
|
}
|
|
|
|
|
2020-12-12 08:00:42 +00:00
|
|
|
fn read_view(&self, id: &ViewId) -> OpStoreResult<View> {
|
2023-03-30 20:11:18 +00:00
|
|
|
let path = self.view_path(id);
|
2023-07-26 19:54:30 +00:00
|
|
|
let buf = fs::read(path).map_err(not_found_to_store_error)?;
|
2023-03-30 20:11:18 +00:00
|
|
|
|
|
|
|
let proto = crate::protos::op_store::View::decode(&*buf)?;
|
|
|
|
Ok(view_from_proto(proto))
|
2020-12-12 08:00:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fn write_view(&self, view: &View) -> OpStoreResult<ViewId> {
|
2023-03-30 20:11:18 +00:00
|
|
|
let temp_file = NamedTempFile::new_in(&self.path)?;
|
|
|
|
|
|
|
|
let proto = view_to_proto(view);
|
|
|
|
temp_file.as_file().write_all(&proto.encode_to_vec())?;
|
|
|
|
|
|
|
|
let id = ViewId::new(blake2b_hash(view).to_vec());
|
|
|
|
|
|
|
|
persist_content_addressed_temp_file(temp_file, self.view_path(&id))?;
|
|
|
|
Ok(id)
|
2020-12-12 08:00:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
|
2023-03-30 20:11:18 +00:00
|
|
|
let path = self.operation_path(id);
|
|
|
|
let buf = fs::read(path).map_err(not_found_to_store_error)?;
|
|
|
|
|
|
|
|
let proto = crate::protos::op_store::Operation::decode(&*buf)?;
|
|
|
|
Ok(operation_from_proto(proto))
|
2020-12-12 08:00:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fn write_operation(&self, operation: &Operation) -> OpStoreResult<OperationId> {
|
2023-03-30 20:11:18 +00:00
|
|
|
let temp_file = NamedTempFile::new_in(&self.path)?;
|
|
|
|
|
|
|
|
let proto = operation_to_proto(operation);
|
|
|
|
temp_file.as_file().write_all(&proto.encode_to_vec())?;
|
|
|
|
|
|
|
|
let id = OperationId::new(blake2b_hash(operation).to_vec());
|
|
|
|
|
|
|
|
persist_content_addressed_temp_file(temp_file, self.operation_path(&id))?;
|
|
|
|
Ok(id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn not_found_to_store_error(err: std::io::Error) -> OpStoreError {
|
|
|
|
if err.kind() == ErrorKind::NotFound {
|
|
|
|
OpStoreError::NotFound
|
|
|
|
} else {
|
|
|
|
OpStoreError::from(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Converts an in-memory `Timestamp` to its protobuf representation.
fn timestamp_to_proto(timestamp: &Timestamp) -> crate::protos::op_store::Timestamp {
    let millis_since_epoch = timestamp.timestamp.0;
    let tz_offset = timestamp.tz_offset;
    crate::protos::op_store::Timestamp {
        millis_since_epoch,
        tz_offset,
    }
}
|
|
|
|
|
|
|
|
fn timestamp_from_proto(proto: crate::protos::op_store::Timestamp) -> Timestamp {
|
|
|
|
Timestamp {
|
|
|
|
timestamp: MillisSinceEpoch(proto.millis_since_epoch),
|
|
|
|
tz_offset: proto.tz_offset,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn operation_metadata_to_proto(
|
|
|
|
metadata: &OperationMetadata,
|
|
|
|
) -> crate::protos::op_store::OperationMetadata {
|
|
|
|
crate::protos::op_store::OperationMetadata {
|
|
|
|
start_time: Some(timestamp_to_proto(&metadata.start_time)),
|
|
|
|
end_time: Some(timestamp_to_proto(&metadata.end_time)),
|
|
|
|
description: metadata.description.clone(),
|
|
|
|
hostname: metadata.hostname.clone(),
|
|
|
|
username: metadata.username.clone(),
|
|
|
|
tags: metadata.tags.clone(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn operation_metadata_from_proto(
|
|
|
|
proto: crate::protos::op_store::OperationMetadata,
|
|
|
|
) -> OperationMetadata {
|
|
|
|
let start_time = timestamp_from_proto(proto.start_time.unwrap_or_default());
|
|
|
|
let end_time = timestamp_from_proto(proto.end_time.unwrap_or_default());
|
|
|
|
OperationMetadata {
|
|
|
|
start_time,
|
|
|
|
end_time,
|
|
|
|
description: proto.description,
|
|
|
|
hostname: proto.hostname,
|
|
|
|
username: proto.username,
|
|
|
|
tags: proto.tags,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn operation_to_proto(operation: &Operation) -> crate::protos::op_store::Operation {
|
|
|
|
let mut proto = crate::protos::op_store::Operation {
|
|
|
|
view_id: operation.view_id.as_bytes().to_vec(),
|
|
|
|
metadata: Some(operation_metadata_to_proto(&operation.metadata)),
|
|
|
|
..Default::default()
|
|
|
|
};
|
|
|
|
for parent in &operation.parents {
|
|
|
|
proto.parents.push(parent.to_bytes());
|
|
|
|
}
|
|
|
|
proto
|
|
|
|
}
|
|
|
|
|
|
|
|
fn operation_from_proto(proto: crate::protos::op_store::Operation) -> Operation {
|
|
|
|
let parents = proto.parents.into_iter().map(OperationId::new).collect();
|
|
|
|
let view_id = ViewId::new(proto.view_id);
|
|
|
|
let metadata = operation_metadata_from_proto(proto.metadata.unwrap_or_default());
|
|
|
|
Operation {
|
|
|
|
view_id,
|
|
|
|
parents,
|
|
|
|
metadata,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn view_to_proto(view: &View) -> crate::protos::op_store::View {
|
|
|
|
let mut proto = crate::protos::op_store::View::default();
|
|
|
|
for (workspace_id, commit_id) in &view.wc_commit_ids {
|
|
|
|
proto
|
|
|
|
.wc_commit_ids
|
|
|
|
.insert(workspace_id.as_str().to_string(), commit_id.to_bytes());
|
|
|
|
}
|
|
|
|
for head_id in &view.head_ids {
|
|
|
|
proto.head_ids.push(head_id.to_bytes());
|
|
|
|
}
|
|
|
|
for head_id in &view.public_head_ids {
|
|
|
|
proto.public_head_ids.push(head_id.to_bytes());
|
|
|
|
}
|
|
|
|
|
|
|
|
for (name, target) in &view.branches {
|
|
|
|
let mut branch_proto = crate::protos::op_store::Branch {
|
|
|
|
name: name.clone(),
|
|
|
|
..Default::default()
|
|
|
|
};
|
|
|
|
branch_proto.name = name.clone();
|
2023-07-12 14:41:38 +00:00
|
|
|
branch_proto.local_target = ref_target_to_proto(&target.local_target);
|
2023-03-30 20:11:18 +00:00
|
|
|
for (remote_name, target) in &target.remote_targets {
|
|
|
|
branch_proto
|
|
|
|
.remote_branches
|
|
|
|
.push(crate::protos::op_store::RemoteBranch {
|
|
|
|
remote_name: remote_name.clone(),
|
2023-07-12 14:41:38 +00:00
|
|
|
target: ref_target_to_proto(target),
|
2023-03-30 20:11:18 +00:00
|
|
|
});
|
|
|
|
}
|
|
|
|
proto.branches.push(branch_proto);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (name, target) in &view.tags {
|
|
|
|
proto.tags.push(crate::protos::op_store::Tag {
|
|
|
|
name: name.clone(),
|
2023-07-12 14:41:38 +00:00
|
|
|
target: ref_target_to_proto(target),
|
2023-03-30 20:11:18 +00:00
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
for (git_ref_name, target) in &view.git_refs {
|
|
|
|
proto.git_refs.push(crate::protos::op_store::GitRef {
|
|
|
|
name: git_ref_name.clone(),
|
2023-07-12 14:41:38 +00:00
|
|
|
target: ref_target_to_proto(target),
|
2023-03-30 20:11:18 +00:00
|
|
|
..Default::default()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2023-07-12 14:41:38 +00:00
|
|
|
proto.git_head = ref_target_to_proto(&view.git_head);
|
2023-03-30 20:11:18 +00:00
|
|
|
|
|
|
|
proto
|
|
|
|
}
|
|
|
|
|
|
|
|
fn view_from_proto(proto: crate::protos::op_store::View) -> View {
|
|
|
|
let mut view = View::default();
|
|
|
|
// For compatibility with old repos before we had support for multiple working
|
|
|
|
// copies
|
|
|
|
#[allow(deprecated)]
|
|
|
|
if !proto.wc_commit_id.is_empty() {
|
|
|
|
view.wc_commit_ids
|
|
|
|
.insert(WorkspaceId::default(), CommitId::new(proto.wc_commit_id));
|
|
|
|
}
|
|
|
|
for (workspace_id, commit_id) in proto.wc_commit_ids {
|
|
|
|
view.wc_commit_ids
|
|
|
|
.insert(WorkspaceId::new(workspace_id), CommitId::new(commit_id));
|
|
|
|
}
|
|
|
|
for head_id_bytes in proto.head_ids {
|
|
|
|
view.head_ids.insert(CommitId::new(head_id_bytes));
|
|
|
|
}
|
|
|
|
for head_id_bytes in proto.public_head_ids {
|
|
|
|
view.public_head_ids.insert(CommitId::new(head_id_bytes));
|
|
|
|
}
|
|
|
|
|
|
|
|
for branch_proto in proto.branches {
|
2023-07-12 13:11:50 +00:00
|
|
|
let local_target = ref_target_from_proto(branch_proto.local_target);
|
2023-03-30 20:11:18 +00:00
|
|
|
|
view: add wrapper that will exclude absent RefTarget entries from ContentHash
The next commit will change these maps to store Option<RefTarget> entries, but
None entries will still be omitted from the serialized data. Since ContentHash
should describe the serialized data, relying on the generic ContentHash would
cause future hash conflict where absent RefTarget entries will be preserved.
For example, ([remove], [None, add]) will be serialized as ([remove], [add]),
and deserialized to ([remove], [add, None]). If we add support for lossless
serialization, hash(([remove], [None, add])) should differ from the lossy one.
2023-07-13 10:05:01 +00:00
|
|
|
let mut remote_targets = RefTargetMap::new();
|
2023-03-30 20:11:18 +00:00
|
|
|
for remote_branch in branch_proto.remote_branches {
|
|
|
|
remote_targets.insert(
|
|
|
|
remote_branch.remote_name,
|
2023-07-12 14:41:38 +00:00
|
|
|
ref_target_from_proto(remote_branch.target),
|
2023-03-30 20:11:18 +00:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
view.branches.insert(
|
|
|
|
branch_proto.name.clone(),
|
|
|
|
BranchTarget {
|
|
|
|
local_target,
|
|
|
|
remote_targets,
|
|
|
|
},
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
for tag_proto in proto.tags {
|
2023-07-12 14:41:38 +00:00
|
|
|
view.tags
|
|
|
|
.insert(tag_proto.name, ref_target_from_proto(tag_proto.target));
|
2023-03-30 20:11:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for git_ref in proto.git_refs {
|
2023-07-12 13:11:50 +00:00
|
|
|
let target = if git_ref.target.is_some() {
|
|
|
|
ref_target_from_proto(git_ref.target)
|
2023-03-30 20:11:18 +00:00
|
|
|
} else {
|
|
|
|
// Legacy format
|
2023-07-12 13:11:50 +00:00
|
|
|
RefTarget::normal(CommitId::new(git_ref.commit_id))
|
|
|
|
};
|
2023-07-12 14:41:38 +00:00
|
|
|
view.git_refs.insert(git_ref.name, target);
|
2023-03-30 20:11:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[allow(deprecated)]
|
2023-07-12 13:11:50 +00:00
|
|
|
if proto.git_head.is_some() {
|
|
|
|
view.git_head = ref_target_from_proto(proto.git_head);
|
2023-03-30 20:11:18 +00:00
|
|
|
} else if !proto.git_head_legacy.is_empty() {
|
2023-07-11 13:14:59 +00:00
|
|
|
view.git_head = RefTarget::normal(CommitId::new(proto.git_head_legacy));
|
2023-03-30 20:11:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
view
|
|
|
|
}
|
|
|
|
|
2023-07-12 22:20:44 +00:00
|
|
|
/// Serializes a `RefTarget` to its protobuf representation.
///
/// An absent target is encoded as `None` (i.e. it is omitted from the
/// serialized data entirely).
fn ref_target_to_proto(value: &RefTarget) -> Option<crate::protos::op_store::RefTarget> {
    use crate::protos::op_store::ref_target::Value;
    let proto_value = if let Some(id) = value.as_normal() {
        Value::CommitId(id.to_bytes())
    } else if value.has_conflict() {
        // TODO: Preserve "absent" targets, and remove op_store::RefTargetMap hack.
        Value::Conflict(crate::protos::op_store::RefConflict {
            removes: value.removed_ids().map(|id| id.to_bytes()).collect(),
            adds: value.added_ids().map(|id| id.to_bytes()).collect(),
        })
    } else {
        assert!(value.is_absent());
        return None;
    };
    Some(crate::protos::op_store::RefTarget {
        value: Some(proto_value),
    })
}
|
|
|
|
|
2023-07-12 22:20:44 +00:00
|
|
|
fn ref_target_from_proto(maybe_proto: Option<crate::protos::op_store::RefTarget>) -> RefTarget {
|
|
|
|
let Some(proto) = maybe_proto else {
|
|
|
|
return RefTarget::absent();
|
|
|
|
};
|
2023-03-30 20:11:18 +00:00
|
|
|
match proto.value.unwrap() {
|
|
|
|
crate::protos::op_store::ref_target::Value::CommitId(id) => {
|
2023-07-12 13:11:50 +00:00
|
|
|
RefTarget::normal(CommitId::new(id))
|
2023-03-30 20:11:18 +00:00
|
|
|
}
|
|
|
|
crate::protos::op_store::ref_target::Value::Conflict(conflict) => {
|
2023-07-12 13:11:50 +00:00
|
|
|
let removes = conflict.removes.into_iter().map(CommitId::new);
|
|
|
|
let adds = conflict.adds.into_iter().map(CommitId::new);
|
|
|
|
RefTarget::from_legacy_form(removes, adds)
|
2023-03-30 20:11:18 +00:00
|
|
|
}
|
2021-07-31 00:47:30 +00:00
|
|
|
}
|
|
|
|
}
|
2022-11-02 16:51:25 +00:00
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use insta::assert_snapshot;
    use maplit::{btreemap, hashmap, hashset};

    use super::*;
    use crate::backend::{CommitId, MillisSinceEpoch, ObjectId, Timestamp};
    use crate::content_hash::blake2b_hash;
    use crate::op_store::{BranchTarget, OperationMetadata, RefTarget, WorkspaceId};

    // Builds a View exercising every serialized field: heads, public heads,
    // branches (including one with an absent local target), tags, git refs
    // (including a conflicted one), git_head, and multiple working copies.
    fn create_view() -> View {
        let head_id1 = CommitId::from_hex("aaa111");
        let head_id2 = CommitId::from_hex("aaa222");
        let public_head_id1 = CommitId::from_hex("bbb444");
        let public_head_id2 = CommitId::from_hex("bbb555");
        let branch_main_local_target = RefTarget::normal(CommitId::from_hex("ccc111"));
        let branch_main_origin_target = RefTarget::normal(CommitId::from_hex("ccc222"));
        let branch_deleted_origin_target = RefTarget::normal(CommitId::from_hex("ccc333"));
        let tag_v1_target = RefTarget::normal(CommitId::from_hex("ddd111"));
        let git_refs_main_target = RefTarget::normal(CommitId::from_hex("fff111"));
        // A conflicted ref target (one remove, two adds).
        let git_refs_feature_target = RefTarget::from_legacy_form(
            [CommitId::from_hex("fff111")],
            [CommitId::from_hex("fff222"), CommitId::from_hex("fff333")],
        );
        let default_wc_commit_id = CommitId::from_hex("abc111");
        let test_wc_commit_id = CommitId::from_hex("abc222");
        View {
            head_ids: hashset! {head_id1, head_id2},
            public_head_ids: hashset! {public_head_id1, public_head_id2},
            branches: btreemap! {
                "main".to_string() => BranchTarget {
                    local_target: branch_main_local_target,
                    remote_targets: RefTargetMap(btreemap! {
                        "origin".to_string() => branch_main_origin_target,
                    }),
                },
                // Branch deleted locally but still present on the remote.
                "deleted".to_string() => BranchTarget {
                    local_target: RefTarget::absent(),
                    remote_targets: RefTargetMap(btreemap! {
                        "origin".to_string() => branch_deleted_origin_target,
                    }),
                },
            },
            tags: RefTargetMap(btreemap! {
                "v1.0".to_string() => tag_v1_target,
            }),
            git_refs: RefTargetMap(btreemap! {
                "refs/heads/main".to_string() => git_refs_main_target,
                "refs/heads/feature".to_string() => git_refs_feature_target,
            }),
            git_head: RefTarget::normal(CommitId::from_hex("fff111")),
            wc_commit_ids: hashmap! {
                WorkspaceId::default() => default_wc_commit_id,
                WorkspaceId::new("test".to_string()) => test_wc_commit_id,
            },
        }
    }

    // Builds an Operation with multiple parents and fully-populated metadata.
    fn create_operation() -> Operation {
        Operation {
            view_id: ViewId::from_hex("aaa111"),
            parents: vec![
                OperationId::from_hex("bbb111"),
                OperationId::from_hex("bbb222"),
            ],
            metadata: OperationMetadata {
                start_time: Timestamp {
                    timestamp: MillisSinceEpoch(123456789),
                    tz_offset: 3600,
                },
                end_time: Timestamp {
                    timestamp: MillisSinceEpoch(123456800),
                    tz_offset: 3600,
                },
                description: "check out foo".to_string(),
                hostname: "some.host.example.com".to_string(),
                username: "someone".to_string(),
                tags: hashmap! {
                    "key1".to_string() => "value1".to_string(),
                    "key2".to_string() => "value2".to_string(),
                },
            },
        }
    }

    #[test]
    fn test_hash_view() {
        // Test exact output so we detect regressions in compatibility
        assert_snapshot!(
            ViewId::new(blake2b_hash(&create_view()).to_vec()).hex(),
            @"7f47fa81494d7189cb1827b83b3f834662f0f61b4c4090298067e85cdc60f773bf639c4e6a3554a4e401650218ca240291ce591f45a1c501ade1d2b9f97e1a37"
        );
    }

    #[test]
    fn test_hash_operation() {
        // Test exact output so we detect regressions in compatibility
        assert_snapshot!(
            OperationId::new(blake2b_hash(&create_operation()).to_vec()).hex(),
            @"3ec986c29ff8eb808ea8f6325d6307cea75ef02987536c8e4645406aba51afc8e229957a6e855170d77a66098c58912309323f5e0b32760caa2b59dc84d45fcf"
        );
    }

    // Round-trip: a view written to the store reads back equal.
    #[test]
    fn test_read_write_view() {
        let temp_dir = testutils::new_temp_dir();
        let store = SimpleOpStore::init(temp_dir.path());
        let view = create_view();
        let view_id = store.write_view(&view).unwrap();
        let read_view = store.read_view(&view_id).unwrap();
        assert_eq!(read_view, view);
    }

    // Round-trip: an operation written to the store reads back equal.
    #[test]
    fn test_read_write_operation() {
        let temp_dir = testutils::new_temp_dir();
        let store = SimpleOpStore::init(temp_dir.path());
        let operation = create_operation();
        let op_id = store.write_operation(&operation).unwrap();
        let read_operation = store.read_operation(&op_id).unwrap();
        assert_eq!(read_operation, operation);
    }
}
|