// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::Path;

use itertools::Itertools;
use jj_lib::repo::{Repo, StoreFactories};
use jj_lib::workspace::{default_working_copy_factories, Workspace};
use test_case::test_case;
use testutils::{create_random_commit, load_repo_at_head, TestRepoBackend, TestWorkspace};

fn copy_directory(src: &Path, dst: &Path) {
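    // Copies the tree rooted at `src` into `dst`, recursing into
    // subdirectories. Errors from `create_dir` are ignored so that an existing
    // destination directory is reused.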
    std::fs::create_dir(dst).ok();
    for entry in std::fs::read_dir(src).unwrap() {
        let child_src = entry.unwrap().path();
        let base_name = child_src.file_name().unwrap();
        let child_dst = dst.join(base_name);
        if child_src.is_dir() {
            copy_directory(&child_src, &child_dst);
        } else {
            std::fs::copy(&child_src, &child_dst).unwrap();
        }
    }
}

fn merge_directories(left: &Path, base: &Path, right: &Path, output: &Path) {
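    // A three-way merge of directory contents, approximating how a distributed
    // file system might reconcile concurrent writes: left-side files are
    // copied, files deleted on the right are removed, files added on the right
    // are added, and subdirectories are merged recursively.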
    std::fs::create_dir(output).unwrap();
    let mut sub_dirs = vec![];
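    // `sub_dirs` collects the names of subdirectories seen on any of the three
    // sides; they are merged recursively at the end.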
    // Walk the left side and copy to the output
    if left.exists() {
        for entry in std::fs::read_dir(left).unwrap() {
            let path = entry.unwrap().path();
            let base_name = path.file_name().unwrap();
            let child_left = left.join(base_name);
            let child_output = output.join(base_name);
            if child_left.is_dir() {
                sub_dirs.push(base_name.to_os_string());
            } else {
                std::fs::copy(&child_left, child_output).unwrap();
            }
        }
    }
    // Walk the base and find files removed in the right side, then remove them in
    // the output
    if base.exists() {
        for entry in std::fs::read_dir(base).unwrap() {
            let path = entry.unwrap().path();
            let base_name = path.file_name().unwrap();
            let child_base = base.join(base_name);
            let child_right = right.join(base_name);
            let child_output = output.join(base_name);
            if child_base.is_dir() {
                sub_dirs.push(base_name.to_os_string());
            } else if !child_right.exists() {
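                // The file was deleted on the right side, so delete it from
                // the output. Ignore errors: if it was also deleted on the
                // left, it is already absent from the output.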
                std::fs::remove_file(child_output).ok();
            }
        }
    }
    // Walk the right side and find files added in the right side, then add them in
    // the output
    if right.exists() {
        for entry in std::fs::read_dir(right).unwrap() {
            let path = entry.unwrap().path();
            let base_name = path.file_name().unwrap();
            let child_base = base.join(base_name);
            let child_right = right.join(base_name);
            let child_output = output.join(base_name);
            if child_right.is_dir() {
                sub_dirs.push(base_name.to_os_string());
            } else if !child_base.exists() {
                // This overwrites the left side if that's been written. That's fine, since the
                // point of the test is that it should be okay for either side to win.
                std::fs::copy(&child_right, child_output).unwrap();
            }
        }
    }
    // Do the merge in subdirectories
    for base_name in sub_dirs.iter().sorted().dedup() {
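        // The same name may have been pushed up to three times (once per
        // side), hence the `sorted().dedup()` above.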
        let child_base = base.join(base_name);
        let child_right = right.join(base_name);
        let child_left = left.join(base_name);
        let child_output = output.join(base_name);
        merge_directories(&child_left, &child_base, &child_right, &child_output);
    }
}

#[test_case(TestRepoBackend::Local ; "local backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_bad_locking_children(backend: TestRepoBackend) {
    // Test that two new commits created on separate machines are both visible (not
    // lost due to lack of locking)
    let settings = testutils::user_settings();
    let test_workspace = TestWorkspace::init_with_backend(&settings, backend);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root();

    let mut tx = repo.start_transaction(&settings);
    let initial = create_random_commit(tx.mut_repo(), &settings)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write()
        .unwrap();
    tx.commit("test");
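    // The workspace now holds the state that both simulated machines will
    // start from.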

    // Simulate a write of a commit that happens on one machine
    let machine1_root = test_workspace.root_dir().join("machine1");
    copy_directory(workspace_root, &machine1_root);
    let machine1_workspace = Workspace::load(
        &settings,
        &machine1_root,
        &StoreFactories::default(),
        &default_working_copy_factories(),
    )
    .unwrap();
    let machine1_repo = machine1_workspace
        .repo_loader()
        .load_at_head(&settings)
        .unwrap();
    let mut machine1_tx = machine1_repo.start_transaction(&settings);
    let child1 = create_random_commit(machine1_tx.mut_repo(), &settings)
        .set_parents(vec![initial.id().clone()])
        .write()
        .unwrap();
    machine1_tx.commit("test");

    // Simulate a write of a commit that happens on another machine
    let machine2_root = test_workspace.root_dir().join("machine2");
    copy_directory(workspace_root, &machine2_root);
    let machine2_workspace = Workspace::load(
        &settings,
        &machine2_root,
        &StoreFactories::default(),
        &default_working_copy_factories(),
    )
    .unwrap();
    let machine2_repo = machine2_workspace
        .repo_loader()
        .load_at_head(&settings)
        .unwrap();
    let mut machine2_tx = machine2_repo.start_transaction(&settings);
    let child2 = create_random_commit(machine2_tx.mut_repo(), &settings)
        .set_parents(vec![initial.id().clone()])
        .write()
        .unwrap();
    machine2_tx.commit("test");

    // Simulate that the distributed file system now has received the changes from
    // both machines
    let merged_path = test_workspace.root_dir().join("merged");
    merge_directories(&machine1_root, workspace_root, &machine2_root, &merged_path);
    let merged_workspace = Workspace::load(
        &settings,
        &merged_path,
        &StoreFactories::default(),
        &default_working_copy_factories(),
    )
    .unwrap();
    let merged_repo = merged_workspace
        .repo_loader()
        .load_at_head(&settings)
        .unwrap();
    assert!(merged_repo.view().heads().contains(child1.id()));
    assert!(merged_repo.view().heads().contains(child2.id()));
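    // Loading the merged repo resolved the two concurrent operations, so the
    // resulting operation should have both of them as parents.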
    let op_id = merged_repo.op_id().clone();
    let op = merged_repo.op_store().read_operation(&op_id).unwrap();
    assert_eq!(op.parents.len(), 2);
}

#[test_case(TestRepoBackend::Local ; "local backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_bad_locking_interrupted(backend: TestRepoBackend) {
    // Test that an interrupted update of the op-heads resulting in one op-head
    // that's a descendant of the other is resolved without creating a new
    // operation.
    let settings = testutils::user_settings();
    let test_workspace = TestWorkspace::init_with_backend(&settings, backend);
    let repo = &test_workspace.repo;

    let mut tx = repo.start_transaction(&settings);
    let initial = create_random_commit(tx.mut_repo(), &settings)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write()
        .unwrap();
    let repo = tx.commit("test");

    // Simulate a crash that resulted in the old op-head being left in place. We
    // simulate it somewhat hackily by copying the .jj/op_heads/ directory before
    // the operation and then copying that back afterwards, leaving the existing
    // op-head(s) in place.
    let op_heads_dir = repo.repo_path().join("op_heads");
    let backup_path = test_workspace.root_dir().join("backup");
    copy_directory(&op_heads_dir, &backup_path);
    let mut tx = repo.start_transaction(&settings);
    create_random_commit(tx.mut_repo(), &settings)
        .set_parents(vec![initial.id().clone()])
        .write()
        .unwrap();
    let op_id = tx.commit("test").operation().id().clone();
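
    // Restore the old op-head alongside the new one, as if the crash happened
    // before the commit could clean up the old head.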
    copy_directory(&backup_path, &op_heads_dir);

    // Reload the repo and check that only the new head is present.
    let reloaded_repo = load_repo_at_head(&settings, repo.repo_path());
    assert_eq!(reloaded_repo.op_id(), &op_id);

    // Reload once more to make sure that the .jj/op_heads/ directory was updated
    // correctly.
    let reloaded_repo = load_repo_at_head(&settings, repo.repo_path());
    assert_eq!(reloaded_repo.op_id(), &op_id);
}