jj/lib/tests/test_local_working_copy_concurrent.rs
Martin von Zweigbergk 187ba9430a working_copy: rename to local_working_copy
It's about time we make the working copy a pluggable backend like we
have for the other storage. We will use it at Google for at least two
reasons:

 * To support our virtual file system. That will be a completely
   separate working copy backend, which will interact with the virtual
   file system to update and snapshot the working copy.

 * On local disk, we need to tell our build system where to find the
   paths that are not in the sparse patterns. We plan to do that by
   wrapping the standard local working copy backend (the one moved in
   this commit), writing a symlink that points to the mainline commit
   where the "background" files can be read from.

Let's start by renaming the existing implementation to
`local_working_copy`.
2023-10-07 08:19:03 -07:00

// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::cmp::max;
use std::thread;

use assert_matches::assert_matches;
use jj_lib::local_working_copy::{CheckoutError, SnapshotOptions};
use jj_lib::repo::Repo;
use jj_lib::repo_path::RepoPath;
use jj_lib::workspace::Workspace;
use testutils::{create_tree, write_working_copy_file, TestRepo, TestWorkspace};

#[test]
fn test_concurrent_checkout() {
    // Test that we error out if a concurrent checkout is detected (i.e. if the
    // working-copy commit changed on disk after we read it).
    let settings = testutils::user_settings();
    let mut test_workspace1 = TestWorkspace::init(&settings);
    let repo1 = test_workspace1.repo.clone();
    let workspace1_root = test_workspace1.workspace.workspace_root().clone();

    let tree_id1 = testutils::create_random_tree(&repo1);
    let tree_id2 = testutils::create_random_tree(&repo1);
    let tree_id3 = testutils::create_random_tree(&repo1);
    let tree1 = repo1.store().get_root_tree(&tree_id1).unwrap();
    let tree2 = repo1.store().get_root_tree(&tree_id2).unwrap();
    let tree3 = repo1.store().get_root_tree(&tree_id3).unwrap();

    // Check out tree1
    let wc1 = test_workspace1.workspace.working_copy_mut();
    // The operation ID is not correct, but that doesn't matter for this test
    wc1.check_out(repo1.op_id().clone(), None, &tree1).unwrap();
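    // Passing `None` as the old tree id means this checkout doesn't declare
    // what it expects to currently be checked out, so it shouldn't trigger the
    // concurrent-checkout check; the calls below pass `Some(&tree_id1)` to make
    // that expectation explicit.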

    // Check out tree2 from another process (simulated by another workspace
    // instance)
    let mut workspace2 = Workspace::load(
        &settings,
        &workspace1_root,
        &TestRepo::default_store_factories(),
    )
    .unwrap();
    workspace2
        .working_copy_mut()
        .check_out(repo1.op_id().clone(), Some(&tree_id1), &tree2)
        .unwrap();
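    // On disk, the working copy now records tree2, while `wc1` still believes
    // tree1 is checked out; the stale `Some(&tree_id1)` below is what lets that
    // mismatch be detected rather than silently overwritten.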

    // Checking out another tree (via the first repo instance) should now fail.
    assert_matches!(
        wc1.check_out(repo1.op_id().clone(), Some(&tree_id1), &tree3),
        Err(CheckoutError::ConcurrentCheckout)
    );

    // Check that tree2 is still checked out on disk.
    let workspace3 = Workspace::load(
        &settings,
        &workspace1_root,
        &TestRepo::default_store_factories(),
    )
    .unwrap();
    assert_eq!(
        *workspace3.working_copy().current_tree_id().unwrap(),
        tree_id2
    );
}

#[test]
fn test_checkout_parallel() {
    // Test that concurrent checkouts by different processes (simulated by using
    // different repo instances) are safe.
    let settings = testutils::user_settings();
    let mut test_workspace = TestWorkspace::init(&settings);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    let num_threads = max(num_cpus::get(), 4);
    let mut tree_ids = vec![];
    for i in 0..num_threads {
        let path = RepoPath::from_internal_string(format!("file{i}").as_str());
        let tree = create_tree(repo, &[(&path, "contents")]);
        tree_ids.push(tree.id());
    }

    // Create another tree just so we can test the update stats reliably from the
    // first update
    let tree = create_tree(
        repo,
        &[(&RepoPath::from_internal_string("other file"), "contents")],
    );
    test_workspace
        .workspace
        .working_copy_mut()
        .check_out(repo.op_id().clone(), None, &tree)
        .unwrap();

    thread::scope(|s| {
        for tree_id in &tree_ids {
            let op_id = repo.op_id().clone();
            let tree_ids = tree_ids.clone();
            let tree_id = tree_id.clone();
            let settings = settings.clone();
            let workspace_root = workspace_root.clone();
            s.spawn(move || {
                let mut workspace = Workspace::load(
                    &settings,
                    &workspace_root,
                    &TestRepo::default_store_factories(),
                )
                .unwrap();
                let tree = workspace
                    .repo_loader()
                    .store()
                    .get_root_tree(&tree_id)
                    .unwrap();
                // The operation ID is not correct, but that doesn't matter for this test
                let stats = workspace
                    .working_copy_mut()
                    .check_out(op_id, None, &tree)
                    .unwrap();
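                // Whichever tree was on disk before this checkout (the initial
                // "other file" tree or another thread's "file{i}" tree), its
                // single file has a different path from this thread's file, so
                // the checkout adds one file, removes one, and updates none.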
                assert_eq!(stats.updated_files, 0);
                assert_eq!(stats.added_files, 1);
                assert_eq!(stats.removed_files, 1);
                // Check that the working copy contains one of the trees. We may see a
                // different tree than the one we just checked out, but since
                // write_tree() should take the same lock as check_out(), write_tree()
                // should never produce a different tree.
                let mut locked_wc = workspace.working_copy_mut().start_mutation().unwrap();
                let new_tree_id = locked_wc
                    .snapshot(SnapshotOptions::empty_for_test())
                    .unwrap();
                assert!(tree_ids.contains(&new_tree_id));
            });
        }
    });
}

#[test]
fn test_racy_checkout() {
    let settings = testutils::user_settings();
    let mut test_workspace = TestWorkspace::init(&settings);
    let repo = &test_workspace.repo;
    let op_id = repo.op_id().clone();
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    let path = RepoPath::from_internal_string("file");
    let tree = create_tree(repo, &[(&path, "1")]);

    let mut num_matches = 0;
    for _ in 0..100 {
        let wc = test_workspace.workspace.working_copy_mut();
        wc.check_out(op_id.clone(), None, &tree).unwrap();
        assert_eq!(
            std::fs::read(path.to_fs_path(&workspace_root)).unwrap(),
            b"1".to_vec()
        );
        // A file written right after checkout (hopefully, from the test's perspective,
        // within the file system timestamp granularity) is detected as changed.
        write_working_copy_file(&workspace_root, &path, "x");
        let modified_tree = test_workspace.snapshot().unwrap();
        if modified_tree.id() == tree.id() {
            num_matches += 1;
        }
        // Reset the state for the next round
        write_working_copy_file(&workspace_root, &path, "1");
    }
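    // A non-zero count would mean some snapshot returned the original tree even
    // though the file had already been rewritten, i.e. a write landing within
    // the same timestamp tick as the checkout went undetected.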
    assert_eq!(num_matches, 0);
}