Mirror of https://github.com/martinvonz/jj.git
tests: avoid ReadonlyRepo::repo_path()
commit 19f383ffdd
parent a06c393716
8 changed files with 47 additions and 33 deletions
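
In short: these library tests used to obtain the repo directory from `ReadonlyRepo::repo_path()`; this commit routes them through new `repo_path()` accessors on the `TestRepo` and `TestWorkspace` test fixtures instead. The before/after shape of a typical call site, taken from the hunks below:

    // Before: the path came from the ReadonlyRepo itself.
    let repo = load_repo_at_head(&settings, repo.repo_path());

    // After: the path comes from the test fixture, which records it at init time.
    let repo = load_repo_at_head(&settings, test_repo.repo_path());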
@@ -202,7 +202,7 @@ fn test_bad_locking_interrupted(backend: TestRepoBackend) {
     // it somewhat hackily by copying the .jj/op_heads/ directory before the
     // operation and then copying that back afterwards, leaving the existing
     // op-head(s) in place.
-    let op_heads_dir = repo.repo_path().join("op_heads");
+    let op_heads_dir = test_workspace.repo_path().join("op_heads");
     let backup_path = test_workspace.root_dir().join("backup");
     copy_directory(&op_heads_dir, &backup_path);
     let mut tx = repo.start_transaction(&settings);
@@ -214,10 +214,10 @@ fn test_bad_locking_interrupted(backend: TestRepoBackend) {

     copy_directory(&backup_path, &op_heads_dir);
     // Reload the repo and check that only the new head is present.
-    let reloaded_repo = load_repo_at_head(&settings, repo.repo_path());
+    let reloaded_repo = load_repo_at_head(&settings, test_workspace.repo_path());
     assert_eq!(reloaded_repo.op_id(), &op_id);
     // Reload once more to make sure that the .jj/op_heads/ directory was updated
     // correctly.
-    let reloaded_repo = load_repo_at_head(&settings, repo.repo_path());
+    let reloaded_repo = load_repo_at_head(&settings, test_workspace.repo_path());
     assert_eq!(reloaded_repo.op_id(), &op_id);
 }

@@ -81,13 +81,12 @@ fn test_commit_parallel_instances(backend: TestRepoBackend) {
     // makes it behave very similar to separate processes.
     let settings = testutils::user_settings();
     let test_workspace = TestWorkspace::init_with_backend(&settings, backend);
-    let repo = &test_workspace.repo;

     let num_threads = max(num_cpus::get(), 4);
     thread::scope(|s| {
         for _ in 0..num_threads {
             let settings = settings.clone();
-            let repo = load_repo_at_head(&settings, repo.repo_path());
+            let repo = load_repo_at_head(&settings, test_workspace.repo_path());
             s.spawn(move || {
                 let mut tx = repo.start_transaction(&settings);
                 write_random_commit(tx.repo_mut(), &settings);
@@ -97,7 +96,7 @@ fn test_commit_parallel_instances(backend: TestRepoBackend) {
     });
     // One commit per thread plus the commit from the initial working-copy commit on
     // top of the root commit
-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_workspace.repo_path());
     assert_eq!(repo.view().heads().len(), num_threads + 1);

     // One additional operation for the root commit, one for initializing the repo,

@@ -3098,7 +3098,7 @@ fn test_bulk_update_extra_on_import_refs() {
     let git_repo = get_git_repo(repo);

     let count_extra_tables = || {
-        let extra_dir = repo.repo_path().join("store").join("extra");
+        let extra_dir = test_repo.repo_path().join("store").join("extra");
         extra_dir
             .read_dir()
             .unwrap()
@@ -3199,7 +3199,7 @@ fn test_concurrent_write_commit() {
     thread::scope(|s| {
         let barrier = Arc::new(Barrier::new(num_thread));
         for i in 0..num_thread {
-            let repo = load_repo_at_head(settings, repo.repo_path()); // unshare loader
+            let repo = load_repo_at_head(settings, test_repo.repo_path()); // unshare loader
             let barrier = barrier.clone();
             let sender = sender.clone();
             s.spawn(move || {
@@ -3285,7 +3285,7 @@ fn test_concurrent_read_write_commit() {

         // Writer assigns random change id
         for (i, commit_id) in commit_ids.iter().enumerate() {
-            let repo = load_repo_at_head(settings, repo.repo_path()); // unshare loader
+            let repo = load_repo_at_head(settings, test_repo.repo_path()); // unshare loader
             let barrier = barrier.clone();
             s.spawn(move || {
                 barrier.wait();
@@ -3301,7 +3301,7 @@ fn test_concurrent_read_write_commit() {

         // Reader may generate change id (if not yet assigned by the writer)
         for i in 0..num_reader_thread {
-            let mut repo = load_repo_at_head(settings, repo.repo_path()); // unshare loader
+            let mut repo = load_repo_at_head(settings, test_repo.repo_path()); // unshare loader
             let barrier = barrier.clone();
             let mut pending_commit_ids = commit_ids.clone();
             pending_commit_ids.rotate_left(i); // start lookup from different place

@@ -322,7 +322,7 @@ fn test_index_commits_previous_operations() {
         repo.index_store().as_any().downcast_ref().unwrap();
     default_index_store.reinit().unwrap();

-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_repo.repo_path());
     let index = as_readonly_composite(&repo);
     // There should be the root commit, plus 3 more
     assert_eq!(index.num_commits(), 1 + 3);
@@ -378,7 +378,7 @@ fn test_index_commits_hidden_but_referenced() {
         repo.index_store().as_any().downcast_ref().unwrap();
     default_index_store.reinit().unwrap();

-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_repo.repo_path());
     // All commits should be reindexed
     assert!(repo.index().has_id(commit_a.id()));
     assert!(repo.index().has_id(commit_b.id()));
@@ -420,7 +420,7 @@ fn test_index_commits_incremental() {
         .unwrap();
     tx.commit("test");

-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_repo.repo_path());
     let index = as_readonly_composite(&repo);
     // There should be the root commit, plus 3 more
     assert_eq!(index.num_commits(), 1 + 3);
@@ -464,7 +464,7 @@ fn test_index_commits_incremental_empty_transaction() {

     repo.start_transaction(&settings).commit("test");

-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_repo.repo_path());
     let index = as_readonly_composite(&repo);
     // There should be the root commit, plus 1 more
     assert_eq!(index.num_commits(), 1 + 1);
@@ -621,11 +621,11 @@ fn test_reindex_no_segments_dir() {
     assert!(repo.index().has_id(commit_a.id()));

     // jj <= 0.14 doesn't have "segments" directory
-    let segments_dir = repo.repo_path().join("index").join("segments");
+    let segments_dir = test_repo.repo_path().join("index").join("segments");
     assert!(segments_dir.is_dir());
     fs::remove_dir_all(&segments_dir).unwrap();

-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_repo.repo_path());
     assert!(repo.index().has_id(commit_a.id()));
 }

@@ -641,7 +641,7 @@ fn test_reindex_corrupt_segment_files() {
     assert!(repo.index().has_id(commit_a.id()));

     // Corrupt the index files
-    let segments_dir = repo.repo_path().join("index").join("segments");
+    let segments_dir = test_repo.repo_path().join("index").join("segments");
     for entry in segments_dir.read_dir().unwrap() {
         let entry = entry.unwrap();
         // u32: file format version
@@ -653,7 +653,7 @@ fn test_reindex_corrupt_segment_files() {
         fs::write(entry.path(), b"\0".repeat(24)).unwrap()
     }

-    let repo = load_repo_at_head(&settings, repo.repo_path());
+    let repo = load_repo_at_head(&settings, test_repo.repo_path());
     assert!(repo.index().has_id(commit_a.id()));
 }

@@ -693,7 +693,7 @@ fn test_reindex_from_merged_operation() {
     let index = as_readonly_composite(&repo);
     assert_eq!(index.num_commits(), 4);

-    let index_operations_dir = repo.repo_path().join("index").join("operations");
+    let index_operations_dir = test_repo.repo_path().join("index").join("operations");
     for &op_id in &op_ids_to_delete {
         fs::remove_file(index_operations_dir.join(op_id.hex())).unwrap();
     }
@@ -724,7 +724,7 @@ fn test_reindex_missing_commit() {
     // Remove historical head commit to simulate bad GC.
     let test_backend: &TestBackend = repo.store().backend_impl().downcast_ref().unwrap();
     test_backend.remove_commit_unchecked(missing_commit.id());
-    let repo = load_repo_at_head(&settings, repo.repo_path()); // discard cache
+    let repo = load_repo_at_head(&settings, test_repo.repo_path()); // discard cache
     assert!(repo.store().get_commit(missing_commit.id()).is_err());

     // Reindexing error should include the operation id where the commit
@@ -745,7 +745,7 @@ fn test_index_store_type() {
     let repo = &test_repo.repo;

     assert_eq!(as_readonly_composite(repo).num_commits(), 1);
-    let index_store_type_path = repo.repo_path().join("index").join("type");
+    let index_store_type_path = test_repo.repo_path().join("index").join("type");
     assert_eq!(
         std::fs::read_to_string(index_store_type_path).unwrap(),
         "default"

@@ -42,7 +42,7 @@ fn test_init_local() {
         .backend_impl()
         .downcast_ref::<GitBackend>()
         .is_none());
-    assert_eq!(repo.repo_path(), &canonical.join(".jj").join("repo"));
+    assert_eq!(repo.repo_path(), workspace.repo_path());
     assert_eq!(workspace.workspace_root(), &canonical);

     // Just test that we can write a commit to the store
@@ -61,7 +61,8 @@ fn test_init_internal_git() {
         .backend_impl()
         .downcast_ref::<GitBackend>()
         .unwrap();
-    assert_eq!(repo.repo_path(), &canonical.join(".jj").join("repo"));
+    let repo_path = canonical.join(".jj").join("repo");
+    assert_eq!(repo.repo_path(), &repo_path);
     assert_eq!(workspace.workspace_root(), &canonical);
     assert_eq!(
         git_backend.git_repo_path(),
@@ -69,7 +70,7 @@ fn test_init_internal_git() {
     );
     assert!(git_backend.git_workdir().is_none());
     assert_eq!(
-        std::fs::read_to_string(repo.repo_path().join("store").join("git_target")).unwrap(),
+        std::fs::read_to_string(repo_path.join("store").join("git_target")).unwrap(),
         "git"
     );

@@ -89,12 +90,13 @@ fn test_init_colocated_git() {
         .backend_impl()
         .downcast_ref::<GitBackend>()
         .unwrap();
-    assert_eq!(repo.repo_path(), &canonical.join(".jj").join("repo"));
+    let repo_path = canonical.join(".jj").join("repo");
+    assert_eq!(repo.repo_path(), &repo_path);
     assert_eq!(workspace.workspace_root(), &canonical);
     assert_eq!(git_backend.git_repo_path(), canonical.join(".git"));
     assert_eq!(git_backend.git_workdir(), Some(canonical.as_ref()));
     assert_eq!(
-        std::fs::read_to_string(repo.repo_path().join("store").join("git_target")).unwrap(),
+        std::fs::read_to_string(repo_path.join("store").join("git_target")).unwrap(),
         "../../../.git"
     );

@@ -34,7 +34,7 @@ fn test_load_at_operation() {
     // removed
     let loader = RepoLoader::init_from_file_system(
         &settings,
-        repo.repo_path(),
+        test_repo.repo_path(),
         &TestRepo::default_store_factories(),
     )
     .unwrap();
@@ -45,7 +45,7 @@ fn test_load_at_operation() {
     // it has not been removed yet
     let loader = RepoLoader::init_from_file_system(
         &settings,
-        repo.repo_path(),
+        test_repo.repo_path(),
         &TestRepo::default_store_factories(),
     )
     .unwrap();

@@ -47,7 +47,7 @@ fn test_unpublished_operation() {
     let test_repo = TestRepo::init();
     let repo = &test_repo.repo;

-    let op_heads_dir = repo.repo_path().join("op_heads").join("heads");
+    let op_heads_dir = test_repo.repo_path().join("op_heads").join("heads");
     let op_id0 = repo.op_id().clone();
     assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);

@@ -69,7 +69,7 @@ fn test_consecutive_operations() {
     let test_repo = TestRepo::init();
     let repo = &test_repo.repo;

-    let op_heads_dir = repo.repo_path().join("op_heads").join("heads");
+    let op_heads_dir = test_repo.repo_path().join("op_heads").join("heads");
     let op_id0 = repo.op_id().clone();
     assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);

@@ -101,7 +101,7 @@ fn test_concurrent_operations() {
     let test_repo = TestRepo::init();
     let repo = &test_repo.repo;

-    let op_heads_dir = repo.repo_path().join("op_heads").join("heads");
+    let op_heads_dir = test_repo.repo_path().join("op_heads").join("heads");
     let op_id0 = repo.op_id().clone();
     assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);

@@ -601,10 +601,10 @@ fn test_resolve_op_parents_children() {
 fn test_gc() {
     let settings = stable_op_id_settings();
     let test_repo = TestRepo::init();
+    let op_dir = test_repo.repo_path().join("op_store").join("operations");
+    let view_dir = test_repo.repo_path().join("op_store").join("views");
     let repo_0 = test_repo.repo;
     let op_store = repo_0.op_store();
-    let op_dir = repo_0.repo_path().join("op_store").join("operations");
-    let view_dir = repo_0.repo_path().join("op_store").join("views");

     // Set up operation graph:
     //

@@ -122,6 +122,7 @@ pub fn user_settings() -> UserSettings {
 pub struct TestRepo {
     _temp_dir: TempDir,
     pub repo: Arc<ReadonlyRepo>,
+    repo_path: PathBuf,
 }

 #[derive(PartialEq, Eq, Copy, Clone)]
@@ -182,6 +183,7 @@ impl TestRepo {
         Self {
             _temp_dir: temp_dir,
             repo,
+            repo_path: repo_dir,
         }
     }

@@ -199,12 +201,17 @@ impl TestRepo {
         );
         factories
     }
+
+    pub fn repo_path(&self) -> &Path {
+        &self.repo_path
+    }
 }

 pub struct TestWorkspace {
     temp_dir: TempDir,
     pub workspace: Workspace,
     pub repo: Arc<ReadonlyRepo>,
+    repo_path: PathBuf,
 }

 impl TestWorkspace {
@@ -237,11 +244,13 @@ impl TestWorkspace {
             signer,
         )
         .unwrap();
+        let repo_path = workspace.repo_path().to_owned();

         Self {
             temp_dir,
             workspace,
             repo,
+            repo_path,
         }
     }

@@ -249,6 +258,10 @@ impl TestWorkspace {
         self.temp_dir.path().join("repo").join("..")
     }

+    pub fn repo_path(&self) -> &Path {
+        &self.repo_path
+    }
+
     /// Snapshots the working copy and returns the tree. Updates the working
     /// copy state on disk, but does not update the working-copy commit (no
     /// new operation).
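
For context, a minimal sketch of the resulting pattern. This is a hypothetical test, not part of the commit; it assumes the `testutils` helpers and the `load_repo_at_head` function used throughout the hunks above:

    #[test]
    fn reload_repo_via_fixture_path() {
        let settings = testutils::user_settings();
        let test_repo = TestRepo::init();

        // `TestRepo::repo_path()` is the accessor added by this commit. The
        // fixture records the repo directory at init time, so tests no longer
        // need to reach through ReadonlyRepo::repo_path().
        let reloaded = load_repo_at_head(&settings, test_repo.repo_path());
        assert_eq!(reloaded.op_id(), test_repo.repo.op_id());
    }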