
repo: stop keeping a WorkingCopy instance

The `Repo` doesn't do anything with the `WorkingCopy` except keeping a
reference to it for its users to use. In fact, the entire lib crate
doesn't do anything with the `WorkingCopy`. It therefore seems simpler
to have the users of the crate manage the `WorkingCopy` instance. This
patch does that by letting `Workspace` own it. By not keeping an
instance in `Repo`, which is `Sync`, we can also drop the
`Arc<Mutex<>>` wrapping.
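
A minimal sketch of the resulting ownership model, with stub types standing in for the real ones (the actual `WorkingCopy::load()` also takes a `Store` and the on-disk working-copy path, and the `.jj/working_copy` location here is assumed only for illustration):

    use std::path::PathBuf;

    // Stub standing in for the real working-copy type in the lib crate.
    struct WorkingCopy {
        state_path: PathBuf,
    }

    impl WorkingCopy {
        fn load(state_path: PathBuf) -> Self {
            WorkingCopy { state_path }
        }
    }

    // The workspace now owns the working copy directly; no Arc<Mutex<..>>.
    struct Workspace {
        working_copy: WorkingCopy,
    }

    impl Workspace {
        fn load(workspace_root: PathBuf) -> Self {
            // Assumed layout: working-copy state stored under .jj/working_copy.
            let state_path = workspace_root.join(".jj").join("working_copy");
            Workspace {
                working_copy: WorkingCopy::load(state_path),
            }
        }

        fn working_copy(&self) -> &WorkingCopy {
            &self.working_copy
        }

        fn working_copy_mut(&mut self) -> &mut WorkingCopy {
            &mut self.working_copy
        }
    }

    fn main() {
        let mut workspace = Workspace::load(PathBuf::from("/tmp/repo"));
        println!("state path: {:?}", workspace.working_copy().state_path);
        // Exclusive access is enforced by the borrow checker instead of a Mutex.
        let _wc = workspace.working_copy_mut();
    }
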

I left `Repo::working_copy()` around for convenience, but it now
creates a new instance on every call. It's only used in tests.

This further decoupling should help us add support for multiple
working copies (#13).
Martin von Zweigbergk 2021-11-17 09:19:41 -08:00
parent c0711f47cf
commit 70073b94a8
6 changed files with 88 additions and 83 deletions

View file

@ -18,7 +18,7 @@ use std::fs;
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, MutexGuard};
use std::sync::{Arc, Mutex};
use thiserror::Error;
@ -107,7 +107,6 @@ pub struct ReadonlyRepo {
settings: RepoSettings,
index_store: Arc<IndexStore>,
index: Mutex<Option<Arc<ReadonlyIndex>>>,
working_copy: Arc<Mutex<WorkingCopy>>,
view: View,
}
@ -236,7 +235,6 @@ impl ReadonlyRepo {
settings: repo_settings,
index_store,
index: Mutex::new(None),
working_copy: Arc::new(Mutex::new(working_copy)),
view,
})
}
@ -309,12 +307,12 @@ impl ReadonlyRepo {
self.index()
}
pub fn working_copy(&self) -> &Arc<Mutex<WorkingCopy>> {
&self.working_copy
}
pub fn working_copy_locked(&self) -> MutexGuard<WorkingCopy> {
self.working_copy.as_ref().lock().unwrap()
pub fn working_copy(&self) -> WorkingCopy {
WorkingCopy::load(
self.store.clone(),
self.wc_path.clone(),
self.repo_path.join("working_copy"),
)
}
pub fn store(&self) -> &Arc<Store> {
@ -458,7 +456,6 @@ impl RepoLoader {
&self,
operation: Operation,
view: View,
working_copy: Arc<Mutex<WorkingCopy>>,
index: Arc<ReadonlyIndex>,
) -> Arc<ReadonlyRepo> {
let repo = ReadonlyRepo {
@ -471,18 +468,12 @@ impl RepoLoader {
settings: self.repo_settings.clone(),
index_store: self.index_store.clone(),
index: Mutex::new(Some(index)),
working_copy,
view,
};
Arc::new(repo)
}
fn _finish_load(&self, operation: Operation, view: View) -> Arc<ReadonlyRepo> {
let working_copy = WorkingCopy::load(
self.store.clone(),
self.wc_path.clone(),
self.repo_path.join("working_copy"),
);
let repo = ReadonlyRepo {
repo_path: self.repo_path.clone(),
wc_path: self.wc_path.clone(),
@ -493,7 +484,6 @@ impl RepoLoader {
settings: self.repo_settings.clone(),
index_store: self.index_store.clone(),
index: Mutex::new(None),
working_copy: Arc::new(Mutex::new(working_copy)),
view,
};
Arc::new(repo)

View file

@ -13,7 +13,7 @@
// limitations under the License.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use crate::backend::Timestamp;
use crate::index::ReadonlyIndex;
@ -22,7 +22,6 @@ use crate::op_store::{OperationId, OperationMetadata};
use crate::operation::Operation;
use crate::repo::{MutableRepo, ReadonlyRepo, RepoLoader};
use crate::view::View;
use crate::working_copy::WorkingCopy;
pub struct Transaction {
repo: Option<MutableRepo>,
@ -96,13 +95,7 @@ impl Transaction {
.associate_file_with_operation(&index, operation.id())
.unwrap();
self.closed = true;
UnpublishedOperation::new(
base_repo.loader(),
operation,
view,
base_repo.working_copy().clone(),
index,
)
UnpublishedOperation::new(base_repo.loader(), operation, view, index)
}
pub fn discard(mut self) {
@ -121,7 +114,6 @@ impl Drop for Transaction {
struct NewRepoData {
operation: Operation,
view: View,
working_copy: Arc<Mutex<WorkingCopy>>,
index: Arc<ReadonlyIndex>,
}
@ -136,13 +128,11 @@ impl UnpublishedOperation {
repo_loader: RepoLoader,
operation: Operation,
view: View,
working_copy: Arc<Mutex<WorkingCopy>>,
index: Arc<ReadonlyIndex>,
) -> Self {
let data = Some(NewRepoData {
operation,
view,
working_copy,
index,
});
UnpublishedOperation {
@ -161,18 +151,18 @@ impl UnpublishedOperation {
self.repo_loader
.op_heads_store()
.update_op_heads(&data.operation);
let repo =
self.repo_loader
.create_from(data.operation, data.view, data.working_copy, data.index);
let repo = self
.repo_loader
.create_from(data.operation, data.view, data.index);
self.closed = true;
repo
}
pub fn leave_unpublished(mut self) -> Arc<ReadonlyRepo> {
let data = self.data.take().unwrap();
let repo =
self.repo_loader
.create_from(data.operation, data.view, data.working_copy, data.index);
let repo = self
.repo_loader
.create_from(data.operation, data.view, data.index);
self.closed = true;
repo
}

View file

@ -16,6 +16,7 @@ use std::path::PathBuf;
use crate::repo::{RepoLoadError, RepoLoader};
use crate::settings::UserSettings;
use crate::working_copy::WorkingCopy;
/// Represents a workspace, i.e. what's typically the .jj/ directory and its
/// parent.
@ -24,6 +25,7 @@ pub struct Workspace {
// working copy files live.
workspace_root: PathBuf,
repo_loader: RepoLoader,
working_copy: WorkingCopy,
}
impl Workspace {
@ -34,9 +36,16 @@ impl Workspace {
// TODO: Move the find_repo_dir() call from RepoLoader::init() to here
let repo_loader = RepoLoader::init(user_settings, workspace_root)?;
let workspace_root = repo_loader.working_copy_path().clone();
let working_copy_state_path = repo_path.join("working_copy");
let working_copy = WorkingCopy::load(
repo_loader.store().clone(),
workspace_root.clone(),
working_copy_state_path,
);
Ok(Self {
workspace_root,
repo_loader,
working_copy,
})
}
@ -51,4 +60,12 @@ impl Workspace {
pub fn repo_loader(&self) -> &RepoLoader {
&self.repo_loader
}
pub fn working_copy(&self) -> &WorkingCopy {
&self.working_copy
}
pub fn working_copy_mut(&mut self) -> &mut WorkingCopy {
&mut self.working_copy
}
}

View file

@ -37,8 +37,7 @@ fn test_root(use_git: bool) {
let settings = testutils::user_settings();
let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
let owned_wc = repo.working_copy().clone();
let mut wc = owned_wc.lock().unwrap();
let mut wc = repo.working_copy();
let locked_wc = wc.write_tree();
let new_tree_id = locked_wc.new_tree_id();
locked_wc.discard();
@ -220,8 +219,7 @@ fn test_checkout_file_transitions(use_git: bool) {
.write_to_repo(tx.mut_repo());
tx.commit();
let owned_wc = repo.working_copy().clone();
let mut wc = owned_wc.lock().unwrap();
let mut wc = repo.working_copy();
wc.check_out(left_commit).unwrap();
wc.check_out(right_commit.clone()).unwrap();
@ -323,16 +321,13 @@ fn test_untrack() {
)
.write_to_repo(tx.mut_repo());
let repo = tx.commit();
let working_copy = repo.working_copy().clone();
let mut locked_working_copy = working_copy.lock().unwrap();
locked_working_copy
.check_out(initial_commit.clone())
.unwrap();
let mut wc = repo.working_copy();
wc.check_out(initial_commit.clone()).unwrap();
// Now we untrack the file called "unwanted"
let mut tx = repo.start_transaction("test");
let matcher = FilesMatcher::new(HashSet::from([unwanted_path.clone()]));
let unfinished_write = locked_working_copy.untrack(&matcher).unwrap();
let unfinished_write = wc.untrack(&matcher).unwrap();
let new_commit = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial_commit)
.set_tree(unfinished_write.new_tree_id())
.write_to_repo(tx.mut_repo());
@ -352,7 +347,7 @@ fn test_untrack() {
// It should not get re-added if we write a new tree since we also added it
// to the .gitignore file.
let unfinished_write = locked_working_copy.write_tree();
let unfinished_write = wc.write_tree();
let new_tree_id = unfinished_write.new_tree_id();
assert_eq!(new_tree_id, *new_commit.tree().id());
unfinished_write.discard();
@ -369,8 +364,7 @@ fn test_commit_racy_timestamps(use_git: bool) {
let file_path = repo.working_copy_path().join("file");
let mut previous_tree_id = repo.store().empty_tree_id().clone();
let owned_wc = repo.working_copy().clone();
let mut wc = owned_wc.lock().unwrap();
let mut wc = repo.working_copy();
for i in 0..100 {
{
let mut file = OpenOptions::new()
@ -412,8 +406,7 @@ fn test_gitignores(use_git: bool) {
std::fs::create_dir(repo.working_copy_path().join("dir")).unwrap();
testutils::write_working_copy_file(&repo, &subdir_modified_path, "1");
let owned_wc = repo.working_copy().clone();
let mut wc = owned_wc.lock().unwrap();
let mut wc = repo.working_copy();
let locked_wc = wc.write_tree();
let new_tree_id1 = locked_wc.new_tree_id();
locked_wc.discard();
@ -491,7 +484,8 @@ fn test_gitignores_checkout_overwrites_ignored(use_git: bool) {
// Now check out the commit that adds the file "modified" with contents
// "contents". The exiting contents ("garbage") should be replaced in the
// working copy.
repo.working_copy_locked().check_out(commit).unwrap();
let mut wc = repo.working_copy();
wc.check_out(commit).unwrap();
// Check that the new contents are in the working copy
let path = repo.working_copy_path().join("modified");
@ -502,7 +496,6 @@ fn test_gitignores_checkout_overwrites_ignored(use_git: bool) {
assert_eq!(buf, b"contents");
// Check that the file is in the tree created by committing the working copy
let mut wc = repo.working_copy_locked();
let locked_wc = wc.write_tree();
let new_tree_id = locked_wc.new_tree_id();
locked_wc.discard();
@ -544,11 +537,11 @@ fn test_gitignores_ignored_directory_already_tracked(use_git: bool) {
let repo = tx.commit();
// Check out the commit with the file in ignored/
repo.working_copy_locked().check_out(commit).unwrap();
let mut wc = repo.working_copy();
wc.check_out(commit).unwrap();
// Check that the file is still in the tree created by committing the working
// copy (that it didn't get removed because the directory is ignored)
let mut wc = repo.working_copy_locked();
let locked_wc = wc.write_tree();
let new_tree_id = locked_wc.new_tree_id();
locked_wc.discard();
@ -569,7 +562,7 @@ fn test_dotgit_ignored(use_git: bool) {
let settings = testutils::user_settings();
let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
let mut wc = repo.working_copy_locked();
let mut wc = repo.working_copy();
// Test with a .git/ directory (with a file in it, since we don't write empty
// trees)

View file

@ -44,15 +44,12 @@ fn test_concurrent_checkout(use_git: bool) {
tx1.commit();
// Check out commit1
let mut wc1 = repo1.working_copy_locked();
let mut wc1 = repo1.working_copy();
wc1.check_out(commit1).unwrap();
// Check out commit2 from another process (simulated by another repo instance)
let repo2 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone()).unwrap();
repo2
.working_copy_locked()
.check_out(commit2.clone())
.unwrap();
repo2.working_copy().check_out(commit2.clone()).unwrap();
// Checking out another commit (via the first repo instance) should now fail.
assert_eq!(
@ -63,7 +60,7 @@ fn test_concurrent_checkout(use_git: bool) {
// Check that the commit2 is still checked out on disk.
let repo3 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone()).unwrap();
assert_eq!(
repo3.working_copy_locked().current_tree_id(),
repo3.working_copy().current_tree_id(),
commit2.tree().id().clone()
);
}
@ -100,8 +97,8 @@ fn test_checkout_parallel(use_git: bool) {
let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone())
.set_open(true)
.write_to_repo(tx.mut_repo());
repo.working_copy_locked().check_out(commit).unwrap();
tx.commit();
repo.working_copy().check_out(commit).unwrap();
let mut threads = vec![];
for commit_id in &commit_ids {
@ -111,8 +108,7 @@ fn test_checkout_parallel(use_git: bool) {
let working_copy_path = repo.working_copy_path().clone();
let handle = thread::spawn(move || {
let repo = ReadonlyRepo::load(&settings, working_copy_path).unwrap();
let owned_wc = repo.working_copy().clone();
let mut wc = owned_wc.lock().unwrap();
let mut wc = repo.working_copy();
let commit = repo.store().get_commit(&commit_id).unwrap();
let stats = wc.check_out(commit).unwrap();
assert_eq!(stats.updated_files, 0);

View file

@ -187,11 +187,22 @@ jj init --git-store={} <path to new jj repo>",
)?;
repo_loader.load_at(&op)
};
Ok(self.for_loaded_repo(ui, repo))
Ok(self.for_loaded_repo(ui, workspace, repo))
}
fn for_loaded_repo(&self, ui: &Ui, repo: Arc<ReadonlyRepo>) -> WorkspaceCommandHelper {
WorkspaceCommandHelper::for_loaded_repo(ui, self.string_args.clone(), &self.root_args, repo)
fn for_loaded_repo(
&self,
ui: &Ui,
workspace: Workspace,
repo: Arc<ReadonlyRepo>,
) -> WorkspaceCommandHelper {
WorkspaceCommandHelper::for_loaded_repo(
ui,
workspace,
self.string_args.clone(),
&self.root_args,
repo,
)
}
}
@ -200,6 +211,7 @@ jj init --git-store={} <path to new jj repo>",
struct WorkspaceCommandHelper {
string_args: Vec<String>,
settings: UserSettings,
workspace: Workspace,
repo: Arc<ReadonlyRepo>,
may_update_working_copy: bool,
working_copy_committed: bool,
@ -211,6 +223,7 @@ struct WorkspaceCommandHelper {
impl WorkspaceCommandHelper {
fn for_loaded_repo(
ui: &Ui,
workspace: Workspace,
string_args: Vec<String>,
root_args: &ArgMatches,
repo: Arc<ReadonlyRepo>,
@ -219,6 +232,7 @@ impl WorkspaceCommandHelper {
Self {
string_args,
settings: ui.settings().clone(),
workspace,
repo,
may_update_working_copy,
working_copy_committed: false,
@ -239,6 +253,14 @@ impl WorkspaceCommandHelper {
&mut self.repo
}
fn working_copy(&self) -> &WorkingCopy {
self.workspace.working_copy()
}
fn working_copy_mut(&mut self) -> &mut WorkingCopy {
self.workspace.working_copy_mut()
}
fn resolve_revision_arg(
&mut self,
ui: &mut Ui,
@ -343,8 +365,7 @@ impl WorkspaceCommandHelper {
fn maybe_commit_working_copy(&mut self, ui: &mut Ui) -> Result<(), CommandError> {
if self.may_update_working_copy {
let repo = self.repo.clone();
let mut wc = repo.working_copy_locked();
let locked_wc = wc.write_tree();
let locked_wc = self.workspace.working_copy_mut().write_tree();
let old_commit = locked_wc.old_commit();
// Check if the current checkout has changed on disk after we read it. It's fine
// if it has, but we'll need to reload the repo so the new commit is
@ -427,7 +448,7 @@ impl WorkspaceCommandHelper {
}
}
self.repo = tx.commit();
let stats = update_working_copy(ui, &self.repo, &mut self.repo.working_copy_locked())?;
let stats = update_working_copy(ui, &self.repo, self.workspace.working_copy_mut())?;
if let Some(stats) = &stats {
if stats.added_files > 0 || stats.updated_files > 0 || stats.removed_files > 0 {
writeln!(
@ -1359,9 +1380,10 @@ fn cmd_init(ui: &mut Ui, command: &CommandHelper, args: &ArgMatches) -> Result<(
let repo = if let Some(git_store_str) = args.value_of("git-store") {
let git_store_path = ui.cwd().join(git_store_str);
let repo = ReadonlyRepo::init_external_git(ui.settings(), wc_path, git_store_path)?;
let repo = ReadonlyRepo::init_external_git(ui.settings(), wc_path.clone(), git_store_path)?;
let git_repo = repo.store().git_repo().unwrap();
let mut workspace_command = command.for_loaded_repo(ui, repo);
let workspace = Workspace::load(ui.settings(), wc_path).unwrap();
let mut workspace_command = command.for_loaded_repo(ui, workspace, repo);
let mut tx = workspace_command.start_transaction("import git refs");
git::import_refs(tx.mut_repo(), &git_repo).unwrap();
// TODO: Check out a recent commit. Maybe one with the highest generation
@ -1414,9 +1436,9 @@ fn cmd_untrack(
args.values_of("paths"),
)?;
let mut tx = workspace_command.start_transaction("untrack paths");
let mut locked_working_copy = base_repo.working_copy_locked();
let old_commit = locked_working_copy.current_commit();
let unfinished_write = locked_working_copy
let old_commit = workspace_command.working_copy_mut().current_commit();
let unfinished_write = workspace_command
.working_copy_mut()
.untrack(matcher.as_ref())
.map_err(|err| CommandError::InternalError(format!("Failed to untrack paths: {}", err)))?;
let new_tree_id = unfinished_write.new_tree_id();
@ -1426,17 +1448,13 @@ fn cmd_untrack(
tx.mut_repo().set_checkout(new_commit.id().clone());
let repo = tx.commit();
unfinished_write.finish(new_commit);
drop(locked_working_copy);
workspace_command.repo_mut().reload_at(repo.operation());
// TODO: Is it better to have WorkingCopy::untrack() report if any matching
// files exist on disk? That would make the command have no effect rather
// than partially succeeding, for better or worse.
workspace_command.maybe_commit_working_copy(ui)?;
let new_commit = workspace_command
.repo()
.working_copy_locked()
.current_commit();
let new_commit = workspace_command.working_copy().current_commit();
if let Some((path, _value)) = new_commit.tree().entries_matching(matcher.as_ref()).next() {
let ui_path = ui.format_file_path(base_repo.working_copy_path(), &path);
return Err(CommandError::UserError(format!(
@ -3100,7 +3118,7 @@ fn cmd_debug(ui: &mut Ui, command: &CommandHelper, args: &ArgMatches) -> Result<
writeln!(ui, "{}", commit.id().hex())?;
} else if let Some(_wc_matches) = args.subcommand_matches("workingcopy") {
let workspace_command = command.workspace_helper(ui)?;
let wc = workspace_command.repo().working_copy_locked();
let wc = workspace_command.working_copy();
writeln!(ui, "Current commit: {:?}", wc.current_commit_id())?;
writeln!(ui, "Current tree: {:?}", wc.current_tree_id())?;
for (file, state) in wc.file_states().iter() {
@ -3499,14 +3517,15 @@ fn cmd_git_clone(
fs::create_dir(&wc_path).unwrap();
}
let repo = ReadonlyRepo::init_internal_git(ui.settings(), wc_path)?;
let repo = ReadonlyRepo::init_internal_git(ui.settings(), wc_path.clone())?;
let git_repo = get_git_repo(repo.store())?;
writeln!(
ui,
"Fetching into new repo in {:?}",
repo.working_copy_path()
)?;
let mut workspace_command = command.for_loaded_repo(ui, repo);
let workspace = Workspace::load(ui.settings(), wc_path).unwrap();
let mut workspace_command = command.for_loaded_repo(ui, workspace, repo);
let remote_name = "origin";
git_repo.remote(remote_name, source).unwrap();
let mut tx = workspace_command.start_transaction("fetch from git remote into empty repo");