ok/jj
1
0
Fork 0
forked from mirrors/jj

import commit 0f15be02bf4012c116636913562691a0aaa7aed2 from my hg repo

This commit is contained in:
Martin von Zweigbergk 2020-12-12 00:00:42 -08:00
parent 1fa53a13b0
commit 6b1427cb46
65 changed files with 21995 additions and 0 deletions

1516
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

36
Cargo.toml Normal file
View file

@@ -0,0 +1,36 @@
[workspace]
members = ["lib"]
[package]
name = "jj"
version = "0.1.0"
authors = ["Martin von Zweigbergk <martinvonz@google.com>"]
edition = "2018"
[dependencies.jj-lib]
path = "lib"
[dependencies]
blake2 = "0.8"
bytes = "0.5"
chrono = "0.4"
clap = "2.33"
config = "0.10"
criterion = "0.3.2"
diff = "0.1"
dirs = "2.0"
git2 = "0.13"
hex = "0.4"
indoc = "1.0"
pest = "2.1"
pest_derive = "2.1"
protobuf = { version = "2.12", features = ["with-bytes"] }
protobuf-codegen-pure = "2.12"
serde_json = "1.0"
tempfile = "3.1"
uuid = { version = "0.8", features = ["v4"] }
zstd = "0.5"
[dev-dependencies]
test-case = "1.0.0"
regex = "1.3.9"

34
lib/Cargo.toml Normal file
View file

@@ -0,0 +1,34 @@
[package]
name = "jj-lib"
version = "0.1.0"
authors = ["Martin von Zweigbergk <martinvonz@google.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies.protos]
path = "protos"
[dependencies]
blake2 = "0.8"
bytes = "0.5"
byteorder = "1.3.4"
chrono = "0.4"
config = "0.10"
diff = "0.1"
dirs = "2.0"
git2 = "0.13"
hex = "0.4"
protobuf = { version = "2.12", features = ["with-bytes"] }
protobuf-codegen-pure = "2.12"
rand = "0.7.3"
serde_json = "1.0"
tempfile = "3.1"
thiserror = "1.0"
uuid = { version = "0.8", features = ["v4"] }
whoami = "0.9.0"
zstd = "0.5"
[dev-dependencies]
test-case = "1.0.0"

12
lib/protos/Cargo.toml Normal file
View file

@@ -0,0 +1,12 @@
[package]
name = "protos"
version = "0.1.0"
authors = ["Martin von Zweigbergk <martinvonz@google.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bytes = "0.5"
protobuf = { version = "2.12", features = ["with-bytes"] }
protobuf-codegen-pure = "2.12"

17
lib/protos/src/lib.rs Normal file
View file

@@ -0,0 +1,17 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Re-exports of the generated protobuf modules (regenerated by the codegen
// driver in main.rs from the sibling .proto files).
pub mod op_store; // operation log: View, Operation, OperationMetadata
pub mod store; // commit storage: Commit, Tree, TreeValue, Conflict
pub mod working_copy; // working copy: TreeState, FileState, Checkout

28
lib/protos/src/main.rs Normal file
View file

@@ -0,0 +1,28 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate protobuf_codegen_pure;
use protobuf_codegen_pure::Codegen;
fn main() {
Codegen::new()
.out_dir("src/")
.include("src/")
.input("src/op_store.proto")
.input("src/store.proto")
.input("src/working_copy.proto")
.run()
.expect("protoc");
}

View file

@@ -0,0 +1,40 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
// A point-in-time snapshot of the repository's visible state. All ids are
// raw hash bytes.
message View {
  // Commit ids of all current heads.
  repeated bytes head_ids = 1;
  // Commit id of the currently checked-out commit.
  bytes checkout = 2;
}
// A node in the operation log: the resulting view plus the parent
// operation(s) it was derived from.
message Operation {
  bytes view_id = 1;
  repeated bytes parents = 2;
  OperationMetadata metadata = 3;
}
// TODO: Share with store.proto? Do we even need the timezone here?
message Timestamp {
  uint64 millis_since_epoch = 1;
  // Timezone offset from UTC -- units (minutes?) not defined here; confirm
  // against the code that writes it.
  int32 tz_offset = 2;
}
// Who/when/where/why for a recorded operation.
message OperationMetadata {
  Timestamp start_time = 1;
  Timestamp end_time = 2;
  string description = 3;
  string hostname = 4;
  string username = 5;
}

1053
lib/protos/src/op_store.rs Normal file

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,70 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
// One entry value in a tree: a regular file, symlink, subtree, or an
// unresolved conflict. All ids are raw hash bytes.
message TreeValue {
  message NormalFile {
    bytes id = 1;
    bool executable = 2;
  }
  // Field numbers start at 2; number 1 appears intentionally unused.
  oneof value {
    NormalFile normal_file = 2;
    bytes symlink_id = 3;
    bytes tree_id = 4;
    bytes conflict_id = 5;
  }
}
// A directory listing: named entries pointing at TreeValues.
message Tree {
  message Entry {
    string name = 1;
    TreeValue value = 2;
  }
  repeated Entry entries = 1;
}
// A stored commit.
message Commit {
  repeated bytes parents = 1;
  // Commits this commit was rewritten from.
  repeated bytes predecessors = 2;
  bytes root_tree = 3;
  // Stable id shared by all rewrites of the same logical change.
  bytes change_id = 4;
  string description = 5;
  message Timestamp {
    uint64 millis_since_epoch = 1;
    // Timezone offset from UTC -- units not defined here; confirm against
    // the writer.
    int32 tz_offset = 2;
  }
  message Signature {
    string name = 1;
    string email = 2;
    Timestamp timestamp = 3;
  }
  Signature author = 6;
  Signature committer = 7;
  bool is_open = 8;
  bool is_pruned = 9;
}
// An unresolved merge: the removed ("base") sides and the added sides.
message Conflict {
  message Part {
    TreeValue content = 1;
  }
  repeated Part removes = 1;
  repeated Part adds = 2;
}

2395
lib/protos/src/store.rs Normal file

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,36 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
// How a tracked path is materialized on disk.
enum FileType {
  Normal = 0;
  Symlink = 1;
  Executable = 2;
}
// Cached per-file metadata used to detect on-disk modifications.
message FileState {
  uint64 mtime_millis_since_epoch = 1;
  uint64 size = 2;
  FileType file_type = 3;
}
// State of the working copy: the tree it corresponds to plus the recorded
// state of every tracked file, keyed by path.
message TreeState {
  bytes tree_id = 1;
  map<string, FileState> file_states = 2;
}
// The commit currently checked out in the working copy.
message Checkout {
  bytes commit_id = 1;
}

View file

@@ -0,0 +1,676 @@
// This file is generated by rust-protobuf 2.18.0. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![rustfmt::skip]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `working_copy.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_18_0;
#[derive(PartialEq,Clone,Default)]
pub struct FileState {
// message fields
pub mtime_millis_since_epoch: u64,
pub size: u64,
pub file_type: FileType,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a FileState {
fn default() -> &'a FileState {
<FileState as ::protobuf::Message>::default_instance()
}
}
impl FileState {
pub fn new() -> FileState {
::std::default::Default::default()
}
// uint64 mtime_millis_since_epoch = 1;
pub fn get_mtime_millis_since_epoch(&self) -> u64 {
self.mtime_millis_since_epoch
}
pub fn clear_mtime_millis_since_epoch(&mut self) {
self.mtime_millis_since_epoch = 0;
}
// Param is passed by value, moved
pub fn set_mtime_millis_since_epoch(&mut self, v: u64) {
self.mtime_millis_since_epoch = v;
}
// uint64 size = 2;
pub fn get_size(&self) -> u64 {
self.size
}
pub fn clear_size(&mut self) {
self.size = 0;
}
// Param is passed by value, moved
pub fn set_size(&mut self, v: u64) {
self.size = v;
}
// .FileType file_type = 3;
pub fn get_file_type(&self) -> FileType {
self.file_type
}
pub fn clear_file_type(&mut self) {
self.file_type = FileType::Normal;
}
// Param is passed by value, moved
pub fn set_file_type(&mut self, v: FileType) {
self.file_type = v;
}
}
impl ::protobuf::Message for FileState {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint64()?;
self.mtime_millis_since_epoch = tmp;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint64()?;
self.size = tmp;
},
3 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.file_type, 3, &mut self.unknown_fields)?
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.mtime_millis_since_epoch != 0 {
my_size += ::protobuf::rt::value_size(1, self.mtime_millis_since_epoch, ::protobuf::wire_format::WireTypeVarint);
}
if self.size != 0 {
my_size += ::protobuf::rt::value_size(2, self.size, ::protobuf::wire_format::WireTypeVarint);
}
if self.file_type != FileType::Normal {
my_size += ::protobuf::rt::enum_size(3, self.file_type);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.mtime_millis_since_epoch != 0 {
os.write_uint64(1, self.mtime_millis_since_epoch)?;
}
if self.size != 0 {
os.write_uint64(2, self.size)?;
}
if self.file_type != FileType::Normal {
os.write_enum(3, ::protobuf::ProtobufEnum::value(&self.file_type))?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> FileState {
FileState::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint64>(
"mtime_millis_since_epoch",
|m: &FileState| { &m.mtime_millis_since_epoch },
|m: &mut FileState| { &mut m.mtime_millis_since_epoch },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint64>(
"size",
|m: &FileState| { &m.size },
|m: &mut FileState| { &mut m.size },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<FileType>>(
"file_type",
|m: &FileState| { &m.file_type },
|m: &mut FileState| { &mut m.file_type },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<FileState>(
"FileState",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static FileState {
static instance: ::protobuf::rt::LazyV2<FileState> = ::protobuf::rt::LazyV2::INIT;
instance.get(FileState::new)
}
}
impl ::protobuf::Clear for FileState {
fn clear(&mut self) {
self.mtime_millis_since_epoch = 0;
self.size = 0;
self.file_type = FileType::Normal;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for FileState {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for FileState {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct TreeState {
// message fields
pub tree_id: ::std::vec::Vec<u8>,
pub file_states: ::std::collections::HashMap<::std::string::String, FileState>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TreeState {
fn default() -> &'a TreeState {
<TreeState as ::protobuf::Message>::default_instance()
}
}
impl TreeState {
pub fn new() -> TreeState {
::std::default::Default::default()
}
// bytes tree_id = 1;
pub fn get_tree_id(&self) -> &[u8] {
&self.tree_id
}
pub fn clear_tree_id(&mut self) {
self.tree_id.clear();
}
// Param is passed by value, moved
pub fn set_tree_id(&mut self, v: ::std::vec::Vec<u8>) {
self.tree_id = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_tree_id(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.tree_id
}
// Take field
pub fn take_tree_id(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.tree_id, ::std::vec::Vec::new())
}
// repeated .TreeState.file_states_MapEntry file_states = 2;
pub fn get_file_states(&self) -> &::std::collections::HashMap<::std::string::String, FileState> {
&self.file_states
}
pub fn clear_file_states(&mut self) {
self.file_states.clear();
}
// Param is passed by value, moved
pub fn set_file_states(&mut self, v: ::std::collections::HashMap<::std::string::String, FileState>) {
self.file_states = v;
}
// Mutable pointer to the field.
pub fn mut_file_states(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, FileState> {
&mut self.file_states
}
// Take field
pub fn take_file_states(&mut self) -> ::std::collections::HashMap<::std::string::String, FileState> {
::std::mem::replace(&mut self.file_states, ::std::collections::HashMap::new())
}
}
impl ::protobuf::Message for TreeState {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.tree_id)?;
},
2 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<FileState>>(wire_type, is, &mut self.file_states)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.tree_id.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.tree_id);
}
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<FileState>>(2, &self.file_states);
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.tree_id.is_empty() {
os.write_bytes(1, &self.tree_id)?;
}
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<FileState>>(2, &self.file_states, os)?;
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TreeState {
TreeState::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"tree_id",
|m: &TreeState| { &m.tree_id },
|m: &mut TreeState| { &mut m.tree_id },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<FileState>>(
"file_states",
|m: &TreeState| { &m.file_states },
|m: &mut TreeState| { &mut m.file_states },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TreeState>(
"TreeState",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TreeState {
static instance: ::protobuf::rt::LazyV2<TreeState> = ::protobuf::rt::LazyV2::INIT;
instance.get(TreeState::new)
}
}
impl ::protobuf::Clear for TreeState {
fn clear(&mut self) {
self.tree_id.clear();
self.file_states.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TreeState {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TreeState {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct Checkout {
// message fields
pub commit_id: ::std::vec::Vec<u8>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Checkout {
fn default() -> &'a Checkout {
<Checkout as ::protobuf::Message>::default_instance()
}
}
impl Checkout {
pub fn new() -> Checkout {
::std::default::Default::default()
}
// bytes commit_id = 1;
pub fn get_commit_id(&self) -> &[u8] {
&self.commit_id
}
pub fn clear_commit_id(&mut self) {
self.commit_id.clear();
}
// Param is passed by value, moved
pub fn set_commit_id(&mut self, v: ::std::vec::Vec<u8>) {
self.commit_id = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_commit_id(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.commit_id
}
// Take field
pub fn take_commit_id(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.commit_id, ::std::vec::Vec::new())
}
}
impl ::protobuf::Message for Checkout {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.commit_id)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.commit_id.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.commit_id);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.commit_id.is_empty() {
os.write_bytes(1, &self.commit_id)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Checkout {
Checkout::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"commit_id",
|m: &Checkout| { &m.commit_id },
|m: &mut Checkout| { &mut m.commit_id },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<Checkout>(
"Checkout",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static Checkout {
static instance: ::protobuf::rt::LazyV2<Checkout> = ::protobuf::rt::LazyV2::INIT;
instance.get(Checkout::new)
}
}
impl ::protobuf::Clear for Checkout {
fn clear(&mut self) {
self.commit_id.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for Checkout {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Checkout {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum FileType {
Normal = 0,
Symlink = 1,
Executable = 2,
}
impl ::protobuf::ProtobufEnum for FileType {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<FileType> {
match value {
0 => ::std::option::Option::Some(FileType::Normal),
1 => ::std::option::Option::Some(FileType::Symlink),
2 => ::std::option::Option::Some(FileType::Executable),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [FileType] = &[
FileType::Normal,
FileType::Symlink,
FileType::Executable,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<FileType>("FileType", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for FileType {
}
impl ::std::default::Default for FileType {
fn default() -> Self {
FileType::Normal
}
}
impl ::protobuf::reflect::ProtobufValue for FileType {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x12working_copy.proto\"\x88\x01\n\tFileState\x129\n\x18mtime_millis_s\
ince_epoch\x18\x01\x20\x01(\x04R\x15mtimeMillisSinceEpochB\0\x12\x14\n\
\x04size\x18\x02\x20\x01(\x04R\x04sizeB\0\x12(\n\tfile_type\x18\x03\x20\
\x01(\x0e2\t.FileTypeR\x08fileTypeB\0:\0\"\xb8\x01\n\tTreeState\x12\x19\
\n\x07tree_id\x18\x01\x20\x01(\x0cR\x06treeIdB\0\x12B\n\x0bfile_states\
\x18\x02\x20\x03(\x0b2\x1f.TreeState.file_states_MapEntryR\nfileStatesB\
\0\x1aJ\n\x14file_states_MapEntry\x12\x0e\n\x03key\x18\x01(\tR\x03key\
\x12\x1e\n\x05value\x18\x02(\x0b2\n.FileStateR\x05value:\x028\x01:\0\"+\
\n\x08Checkout\x12\x1d\n\tcommit_id\x18\x01\x20\x01(\x0cR\x08commitIdB\0\
:\0*5\n\x08FileType\x12\n\n\x06Normal\x10\0\x12\x0b\n\x07Symlink\x10\x01\
\x12\x0e\n\nExecutable\x10\x02\x1a\0B\0b\x06proto3\
";
static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}

140
lib/src/commit.rs Normal file
View file

@@ -0,0 +1,140 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Ordering;
use std::fmt::{Debug, Error, Formatter};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use crate::repo_path::DirRepoPath;
use crate::store;
use crate::store::{ChangeId, CommitId, Signature};
use crate::store_wrapper::StoreWrapper;
use crate::tree::Tree;
// A commit loaded from the store, with identity-based equality/ordering
// (see the trait impls below). Cloning is cheap: both the store handle and
// the commit data are behind `Arc`s.
#[derive(Clone)]
pub struct Commit {
    // Handle to the backing store, used to resolve parents and trees.
    store: Arc<StoreWrapper>,
    // The commit's id; the sole basis for Eq/Ord/Hash.
    id: CommitId,
    // The underlying stored commit data, shared across clones.
    data: Arc<store::Commit>,
}
impl Debug for Commit {
    /// Shows only the commit id; the full stored data would be noisy.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let mut builder = f.debug_struct("Commit");
        builder.field("id", &self.id);
        builder.finish()
    }
}
/// Commits are compared by id alone; the store handle and cached data do
/// not participate in equality.
impl PartialEq for Commit {
    fn eq(&self, other: &Self) -> bool {
        self.id.eq(&other.id)
    }
}

impl Eq for Commit {}
/// Commits are ordered by id (an arbitrary but total order, useful for
/// sorting and deduplication).
impl Ord for Commit {
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&self.id, &other.id)
    }
}
impl PartialOrd for Commit {
    /// Delegates to `Ord` (canonical form) so the two orderings can never
    /// diverge; the original duplicated `self.id.cmp(&other.id)` here,
    /// which clippy flags as `non_canonical_partial_ord_impl`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
/// Hashes by id only, consistent with the `PartialEq` impl.
impl Hash for Commit {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        self.id.hash(hasher);
    }
}
impl Commit {
    /// Wraps stored commit `data` identified by `id`, keeping a store
    /// handle for resolving parents and trees.
    pub fn new(store: Arc<StoreWrapper>, id: CommitId, data: Arc<store::Commit>) -> Self {
        Commit { store, id, data }
    }

    pub fn id(&self) -> &CommitId {
        &self.id
    }

    /// The parent commit ids. A commit with no stored parents (other than
    /// the root commit itself) is reported as a child of the root commit.
    pub fn parent_ids(&self) -> Vec<CommitId> {
        if self.data.parents.is_empty() && &self.id != self.store.root_commit_id() {
            vec![self.store.root_commit_id().clone()]
        } else {
            self.data.parents.clone()
        }
    }

    /// The parent commits, resolved through the store. Applies the same
    /// root-commit fallback as `parent_ids()`.
    // Panics (unwrap) if a parent id cannot be resolved by the store.
    pub fn parents(&self) -> Vec<Commit> {
        let mut parents = Vec::new();
        for parent in &self.data.parents {
            parents.push(self.store.get_commit(parent).unwrap());
        }
        if parents.is_empty() && &self.id != self.store.root_commit_id() {
            parents.push(self.store.root_commit())
        }
        parents
    }

    /// The commits this commit was rewritten from (empty if it was not a
    /// rewrite).
    pub fn predecessors(&self) -> Vec<Commit> {
        let mut predecessors = Vec::new();
        for predecessor in &self.data.predecessors {
            predecessors.push(self.store.get_commit(predecessor).unwrap());
        }
        predecessors
    }

    /// The commit's root tree, loaded from the store.
    pub fn tree(&self) -> Tree {
        self.store
            .get_tree(&DirRepoPath::root(), &self.data.root_tree)
            .unwrap()
    }

    pub fn change_id(&self) -> &ChangeId {
        &self.data.change_id
    }

    /// The raw stored form of the commit.
    pub fn store_commit(&self) -> &store::Commit {
        &self.data
    }

    pub fn is_open(&self) -> bool {
        self.data.is_open
    }

    pub fn is_pruned(&self) -> bool {
        self.data.is_pruned
    }

    /// True if the commit has exactly one parent and the same tree as that
    /// parent, i.e. it introduces no changes. Note this resolves parent
    /// commits and trees, so it is not a cheap check.
    pub fn is_empty(&self) -> bool {
        let parents = self.parents();
        // TODO: Perhaps the root commit should also be considered empty.
        parents.len() == 1 && parents[0].tree().id() == self.tree().id()
    }

    pub fn description(&self) -> &str {
        &self.data.description
    }

    pub fn author(&self) -> &Signature {
        &self.data.author
    }

    pub fn committer(&self) -> &Signature {
        &self.data.committer
    }
}

172
lib/src/commit_builder.rs Normal file
View file

@@ -0,0 +1,172 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use uuid::Uuid;
use crate::commit::Commit;
use crate::repo::ReadonlyRepo;
use crate::settings::UserSettings;
use crate::store;
use crate::store::{ChangeId, CommitId, Signature, Timestamp, TreeId};
use crate::store_wrapper::StoreWrapper;
use crate::transaction::Transaction;
use std::sync::Arc;
// Builder for creating or rewriting a commit; configured via the `set_*`
// methods and consumed by one of the `write_to_*` methods.
#[derive(Debug)]
pub struct CommitBuilder {
    // Store the finished commit will be written to.
    store: Arc<StoreWrapper>,
    // The commit data being accumulated.
    commit: store::Commit,
}
/// Creates a fresh, random change id (the 16 bytes of a new v4 UUID).
pub fn new_change_id() -> ChangeId {
    let uuid = Uuid::new_v4();
    ChangeId(uuid.as_bytes().to_vec())
}
/// Builds a `Signature` for the configured user name/email with the
/// current time.
pub fn signature(settings: &UserSettings) -> Signature {
    // TODO: check if it's slow to get the timezone etc for every signature
    let timestamp = Timestamp::now();
    Signature {
        name: settings.user_name(),
        email: settings.user_email(),
        timestamp,
    }
}
impl CommitBuilder {
pub fn for_new_commit(
settings: &UserSettings,
store: &Arc<StoreWrapper>,
tree_id: TreeId,
) -> CommitBuilder {
let signature = signature(settings);
let commit = store::Commit {
parents: vec![],
predecessors: vec![],
root_tree: tree_id,
change_id: new_change_id(),
description: String::new(),
author: signature.clone(),
committer: signature,
is_open: false,
is_pruned: false,
};
CommitBuilder {
store: store.clone(),
commit,
}
}
pub fn for_rewrite_from(
settings: &UserSettings,
store: &Arc<StoreWrapper>,
predecessor: &Commit,
) -> CommitBuilder {
let mut commit = predecessor.store_commit().clone();
commit.predecessors = vec![predecessor.id().clone()];
commit.committer = signature(settings);
CommitBuilder {
store: store.clone(),
commit,
}
}
pub fn for_open_commit(
settings: &UserSettings,
store: &Arc<StoreWrapper>,
parent_id: CommitId,
tree_id: TreeId,
) -> CommitBuilder {
let signature = signature(settings);
let commit = store::Commit {
parents: vec![parent_id],
predecessors: vec![],
root_tree: tree_id,
change_id: new_change_id(),
description: String::new(),
author: signature.clone(),
committer: signature,
is_open: true,
is_pruned: false,
};
CommitBuilder {
store: store.clone(),
commit,
}
}
pub fn set_parents(mut self, parents: Vec<CommitId>) -> Self {
self.commit.parents = parents;
self
}
pub fn set_predecessors(mut self, predecessors: Vec<CommitId>) -> Self {
self.commit.predecessors = predecessors;
self
}
pub fn set_tree(mut self, tree_id: TreeId) -> Self {
self.commit.root_tree = tree_id;
self
}
pub fn set_change_id(mut self, change_id: ChangeId) -> Self {
self.commit.change_id = change_id;
self
}
pub fn generate_new_change_id(mut self) -> Self {
self.commit.change_id = new_change_id();
self
}
pub fn set_description(mut self, description: String) -> Self {
self.commit.description = description;
self
}
pub fn set_open(mut self, is_open: bool) -> Self {
self.commit.is_open = is_open;
self
}
pub fn set_pruned(mut self, is_pruned: bool) -> Self {
self.commit.is_pruned = is_pruned;
self
}
pub fn set_author(mut self, author: Signature) -> Self {
self.commit.author = author;
self
}
pub fn set_committer(mut self, committer: Signature) -> Self {
self.commit.committer = committer;
self
}
/// Convenience wrapper: writes the commit inside a freshly started
/// transaction (labeled `description`) and commits that transaction.
pub fn write_to_new_transaction(self, repo: &ReadonlyRepo, description: &str) -> Commit {
    let mut tx = repo.start_transaction(description);
    let commit = self.write_to_transaction(&mut tx);
    tx.commit();
    commit
}
/// Writes the built commit into `tx` and returns the resulting commit.
///
/// A commit whose only parent is the root commit is stored with an empty
/// parent list; the root commit must not be combined with other parents.
pub fn write_to_transaction(mut self, tx: &mut Transaction) -> Commit {
    let is_root_child = self
        .commit
        .parents
        .contains(self.store.root_commit_id());
    if is_root_child {
        assert_eq!(self.commit.parents.len(), 1);
        self.commit.parents.clear();
    }
    tx.write_commit(self.commit)
}
}

101
lib/src/conflicts.rs Normal file
View file

@ -0,0 +1,101 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::files;
use crate::repo_path::RepoPath;
use crate::store::{Conflict, TreeValue};
use crate::store_wrapper::StoreWrapper;
use std::io::Write;
/// Writes a human-readable rendering of `conflict` at `path` to `file`.
///
/// A simple 3-way conflict between three normal (non-executable) file values
/// is merged with `files::merge` and rendered with diff3-style markers
/// ("<<<<<<<", "|||||||", "=======", ">>>>>>>"). Any other shape of conflict
/// is replaced by a placeholder message.
pub fn materialize_conflict(
    store: &StoreWrapper,
    path: &RepoPath,
    conflict: &Conflict,
    file: &mut dyn Write,
) {
    match conflict.to_three_way() {
        None => {
            file.write_all(b"Unresolved complex conflict.\n").unwrap();
        }
        Some((Some(left), Some(base), Some(right))) => {
            match (left.value, base.value, right.value) {
                (
                    TreeValue::Normal {
                        id: left_id,
                        executable: false,
                    },
                    TreeValue::Normal {
                        id: base_id,
                        executable: false,
                    },
                    TreeValue::Normal {
                        id: right_id,
                        executable: false,
                    },
                ) => {
                    // Read all three versions of the file from the store.
                    let mut left_contents: Vec<u8> = vec![];
                    let mut base_contents: Vec<u8> = vec![];
                    let mut right_contents: Vec<u8> = vec![];
                    let file_path = path.to_file_repo_path();
                    store
                        .read_file(&file_path, &left_id)
                        .unwrap()
                        .read_to_end(&mut left_contents)
                        .unwrap();
                    store
                        .read_file(&file_path, &base_id)
                        .unwrap()
                        .read_to_end(&mut base_contents)
                        .unwrap();
                    store
                        .read_file(&file_path, &right_id)
                        .unwrap()
                        .read_to_end(&mut right_contents)
                        .unwrap();
                    let merge_result =
                        files::merge(&base_contents, &left_contents, &right_contents);
                    match merge_result {
                        files::MergeResult::Resolved(contents) => {
                            file.write_all(&contents).unwrap();
                        }
                        files::MergeResult::Conflict(hunks) => {
                            for hunk in hunks {
                                match hunk {
                                    files::MergeHunk::Resolved(contents) => {
                                        file.write_all(&contents).unwrap();
                                    }
                                    files::MergeHunk::Conflict { base, left, right } => {
                                        // Bug fix: terminate each marker with a
                                        // newline. The markers were previously
                                        // written without one, fusing them with
                                        // the hunk contents into a single
                                        // unreadable line.
                                        file.write_all(b"<<<<<<<\n").unwrap();
                                        file.write_all(&left).unwrap();
                                        file.write_all(b"|||||||\n").unwrap();
                                        file.write_all(&base).unwrap();
                                        file.write_all(b"=======\n").unwrap();
                                        file.write_all(&right).unwrap();
                                        file.write_all(b">>>>>>>\n").unwrap();
                                    }
                                }
                            }
                        }
                    }
                }
                _ => {
                    file.write_all(b"Unresolved 3-way conflict.\n").unwrap();
                }
            }
        }
        Some(_) => {
            file.write_all(b"Unresolved complex conflict.\n").unwrap();
        }
    }
}

456
lib/src/dag_walk.rs Normal file
View file

@ -0,0 +1,456 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::iter::Iterator;
use crate::commit::Commit;
use crate::store::CommitId;
use std::hash::Hash;
/// Iterator over a set of commits and all of their ancestors.
pub struct AncestorsIter {
    // 'static is fine here: the boxed closures passed in walk_ancestors()
    // capture nothing.
    bfs_iter: BfsIter<'static, 'static, Commit, CommitId, Vec<Commit>>,
}

impl Iterator for AncestorsIter {
    type Item = Commit;

    fn next(&mut self) -> Option<Self::Item> {
        self.bfs_iter.next()
    }
}

/// Walks `start` and everything reachable via parent edges; each commit is
/// yielded at most once (deduplicated by commit id).
pub fn walk_ancestors<II>(start: II) -> AncestorsIter
where
    II: IntoIterator<Item = Commit>,
{
    let bfs_iter = bfs(
        start,
        Box::new(|commit| commit.id().clone()),
        Box::new(|commit| commit.parents()),
    );
    AncestorsIter { bfs_iter }
}
/// Generic visit-each-node-once graph iterator.
///
/// NOTE(review): `work` is used as a stack (push/pop at the end), so despite
/// the name the traversal order is depth-first-like; only the "each node
/// yielded once" property should be relied on.
pub struct BfsIter<'id_fn, 'neighbors_fn, T, ID, NI> {
    id_fn: Box<dyn Fn(&T) -> ID + 'id_fn>,
    neighbors_fn: Box<dyn FnMut(&T) -> NI + 'neighbors_fn>,
    work: Vec<T>,
    visited: HashSet<ID>,
}

impl<T, ID, NI> Iterator for BfsIter<'_, '_, T, ID, NI>
where
    ID: Hash + Eq,
    NI: IntoIterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        // Idiomatic `while let` instead of the manual
        // `while !is_empty() { pop().unwrap() }` loop.
        while let Some(c) = self.work.pop() {
            let id = (self.id_fn)(&c);
            if self.visited.contains(&id) {
                continue;
            }
            for p in (self.neighbors_fn)(&c) {
                self.work.push(p);
            }
            self.visited.insert(id);
            return Some(c);
        }
        None
    }
}

/// Creates an iterator that yields each node reachable from `start` exactly
/// once, identified by `id_fn`, expanding edges with `neighbors_fn`.
pub fn bfs<'id_fn, 'neighbors_fn, T, ID, II, NI>(
    start: II,
    id_fn: Box<dyn Fn(&T) -> ID + 'id_fn>,
    neighbors_fn: Box<dyn FnMut(&T) -> NI + 'neighbors_fn>,
) -> BfsIter<'id_fn, 'neighbors_fn, T, ID, NI>
where
    ID: Hash + Eq,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    BfsIter {
        id_fn,
        neighbors_fn,
        work: start.into_iter().collect(),
        visited: Default::default(),
    }
}
/// NOTE(review): this type is structurally identical to `BfsIter` (same
/// fields, same `next()` body) and nothing in this file constructs it —
/// it looks like an unused duplicate; consider removing or merging with
/// `BfsIter`. Kept as-is since it is public.
pub struct TopoIter<'id_fn, 'neighbors_fn, T, ID, NI> {
    id_fn: Box<dyn Fn(&T) -> ID + 'id_fn>,
    neighbors_fn: Box<dyn FnMut(&T) -> NI + 'neighbors_fn>,
    work: Vec<T>,
    visited: HashSet<ID>,
}

impl<T, ID, NI> Iterator for TopoIter<'_, '_, T, ID, NI>
where
    ID: Hash + Eq,
    NI: IntoIterator<Item = T>,
{
    type Item = T;

    // Yields each node once, expanding neighbors onto the work stack.
    fn next(&mut self) -> Option<Self::Item> {
        while !self.work.is_empty() {
            let c = self.work.pop().unwrap();
            let id = (self.id_fn)(&c);
            if self.visited.contains(&id) {
                continue;
            }
            for p in (self.neighbors_fn)(&c) {
                self.work.push(p);
            }
            self.visited.insert(id);
            return Some(c);
        }
        None
    }
}
/// Returns the start nodes and everything reachable from them, ordered so
/// that every node appears before its neighbors (per the unit tests below:
/// with C -> B -> A, the result is [C, B, A]).
///
/// Panics with "graph has cycle" if a cycle is reachable.
pub fn topo_order_reverse<T, ID, II, NI>(
    start: II,
    id_fn: Box<dyn Fn(&T) -> ID>,
    mut neighbors_fn: Box<dyn FnMut(&T) -> NI>,
) -> Vec<T>
where
    T: Hash + Eq + Clone,
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    let mut visiting = HashSet::new();
    let mut emitted = HashSet::new();
    let mut result = vec![];
    let mut start_nodes: Vec<_> = start.into_iter().collect();
    start_nodes.reverse();
    for start_node in start_nodes {
        // Iterative post-order DFS. The bool on the stack records whether
        // the node's neighbors have already been pushed; the final reverse()
        // turns the post-order into the neighbors-last order we return.
        let mut stack = vec![(start_node, false)];
        while let Some((node, neighbors_visited)) = stack.pop() {
            let id = id_fn(&node);
            if emitted.contains(&id) {
                continue;
            }
            if !neighbors_visited {
                // Seeing a node twice while it is still being visited means
                // we followed an edge back into the current DFS path.
                assert!(visiting.insert(id.clone()), "graph has cycle");
                let neighbors = neighbors_fn(&node);
                stack.push((node, true));
                for neighbor in neighbors {
                    stack.push((neighbor, false));
                }
            } else {
                visiting.remove(&id);
                emitted.insert(id);
                result.push(node);
            }
        }
    }
    result.reverse();
    result
}
/// Returns the nodes reachable from `start` (including the start nodes) that
/// have no neighbors of their own.
pub fn leaves<T, ID, II, NI>(
    start: II,
    neighbors_fn: &mut impl FnMut(&T) -> NI,
    id_fn: &impl Fn(&T) -> ID,
) -> HashSet<T>
where
    T: Hash + Eq + Clone,
    ID: Hash + Eq,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Resolves the old TODO about wasted memory: the previous version kept
    // every visited node in a `leaves` set plus a `non_leaves` set and took
    // the set difference at the end. A node belongs in the result exactly
    // when expanding it yields no neighbors, so record that directly.
    let mut visited = HashSet::new();
    let mut leaves = HashSet::new();
    let mut work: Vec<T> = start.into_iter().collect();
    while let Some(node) = work.pop() {
        // Expand each node only on its first visit.
        if !visited.insert(id_fn(&node)) {
            continue;
        }
        let mut has_neighbors = false;
        for neighbor in neighbors_fn(&node) {
            has_neighbors = true;
            work.push(neighbor);
        }
        if !has_neighbors {
            leaves.insert(node);
        }
    }
    leaves
}
/// Find nodes in the start set that are not reachable from other nodes in the
/// start set.
pub fn unreachable<T, ID, II, NI>(
    start: II,
    neighbors_fn: &impl Fn(&T) -> NI,
    id_fn: &impl Fn(&T) -> ID,
) -> HashSet<T>
where
    T: Hash + Eq + Clone,
    ID: Hash + Eq,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Every start node begins as a candidate; walking the graph, any node
    // that appears as some visited node's neighbor is struck out. The
    // traversal (visit each node once, expand on first visit) is inlined
    // here rather than delegated to `bfs`.
    let start: Vec<T> = start.into_iter().collect();
    let mut candidates: HashSet<T> = start.iter().cloned().collect();
    let mut visited = HashSet::new();
    let mut work = start;
    while let Some(node) = work.pop() {
        if !visited.insert(id_fn(&node)) {
            continue;
        }
        for neighbor in neighbors_fn(&node) {
            candidates.remove(&neighbor);
            work.push(neighbor);
        }
    }
    candidates
}
/// Returns a closest common ancestor of the two commit sets, walking parent
/// edges. Panics if the sets share no ancestor at all.
pub fn common_ancestor<'a, I1, I2>(set1: I1, set2: I2) -> Commit
where
    I1: IntoIterator<Item = &'a Commit>,
    I2: IntoIterator<Item = &'a Commit>,
{
    let owned1: Vec<Commit> = set1.into_iter().cloned().collect();
    let owned2: Vec<Commit> = set2.into_iter().cloned().collect();
    let neighbors_fn = |commit: &Commit| commit.parents();
    let id_fn = |commit: &Commit| commit.id().clone();
    closest_common_node(owned1, owned2, &neighbors_fn, &id_fn).unwrap()
}
/// Finds a node reachable from both input sets that is closest (by number of
/// neighbor hops) to one of them, expanding the two frontiers in alternating
/// generations. Returns `None` if the sets share no reachable node.
pub fn closest_common_node<T, ID, II1, II2, NI>(
    set1: II1,
    set2: II2,
    neighbors_fn: &impl Fn(&T) -> NI,
    id_fn: &impl Fn(&T) -> ID,
) -> Option<T>
where
    T: Hash + Eq + Clone,
    ID: Hash + Eq,
    II1: IntoIterator<Item = T>,
    II2: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    let mut seen1 = HashSet::new();
    let mut seen2 = HashSet::new();
    let mut frontier1: Vec<T> = set1.into_iter().collect();
    let mut frontier2: Vec<T> = set2.into_iter().collect();
    while !(frontier1.is_empty() && frontier2.is_empty()) {
        // Advance side 1 by one generation, stopping at anything side 2 has
        // already seen.
        let mut next1 = vec![];
        for node in frontier1 {
            let id = id_fn(&node);
            if seen2.contains(&id) {
                return Some(node);
            }
            if seen1.insert(id) {
                next1.extend(neighbors_fn(&node));
            }
        }
        frontier1 = next1;
        // Then advance side 2 symmetrically.
        let mut next2 = vec![];
        for node in frontier2 {
            let id = id_fn(&node);
            if seen1.contains(&id) {
                return Some(node);
            }
            if seen2.insert(id) {
                next2.extend(neighbors_fn(&node));
            }
        }
        frontier2 = next2;
    }
    None
}
#[cfg(test)]
mod tests {
    // Unit tests for the generic graph-walking helpers. Graphs are modeled as
    // a HashMap from a char node to its list of neighbor chars.
    use super::*;
    use std::collections::HashMap;

    #[test]
    fn topo_order_reverse_linear() {
        // This graph:
        //  o C
        //  o B
        //  o A
        let mut neighbors = HashMap::new();
        neighbors.insert('A', vec![]);
        neighbors.insert('B', vec!['A']);
        neighbors.insert('C', vec!['B']);
        let common = topo_order_reverse(
            vec!['C'],
            Box::new(|node| *node),
            Box::new(move |node| neighbors[node].clone()),
        );
        assert_eq!(common, vec!['C', 'B', 'A']);
    }

    #[test]
    fn topo_order_reverse_merge() {
        // This graph:
        //  o F
        //  |\
        //  o | E
        //  | o D
        //  | o C
        //  | o B
        //  |/
        //  o A
        let mut neighbors = HashMap::new();
        neighbors.insert('A', vec![]);
        neighbors.insert('B', vec!['A']);
        neighbors.insert('C', vec!['B']);
        neighbors.insert('D', vec!['C']);
        neighbors.insert('E', vec!['A']);
        neighbors.insert('F', vec!['E', 'D']);
        let common = topo_order_reverse(
            vec!['F'],
            Box::new(|node| *node),
            Box::new(move |node| neighbors[node].clone()),
        );
        assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
    }

    #[test]
    fn topo_order_reverse_multiple_heads() {
        // This graph:
        //  o F
        //  |\
        //  o | E
        //  | o D
        //  | | o C
        //  | | |
        //  | | o B
        //  | |/
        //  |/
        //  o A
        let mut neighbors = HashMap::new();
        neighbors.insert('A', vec![]);
        neighbors.insert('B', vec!['A']);
        neighbors.insert('C', vec!['B']);
        neighbors.insert('D', vec!['A']);
        neighbors.insert('E', vec!['A']);
        neighbors.insert('F', vec!['E', 'D']);
        let common = topo_order_reverse(
            vec!['F', 'C'],
            Box::new(|node| *node),
            Box::new(move |node| neighbors[node].clone()),
        );
        assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
    }

    #[test]
    fn closest_common_node_tricky() {
        // Test this case where A is the shortest distance away, but we still want the
        // result to be B because A is an ancestor of B. In other words, we want
        // to minimize the longest distance.
        //
        //  E       H
        //  |\     /|
        //  | D   G |
        //  | C   F |
        //   \ \ / /
        //    \ B /
        //     \|/
        //      A
        let mut neighbors = HashMap::new();
        neighbors.insert('A', vec![]);
        neighbors.insert('B', vec!['A']);
        neighbors.insert('C', vec!['B']);
        neighbors.insert('D', vec!['C']);
        neighbors.insert('E', vec!['A', 'D']);
        neighbors.insert('F', vec!['B']);
        neighbors.insert('G', vec!['F']);
        neighbors.insert('H', vec!['A', 'G']);
        let common = closest_common_node(
            vec!['E'],
            vec!['H'],
            &|node| neighbors[node].clone(),
            &|node| *node,
        );
        // TODO: fix the implementation to return B
        assert_eq!(common, Some('A'));
    }

    #[test]
    fn unreachable_mixed() {
        // Test the uppercase letters are in the start set
        //
        //  D F
        //  |/|
        //  C e
        //  |/
        //  b
        //  |
        //  A
        let mut neighbors = HashMap::new();
        neighbors.insert('A', vec![]);
        neighbors.insert('b', vec!['A']);
        neighbors.insert('C', vec!['b']);
        neighbors.insert('D', vec!['C']);
        neighbors.insert('e', vec!['b']);
        neighbors.insert('F', vec!['C', 'e']);
        let expected: HashSet<char> = vec!['D', 'F'].into_iter().collect();
        let actual = unreachable(
            vec!['A', 'C', 'D', 'F'],
            &|node| neighbors[node].clone(),
            &|node| *node,
        );
        assert_eq!(actual, expected);
        // Check with a different order in the start set
        let actual = unreachable(
            vec!['F', 'D', 'C', 'A'],
            &|node| neighbors[node].clone(),
            &|node| *node,
        );
        assert_eq!(actual, expected);
    }
}

500
lib/src/evolution.rs Normal file
View file

@ -0,0 +1,500 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
use crate::commit::Commit;
use crate::commit_builder::CommitBuilder;
use crate::dag_walk::{bfs, closest_common_node, leaves, walk_ancestors};
use crate::repo::{ReadonlyRepo, Repo};
use crate::repo_path::DirRepoPath;
use crate::rewrite::{merge_commit_trees, rebase_commit};
use crate::settings::UserSettings;
use crate::store::{ChangeId, CommitId};
use crate::store_wrapper::StoreWrapper;
use crate::transaction::{MutableRepo, Transaction};
use crate::trees::merge_trees;
use crate::view::View;
/// Precomputed evolution state for one repo view: which commits were
/// rewritten, which are obsolete or orphaned, and which changes diverged.
#[derive(Debug, Clone)]
struct State {
    /// Contains all successors whether they have the same change id or not.
    successors: HashMap<CommitId, HashSet<CommitId>>,
    /// Contains the subset of the keys in `successors` for which there is a
    /// successor with the same change id.
    obsolete_commits: HashSet<CommitId>,
    /// Non-obsolete descendants of obsolete commits.
    orphan_commits: HashSet<CommitId>,
    /// For each change id with more than one non-obsolete commit, the set of
    /// those commits.
    divergent_changes: HashMap<ChangeId, HashSet<CommitId>>,
}
impl State {
    /// Computes the full evolution state from scratch by walking all commits
    /// reachable from the view's heads.
    fn calculate(store: &StoreWrapper, view: &dyn View) -> State {
        let mut successors = HashMap::new();
        let mut obsolete_commits = HashSet::new();
        let mut orphan_commits = HashSet::new();
        let mut divergent_changes = HashMap::new();
        let mut heads = vec![];
        for commit_id in view.heads() {
            heads.push(store.get_commit(commit_id).unwrap());
        }
        // First pass: collect all reachable commits, an (empty for now)
        // children entry per commit, and a change-id -> commits index.
        let mut commits = HashSet::new();
        let mut children = HashMap::new();
        let mut change_to_commits = HashMap::new();
        for commit in walk_ancestors(heads) {
            children.insert(commit.id().clone(), HashSet::new());
            change_to_commits
                .entry(commit.change_id().clone())
                .or_insert_with(HashSet::new)
                .insert(commit.id().clone());
            commits.insert(commit);
        }
        // Scan all commits to find obsolete commits and to build a lookup of
        // children of a commit
        for commit in &commits {
            // Pruned commits count as obsolete even without a successor.
            if commit.is_pruned() {
                obsolete_commits.insert(commit.id().clone());
            }
            for predecessor in commit.predecessors() {
                // Predecessors outside the reachable set are ignored.
                if !commits.contains(&predecessor) {
                    continue;
                }
                successors
                    .entry(predecessor.id().clone())
                    .or_insert_with(HashSet::new)
                    .insert(commit.id().clone());
                // A successor with the same change id makes the predecessor
                // obsolete.
                if predecessor.change_id() == commit.change_id() {
                    obsolete_commits.insert(predecessor.id().clone());
                }
            }
            for parent in commit.parents() {
                if let Some(children) = children.get_mut(parent.id()) {
                    children.insert(commit.id().clone());
                }
            }
        }
        // Find divergent commits
        for (change_id, commit_ids) in change_to_commits {
            let divergent: HashSet<CommitId> =
                commit_ids.difference(&obsolete_commits).cloned().collect();
            if divergent.len() > 1 {
                divergent_changes.insert(change_id, divergent);
            }
        }
        // Find orphans by walking to the children of obsolete commits
        let mut work: Vec<CommitId> = obsolete_commits.iter().map(ToOwned::to_owned).collect();
        while !work.is_empty() {
            let commit_id = work.pop().unwrap();
            for child in children.get(&commit_id).unwrap() {
                if orphan_commits.insert(child.clone()) {
                    work.push(child.clone());
                }
            }
        }
        // Obsolete commits are not themselves considered orphans.
        orphan_commits = orphan_commits
            .difference(&obsolete_commits)
            .map(ToOwned::to_owned)
            .collect();
        State {
            successors,
            obsolete_commits,
            orphan_commits,
            divergent_changes,
        }
    }

    /// Given a current parent, finds the set of commits that should replace
    /// it: tip-most live successors with the same change id (walking through
    /// pruned successors to their parents, and recursing through chains of
    /// rewrites). Returns `{old_parent_id}` if no replacement is found.
    pub fn new_parent(&self, store: &StoreWrapper, old_parent_id: &CommitId) -> HashSet<CommitId> {
        let mut new_parents = HashSet::new();
        if let Some(successor_ids) = self.successors.get(old_parent_id) {
            let old_parent = store.get_commit(old_parent_id).unwrap();
            let successors: HashSet<_> = successor_ids
                .iter()
                .map(|id| store.get_commit(id).unwrap())
                .collect();
            // Parent/child edges restricted to the successor set.
            let mut children = HashMap::new();
            for successor in &successors {
                for parent in successor.parents() {
                    if let Some(parent) = successors.get(&parent) {
                        children
                            .entry(parent.clone())
                            .or_insert_with(HashSet::new)
                            .insert(successor.clone());
                    }
                }
            }
            let mut all_candidates = HashSet::new();
            for successor in &successors {
                if successor.change_id() != old_parent.change_id() {
                    continue;
                }
                // Start with the successor as candidate.
                let mut candidates = HashSet::new();
                candidates.insert(successor.clone());
                // If the successor has children that are successors of the same
                // commit, we consider the original commit to be a split. We then return
                // the tip-most successor.
                candidates = leaves(
                    candidates,
                    &mut |commit: &Commit| -> HashSet<Commit> {
                        if let Some(children) = children.get(commit) {
                            children.clone()
                        } else {
                            HashSet::new()
                        }
                    },
                    &|commit: &Commit| -> CommitId { commit.id().clone() },
                );
                // If a successor is pruned, use its parent(s) instead.
                candidates = leaves(
                    candidates,
                    &mut |commit: &Commit| -> Vec<Commit> {
                        if commit.is_pruned() {
                            commit.parents()
                        } else {
                            vec![]
                        }
                    },
                    &|commit: &Commit| -> CommitId { commit.id().clone() },
                );
                for candidate in candidates {
                    all_candidates.insert(candidate.clone());
                }
            }
            // Filter out candidates that are ancestors of other candidates.
            let non_heads: Vec<_> = all_candidates
                .iter()
                .flat_map(|commit| commit.parents())
                .collect();
            for commit in walk_ancestors(non_heads) {
                all_candidates.remove(&commit);
            }
            for candidate in all_candidates {
                // TODO: Make this not recursive
                for effective_successor in self.new_parent(store, candidate.id()) {
                    new_parents.insert(effective_successor);
                }
            }
        }
        if new_parents.is_empty() {
            // TODO: Should we go to the parents here too if the commit is pruned?
            new_parents.insert(old_parent_id.clone());
        }
        new_parents
    }
}
/// Queries about the evolution state of commits (rewrites, obsolescence,
/// orphans, divergence).
pub trait Evolution {
    /// Returns all recorded successors of the commit (regardless of change
    /// id); empty if it was never rewritten.
    fn successors(&self, commit_id: &CommitId) -> HashSet<CommitId>;

    /// True if the commit is pruned or has a successor with the same change
    /// id.
    fn is_obsolete(&self, commit_id: &CommitId) -> bool;

    /// True if the commit is a non-obsolete descendant of an obsolete commit.
    fn is_orphan(&self, commit_id: &CommitId) -> bool;

    /// True if the change has more than one non-obsolete commit.
    fn is_divergent(&self, change_id: &ChangeId) -> bool;

    /// Given a current parent, finds the new parent candidates. If the current
    /// parent is not obsolete, then a singleton set of that commit will be
    /// returned.
    ///
    ///  * If a successor is pruned, its parent(s) will instead be included (or
    ///    their parents if they are also pruned).
    ///
    ///  * If the commit has multiple live successors, the tip-most one(s) of
    ///    them will be chosen.
    ///
    /// The second case is more complex than it probably seems. For example,
    /// let's say commit A was split into B, A', and C (where A' has the same
    /// change id as A). Then C is rebased to somewhere else and becomes C'.
    /// We will choose that C' as effective successor even though it has a
    /// different change id and is not a descendant of one that does.
    fn new_parent(&self, old_parent_id: &CommitId) -> HashSet<CommitId>;
}
/// Evolution state for a read-only repo. The `State` is computed lazily on
/// first query and cached behind a mutex.
pub struct ReadonlyEvolution<'r> {
    repo: &'r ReadonlyRepo,
    // None until the first query triggers State::calculate().
    state: Mutex<Option<Arc<State>>>,
}
/// Callbacks used by `evolve()` to report the outcome of each step.
pub trait EvolveListener {
    /// An orphan was rebased onto its parents' successors.
    fn orphan_evolved(&mut self, orphan: &Commit, new_commit: &Commit);
    /// An orphan was skipped because a parent has multiple candidate
    /// successors.
    fn orphan_target_ambiguous(&mut self, orphan: &Commit);
    /// All divergent commits of a change were merged into `resolved`.
    fn divergent_resolved(&mut self, divergents: &[Commit], resolved: &Commit);
    /// Divergence resolution stopped: the two commits share no common
    /// predecessor.
    fn divergent_no_common_predecessor(&mut self, commit1: &Commit, commit2: &Commit);
}
impl Evolution for ReadonlyEvolution<'_> {
    /// Returns the recorded successors of `commit_id`, or an empty set.
    fn successors(&self, commit_id: &CommitId) -> HashSet<CommitId> {
        // `unwrap_or_default()` is the idiomatic form of
        // `unwrap_or_else(HashSet::new)`.
        self.get_state()
            .successors
            .get(commit_id)
            .cloned()
            .unwrap_or_default()
    }

    fn is_obsolete(&self, commit_id: &CommitId) -> bool {
        self.get_state().obsolete_commits.contains(commit_id)
    }

    fn is_orphan(&self, commit_id: &CommitId) -> bool {
        self.get_state().orphan_commits.contains(commit_id)
    }

    fn is_divergent(&self, change_id: &ChangeId) -> bool {
        self.get_state().divergent_changes.contains_key(change_id)
    }

    fn new_parent(&self, old_parent_id: &CommitId) -> HashSet<CommitId> {
        self.get_state()
            .new_parent(self.repo.store(), old_parent_id)
    }
}
impl<'r> ReadonlyEvolution<'r> {
    /// Creates an evolution view of `repo`; the state is computed lazily.
    pub fn new(repo: &'r ReadonlyRepo) -> Self {
        ReadonlyEvolution {
            repo,
            state: Mutex::new(None),
        }
    }

    /// Returns the cached state, calculating it on first use.
    fn get_state(&self) -> Arc<State> {
        let mut locked_state = self.state.lock().unwrap();
        // `get_or_insert_with` replaces the manual is_none()/replace() dance.
        locked_state
            .get_or_insert_with(|| {
                Arc::new(State::calculate(self.repo.store(), self.repo.view()))
            })
            .clone()
    }

    /// Starts a mutable evolution for `repo`, seeded with this instance's
    /// cached state (if already computed).
    pub fn start_modification<'m>(&self, repo: &'m MutableRepo<'r>) -> MutableEvolution<'r, 'm> {
        MutableEvolution {
            repo,
            state: Mutex::new(self.state.lock().unwrap().clone()),
        }
    }
}
/// Evolution state for a repo being mutated. Like `ReadonlyEvolution`, the
/// `State` is computed lazily and cached, and it can be invalidated when the
/// repo changes.
pub struct MutableEvolution<'r, 'm: 'r> {
    repo: &'m MutableRepo<'r>,
    // None means the state must be (re)calculated on the next query.
    state: Mutex<Option<Arc<State>>>,
}
impl Evolution for MutableEvolution<'_, '_> {
    /// Returns the recorded successors of `commit_id`, or an empty set.
    fn successors(&self, commit_id: &CommitId) -> HashSet<CommitId> {
        // `unwrap_or_default()` is the idiomatic form of
        // `unwrap_or_else(HashSet::new)`.
        self.get_state()
            .successors
            .get(commit_id)
            .cloned()
            .unwrap_or_default()
    }

    fn is_obsolete(&self, commit_id: &CommitId) -> bool {
        self.get_state().obsolete_commits.contains(commit_id)
    }

    fn is_orphan(&self, commit_id: &CommitId) -> bool {
        self.get_state().orphan_commits.contains(commit_id)
    }

    fn is_divergent(&self, change_id: &ChangeId) -> bool {
        self.get_state().divergent_changes.contains_key(change_id)
    }

    fn new_parent(&self, old_parent_id: &CommitId) -> HashSet<CommitId> {
        self.get_state()
            .new_parent(self.repo.store(), old_parent_id)
    }
}
impl MutableEvolution<'_, '_> {
    /// Returns the cached state, recalculating it if it was invalidated.
    fn get_state(&self) -> Arc<State> {
        let mut locked_state = self.state.lock().unwrap();
        // `get_or_insert_with` replaces the manual is_none()/replace() dance.
        locked_state
            .get_or_insert_with(|| {
                Arc::new(State::calculate(self.repo.store(), self.repo.view()))
            })
            .clone()
    }

    /// Drops the cached state so it gets recalculated on the next query.
    pub fn invalidate(&mut self) {
        // Consistency fix: unwrap the lock Result directly like get_state()
        // does, instead of the previous `self.state.lock()` followed by
        // `as_mut().unwrap()` on the Result.
        self.state.lock().unwrap().take();
    }
}
/// Resolves divergent changes and rebases orphans in `tx`, reporting each
/// outcome through `listener`.
pub fn evolve(
    user_settings: &UserSettings,
    tx: &mut Transaction,
    listener: &mut dyn EvolveListener,
) {
    let store = tx.store().clone();

    // TODO: update the state in the transaction
    let state = tx.as_repo_mut().evolution_mut().get_state();

    // Resolving divergence can create new orphans but not vice versa, so resolve
    // divergence first.
    for commit_ids in state.divergent_changes.values() {
        let commits: HashSet<Commit> = commit_ids
            .iter()
            .map(|id| store.get_commit(&id).unwrap())
            .collect();
        evolve_divergent_change(user_settings, &store, tx, listener, &commits);
    }

    let orphans: HashSet<Commit> = state
        .orphan_commits
        .iter()
        .map(|id| store.get_commit(&id).unwrap())
        .collect();
    let non_heads: HashSet<Commit> = orphans.iter().flat_map(|commit| commit.parents()).collect();
    let orphan_heads: HashSet<Commit> = orphans.difference(&non_heads).cloned().collect();
    // Walk from the orphan heads down through orphan parents so that popping
    // from the end of the vec yields parents before children.
    let mut orphans_topo_order = vec![];
    for commit in bfs(
        orphan_heads,
        Box::new(|commit| commit.id().clone()),
        Box::new(|commit| {
            commit
                .parents()
                .iter()
                .filter(|commit| state.orphan_commits.contains(commit.id()))
                .cloned()
                .collect::<Vec<_>>()
        }),
    ) {
        orphans_topo_order.push(commit);
    }

    while !orphans_topo_order.is_empty() {
        let orphan = orphans_topo_order.pop().unwrap();
        let old_parents = orphan.parents();
        let mut new_parents = vec![];
        let mut ambiguous_new_parents = false;
        for old_parent in &old_parents {
            let new_parent_candidates = state.new_parent(&store, old_parent.id());
            // More than one candidate successor: let the caller decide.
            if new_parent_candidates.len() > 1 {
                ambiguous_new_parents = true;
                break;
            }
            new_parents.push(
                store
                    .get_commit(new_parent_candidates.iter().next().unwrap())
                    .unwrap(),
            );
        }
        if ambiguous_new_parents {
            listener.orphan_target_ambiguous(&orphan);
        } else {
            let new_commit = rebase_commit(user_settings, tx, &orphan, &new_parents);
            listener.orphan_evolved(&orphan, &new_commit);
        }
    }
}
/// Merges a set of divergent commits of one change down to a single commit,
/// two at a time, and reports the result (or failure) through `listener`.
fn evolve_divergent_change(
    user_settings: &UserSettings,
    store: &Arc<StoreWrapper>,
    tx: &mut Transaction,
    listener: &mut dyn EvolveListener,
    commits: &HashSet<Commit>,
) {
    // Resolve divergence pair-wise, starting with the two oldest commits.
    let mut commits: Vec<Commit> = commits.iter().cloned().collect();
    commits.sort_by(|a: &Commit, b: &Commit| a.committer().timestamp.cmp(&b.committer().timestamp));
    // Newest first, so the two oldest are at the end and get popped first.
    commits.reverse();

    // Create a copy to pass to the listener
    let sources = commits.clone();

    while commits.len() > 1 {
        let commit2 = commits.pop().unwrap();
        let commit1 = commits.pop().unwrap();
        // The 3-way merge base is the closest common predecessor (walking
        // predecessor edges, not parent edges).
        let common_predecessor = closest_common_node(
            vec![commit1.clone()],
            vec![commit2.clone()],
            &|commit: &Commit| commit.predecessors(),
            &|commit: &Commit| commit.id().clone(),
        );
        match common_predecessor {
            None => {
                listener.divergent_no_common_predecessor(&commit1, &commit2);
                return;
            }
            Some(common_predecessor) => {
                let resolved_commit = evolve_two_divergent_commits(
                    user_settings,
                    store,
                    tx,
                    &common_predecessor,
                    &commit1,
                    &commit2,
                );
                // The merge result participates in further pair-wise merges.
                commits.push(resolved_commit);
            }
        }
    }

    let resolved = commits.pop().unwrap();
    listener.divergent_resolved(&sources, &resolved);
}
/// Merges two divergent commits into one, written as a rewrite of `commit1`.
///
/// `commit2`'s tree and the common predecessor's tree are first rebased onto
/// `commit1`'s parents (via tree merges), then the result is the 3-way merge
/// of `commit1`'s tree against the rebased predecessor and rebased `commit2`.
fn evolve_two_divergent_commits(
    user_settings: &UserSettings,
    store: &Arc<StoreWrapper>,
    tx: &mut Transaction,
    common_predecessor: &Commit,
    commit1: &Commit,
    commit2: &Commit,
) -> Commit {
    let new_parents = commit1.parents();
    // Rebase commit2's tree onto the new parents, unless it's already there.
    let rebased_tree2 = if commit2.parents() == new_parents {
        commit2.tree()
    } else {
        let old_base_tree = merge_commit_trees(store, &commit2.parents());
        let new_base_tree = merge_commit_trees(store, &new_parents);
        let tree_id = merge_trees(&new_base_tree, &old_base_tree, &commit2.tree()).unwrap();
        store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
    };
    // Same for the common predecessor's tree (the merge base).
    let rebased_predecessor_tree = if common_predecessor.parents() == new_parents {
        common_predecessor.tree()
    } else {
        let old_base_tree = merge_commit_trees(store, &common_predecessor.parents());
        let new_base_tree = merge_commit_trees(store, &new_parents);
        let tree_id =
            merge_trees(&new_base_tree, &old_base_tree, &common_predecessor.tree()).unwrap();
        store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
    };
    let resolved_tree =
        merge_trees(&commit1.tree(), &rebased_predecessor_tree, &rebased_tree2).unwrap();
    // TODO: Merge commit description and other commit metadata. How do we deal with
    // conflicts? It's probably best to interactively ask the caller (which
    // might ask the user in interactive use).
    CommitBuilder::for_rewrite_from(user_settings, store, &commit1)
        .set_tree(resolved_tree)
        .set_predecessors(vec![commit1.id().clone(), commit2.id().clone()])
        .write_to_transaction(tx)
}

351
lib/src/files.rs Normal file
View file

@ -0,0 +1,351 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use diff::slice as diff_slice;
use std::fmt::{Debug, Error, Formatter};
/// True if `a` is a word character (ASCII alphanumeric or underscore).
fn is_word_byte(a: u8) -> bool {
    a.is_ascii_alphanumeric() || a == b'_'
}

/// True if byte `b` may follow byte `a` within the same token.
fn is_same_word(a: u8, b: u8) -> bool {
    // Bytes with the high bit set are continuation/lead bytes of multi-byte
    // UTF-8 sequences; never split right after one of them.
    if a & 0x80 != 0 {
        return true;
    }
    is_word_byte(a) && is_word_byte(b)
}

/// Splits `data` into tokens: runs of word bytes stay together, while other
/// bytes are split apart (subject to the UTF-8 rule in `is_same_word`).
fn tokenize(data: &[u8]) -> Vec<Vec<u8>> {
    // TODO: Fix this code to not be so inefficient, and to allow the word
    // delimiter to be configured.
    let mut tokens = vec![];
    let mut current = vec![];
    for &b in data {
        match current.last() {
            // The previous byte ends a token: flush it and start a new one.
            Some(&prev) if !is_same_word(prev, b) => {
                tokens.push(std::mem::replace(&mut current, vec![b]));
            }
            // Empty token or same word: keep accumulating.
            _ => current.push(b),
        }
    }
    if !current.is_empty() {
        tokens.push(current);
    }
    tokens
}
/// One token-level hunk of a diff: present in both sides, only the right
/// side, or only the left side.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum DiffHunk {
    Unmodified(Vec<u8>),
    Added(Vec<u8>),
    Removed(Vec<u8>),
}

/// One output line of `diff()`: its position on each side and the hunks that
/// make it up.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct DiffLine {
    pub left_line_number: u32,
    pub right_line_number: u32,
    pub has_left_content: bool,
    pub has_right_content: bool,
    pub hunks: Vec<DiffHunk>,
}

impl DiffLine {
    // Clears per-line state (content flags and hunks) while keeping the line
    // numbers, ready for the next line.
    fn reset_line(&mut self) {
        self.has_left_content = false;
        self.has_right_content = false;
        self.hunks.clear();
    }

    /// True if every hunk on this line is unmodified.
    pub fn is_unmodified(&self) -> bool {
        self.hunks
            .iter()
            .all(|hunk| matches!(hunk, DiffHunk::Unmodified(_)))
    }
}
/// Word-level diff of `left` and `right`, reported line by line through
/// `callback`. Inputs are tokenized with `tokenize`; a lone "\n" token ends
/// the current line.
pub fn diff(left: &[u8], right: &[u8], callback: &mut impl FnMut(&DiffLine)) {
    // TODO: Should we attempt to interpret as utf-8 and otherwise break only at
    // newlines?
    let left_tokens = tokenize(left);
    let right_tokens = tokenize(right);
    let result = diff_slice(&left_tokens, &right_tokens);
    let mut diff_line = DiffLine {
        left_line_number: 1,
        right_line_number: 1,
        has_left_content: false,
        has_right_content: false,
        hunks: vec![],
    };
    for hunk in result {
        match hunk {
            diff::Result::Both(left, right) => {
                assert!(left == right);
                diff_line.has_left_content = true;
                diff_line.has_right_content = true;
                diff_line.hunks.push(DiffHunk::Unmodified(left.clone()));
                // A newline on both sides ends the line on both sides.
                if left == &[b'\n'] {
                    callback(&diff_line);
                    diff_line.left_line_number += 1;
                    diff_line.right_line_number += 1;
                    diff_line.reset_line();
                }
            }
            diff::Result::Left(left) => {
                diff_line.has_left_content = true;
                diff_line.hunks.push(DiffHunk::Removed(left.clone()));
                // A removed newline only advances the left line number.
                if left == &[b'\n'] {
                    callback(&diff_line);
                    diff_line.left_line_number += 1;
                    diff_line.reset_line();
                }
            }
            diff::Result::Right(right) => {
                diff_line.has_right_content = true;
                diff_line.hunks.push(DiffHunk::Added(right.clone()));
                // An added newline only advances the right line number.
                if right == &[b'\n'] {
                    callback(&diff_line);
                    diff_line.right_line_number += 1;
                    diff_line.reset_line();
                }
            }
        }
    }
    // Flush a trailing line that didn't end with a newline.
    if !diff_line.hunks.is_empty() {
        callback(&diff_line);
    }
}
/// One hunk of a 3-way merge result: either cleanly merged bytes or a
/// conflict carrying all three versions.
#[derive(PartialEq, Eq, Clone)]
pub enum MergeHunk {
    Resolved(Vec<u8>),
    Conflict {
        base: Vec<u8>,
        left: Vec<u8>,
        right: Vec<u8>,
    },
}

// Custom Debug: render the byte contents as (lossy) strings instead of raw
// byte arrays, which makes test failures and logs readable.
impl Debug for MergeHunk {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        match self {
            MergeHunk::Resolved(data) => f
                .debug_tuple("Resolved")
                .field(&String::from_utf8_lossy(data))
                .finish(),
            MergeHunk::Conflict { base, left, right } => f
                .debug_struct("Conflict")
                .field("base", &String::from_utf8_lossy(base))
                .field("left", &String::from_utf8_lossy(left))
                .field("right", &String::from_utf8_lossy(right))
                .finish(),
        }
    }
}
/// Outcome of a 3-way merge: fully resolved content, or a sequence of hunks
/// some of which are conflicts.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum MergeResult {
    Resolved(Vec<u8>),
    Conflict(Vec<MergeHunk>),
}
/// Merges `left` and `right` relative to `base`. Returns
/// `MergeResult::Conflict` (not `None`) if the merge does not resolve
/// cleanly.
pub fn merge(base: &[u8], left: &[u8], right: &[u8]) -> MergeResult {
let base_tokens = tokenize(base);
let left_tokens = tokenize(left);
let right_tokens = tokenize(right);
let left_diff = diff_slice(&base_tokens, &left_tokens);
let right_diff = diff_slice(&base_tokens, &right_tokens);
let mut hunk: Vec<u8> = vec![];
let mut hunks: Vec<MergeHunk> = vec![];
let mut left_it = left_diff.iter();
let mut right_it = right_diff.iter();
let mut left_hunk = left_it.next();
let mut right_hunk = right_it.next();
loop {
match (left_hunk, right_hunk) {
(None, None) => {
break;
}
(Some(diff::Result::Both(left_data_before, left_data_after)), _)
if left_data_before == left_data_after =>
{
// Left unmodified
match right_hunk.unwrap() {
diff::Result::Both(right_data_before, right_data_after) => {
// Left unmodified, right modified
assert_eq!(left_data_before, right_data_before);
hunk.append(&mut right_data_after.to_vec());
left_hunk = left_it.next();
right_hunk = right_it.next();
}
diff::Result::Left(right_data_before) => {
// Left unmodified, right deleted
assert_eq!(left_data_before, right_data_before);
left_hunk = left_it.next();
right_hunk = right_it.next();
}
diff::Result::Right(right_data_after) => {
// Left unmodified, right inserted
hunk.append(&mut right_data_after.to_vec());
right_hunk = right_it.next();
}
}
}
(_, Some(diff::Result::Both(right_data_before, right_data_after)))
if right_data_before == right_data_after =>
{
// Right unmodified
match left_hunk.unwrap() {
diff::Result::Both(left_data_before, left_data_after) => {
// Right unmodified, left modified
assert_eq!(left_data_before, right_data_before);
hunk.append(&mut left_data_after.to_vec());
left_hunk = left_it.next();
right_hunk = right_it.next();
}
diff::Result::Left(left_data_before) => {
// Right unmodified, left deleted
assert_eq!(left_data_before, right_data_before);
left_hunk = left_it.next();
right_hunk = right_it.next();
}
diff::Result::Right(left_data_after) => {
// Right unmodified, left inserted
hunk.append(&mut left_data_after.to_vec());
left_hunk = left_it.next();
}
}
}
(
Some(diff::Result::Left(left_data_before)),
Some(diff::Result::Left(right_data_before)),
) => {
// Both deleted the same
assert_eq!(left_data_before, right_data_before);
left_hunk = left_it.next();
right_hunk = right_it.next();
}
(
Some(diff::Result::Right(left_data_after)),
Some(diff::Result::Right(right_data_after)),
) => {
if left_data_after == right_data_after {
// Both inserted the same
hunk.append(&mut left_data_after.to_vec());
} else {
// Each side inserted different
if !hunk.is_empty() {
hunks.push(MergeHunk::Resolved(hunk));
}
hunks.push(MergeHunk::Conflict {
base: vec![],
left: left_data_after.to_vec(),
right: right_data_after.to_vec(),
});
hunk = vec![];
}
left_hunk = left_it.next();
right_hunk = right_it.next();
}
(Some(diff::Result::Right(left_data_after)), None) => {
// Left inserted at EOF
hunk.append(&mut left_data_after.to_vec());
left_hunk = left_it.next();
}
(None, Some(diff::Result::Right(right_data_after))) => {
// Right inserted at EOF
hunk.append(&mut right_data_after.to_vec());
right_hunk = right_it.next();
}
_ => {
panic!("unhandled merge case: {:?}, {:?}", left_hunk, right_hunk);
}
}
}
if hunks.is_empty() {
MergeResult::Resolved(hunk)
} else {
if !hunk.is_empty() {
hunks.push(MergeHunk::Resolved(hunk));
}
MergeResult::Conflict(hunks)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Exercises merge() on small word-level inputs: trivial cases, one-sided
    // edits, identical edits, and genuinely conflicting insertions/changes.
    #[test]
    fn test_merge() {
        assert_eq!(merge(b"", b"", b""), MergeResult::Resolved(b"".to_vec()));
        assert_eq!(
            merge(b"a", b"a", b"a"),
            MergeResult::Resolved(b"a".to_vec())
        );
        assert_eq!(merge(b"a", b"", b"a"), MergeResult::Resolved(b"".to_vec()));
        assert_eq!(merge(b"a", b"a", b""), MergeResult::Resolved(b"".to_vec()));
        assert_eq!(merge(b"a", b"", b""), MergeResult::Resolved(b"".to_vec()));
        assert_eq!(
            merge(b"a", b"a b", b"a"),
            MergeResult::Resolved(b"a b".to_vec())
        );
        assert_eq!(
            merge(b"a", b"a", b"a b"),
            MergeResult::Resolved(b"a b".to_vec())
        );
        // Different insertions after the same prefix conflict.
        assert_eq!(
            merge(b"a", b"a b", b"a c"),
            MergeResult::Conflict(vec![
                MergeHunk::Resolved(b"a ".to_vec()),
                MergeHunk::Conflict {
                    base: b"".to_vec(),
                    left: b"b".to_vec(),
                    right: b"c".to_vec()
                }
            ])
        );
        assert_eq!(
            merge(b"a", b"b", b"a"),
            MergeResult::Resolved(b"b".to_vec())
        );
        assert_eq!(
            merge(b"a", b"a", b"b"),
            MergeResult::Resolved(b"b".to_vec())
        );
        // TODO: It seems like the a->b transition get reported as [Left(a),Right(b)]
        // instead of [Both(a,b)], so there is unexpectedly no conflict
        // here
        assert_eq!(merge(b"a", b"", b"b"), MergeResult::Resolved(b"b".to_vec()));
        assert_eq!(merge(b"a", b"b", b""), MergeResult::Resolved(b"b".to_vec()));
        assert_eq!(
            merge(b"a", b"b", b"c"),
            MergeResult::Conflict(vec![MergeHunk::Conflict {
                base: b"".to_vec(),
                left: b"b".to_vec(),
                right: b"c".to_vec()
            }])
        );
    }
}

597
lib/src/git_store.rs Normal file
View file

@ -0,0 +1,597 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{Debug, Error, Formatter};
use std::io::Cursor;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Mutex;
use std::time::Duration;
use git2::Oid;
use protobuf::Message;
use crate::repo_path::{DirRepoPath, FileRepoPath};
use crate::store::{
ChangeId, Commit, CommitId, Conflict, ConflictId, ConflictPart, FileId, MillisSinceEpoch,
Signature, Store, StoreError, StoreResult, SymlinkId, Timestamp, Tree, TreeId, TreeValue,
};
// Git ref under which jj stores its extra per-commit metadata as git notes.
const NOTES_REF: &str = "refs/notes/jj/commits";
// Lock file libgit2 creates while updating the notes ref; matched against
// error messages to detect contention (see write_commit()).
const NOTES_REF_LOCK: &str = "refs/notes/jj/commits.lock";
// Suffix appended to tree entry names that represent jj conflict objects.
const CONFLICT_SUFFIX: &str = ".jjconflict";
impl From<git2::Error> for StoreError {
    /// Maps libgit2's "not found" to the store's dedicated variant; every
    /// other git error is carried over as an opaque message.
    fn from(err: git2::Error) -> Self {
        if err.code() == git2::ErrorCode::NotFound {
            StoreError::NotFound
        } else {
            StoreError::Other(err.to_string())
        }
    }
}
/// A commit/tree/file store backed by a git repository.
pub struct GitStore {
    // git2::Repository is not Sync, so all access goes through this mutex.
    repo: Mutex<git2::Repository>,
    // Cached id of git's well-known empty tree.
    empty_tree_id: TreeId,
}
impl GitStore {
    /// Opens an existing git repository at `path`. Panics if it cannot be
    /// opened.
    pub fn load(path: PathBuf) -> Self {
        let repo = Mutex::new(git2::Repository::open(path).unwrap());
        // SHA-1 of the empty tree, a constant in every git repository.
        let empty_tree_id =
            TreeId(hex::decode("4b825dc642cb6eb9a060e54bf8d69288fbee4904").unwrap());
        GitStore {
            repo,
            empty_tree_id,
        }
    }
}
/// Converts a git signature to the store's representation, substituting
/// placeholders for missing name/email and converting seconds to milliseconds.
fn signature_from_git(signature: git2::Signature) -> Signature {
    let when = signature.when();
    Signature {
        name: signature.name().unwrap_or("<no name>").to_owned(),
        email: signature.email().unwrap_or("<no email>").to_owned(),
        timestamp: Timestamp {
            timestamp: MillisSinceEpoch((when.seconds() * 1000) as u64),
            tz_offset: when.offset_minutes(),
        },
    }
}
/// Converts the store's signature representation back to a git signature.
/// The store keeps milliseconds; git keeps seconds, so precision is dropped.
fn signature_to_git(signature: &Signature) -> git2::Signature {
    let time = git2::Time::new(
        (signature.timestamp.timestamp.0 / 1000) as i64,
        signature.timestamp.tz_offset,
    );
    // Pass the fields directly instead of taking extra references to
    // already-borrowed data (clippy: needless_borrow).
    git2::Signature::new(&signature.name, &signature.email, &time).unwrap()
}
/// Serializes the commit metadata that git itself cannot represent
/// (open/pruned flags, change id, predecessors) into a hex-encoded protobuf
/// suitable for storing as a git note. Inverse of `deserialize_note`.
fn serialize_note(commit: &Commit) -> String {
    let mut proto = protos::store::Commit::new();
    proto.is_open = commit.is_open;
    proto.is_pruned = commit.is_pruned;
    proto.change_id = commit.change_id.0.to_vec();
    for pred in &commit.predecessors {
        proto.predecessors.push(pred.0.to_vec());
    }
    hex::encode(proto.write_to_bytes().unwrap())
}
/// Applies the extra metadata stored in a git note onto `commit`. Inverse of
/// `serialize_note`.
fn deserialize_note(commit: &mut Commit, note: &str) {
    let mut reader = Cursor::new(hex::decode(note).unwrap());
    let proto: protos::store::Commit = protobuf::parse_from_reader(&mut reader).unwrap();
    commit.is_open = proto.is_open;
    commit.is_pruned = proto.is_pruned;
    commit.change_id = ChangeId(proto.change_id);
    commit
        .predecessors
        .extend(proto.predecessors.iter().map(|pred| CommitId(pred.clone())));
}
impl Debug for GitStore {
    /// Shows only the repository path; the repo handle itself has no useful
    /// debug representation.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let repo = self.repo.lock().unwrap();
        f.debug_struct("GitStore")
            .field("path", &repo.path())
            .finish()
    }
}
impl Store for GitStore {
    fn hash_length(&self) -> usize {
        // SHA-1 digests are 20 bytes.
        20
    }
    /// Reads a file's content from a git blob. `_path` is unused: git blobs
    /// are addressed purely by hash.
    fn read_file(&self, _path: &FileRepoPath, id: &FileId) -> StoreResult<Box<dyn Read>> {
        // Reject malformed ids early rather than panicking in Oid parsing.
        if id.0.len() != self.hash_length() {
            return Err(StoreError::NotFound);
        }
        let locked_repo = self.repo.lock().unwrap();
        let blob = locked_repo
            .find_blob(Oid::from_bytes(id.0.as_slice()).unwrap())
            .unwrap();
        // Copy the content out so the returned reader doesn't borrow the repo.
        let content = blob.content().to_owned();
        Ok(Box::new(Cursor::new(content)))
    }
    /// Writes `contents` as a git blob and returns its id.
    fn write_file(&self, _path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult<FileId> {
        let mut bytes = Vec::new();
        contents.read_to_end(&mut bytes).unwrap();
        let locked_repo = self.repo.lock().unwrap();
        let oid = locked_repo.blob(bytes.as_slice()).unwrap();
        Ok(FileId(oid.as_bytes().to_vec()))
    }
    /// Reads a symlink target stored as a git blob; assumes the target is
    /// valid UTF-8.
    fn read_symlink(&self, _path: &FileRepoPath, id: &SymlinkId) -> Result<String, StoreError> {
        if id.0.len() != self.hash_length() {
            return Err(StoreError::NotFound);
        }
        let locked_repo = self.repo.lock().unwrap();
        let blob = locked_repo
            .find_blob(Oid::from_bytes(id.0.as_slice()).unwrap())
            .unwrap();
        let target = String::from_utf8(blob.content().to_owned()).unwrap();
        Ok(target)
    }
    /// Writes a symlink target as a git blob.
    fn write_symlink(&self, _path: &FileRepoPath, target: &str) -> Result<SymlinkId, StoreError> {
        let locked_repo = self.repo.lock().unwrap();
        let oid = locked_repo.blob(target.as_bytes()).unwrap();
        Ok(SymlinkId(oid.as_bytes().to_vec()))
    }
    fn empty_tree_id(&self) -> &TreeId {
        &self.empty_tree_id
    }
    /// Reads a git tree into the store's Tree representation, translating
    /// file modes into TreeValue variants and mapping `*.jjconflict` entries
    /// back to conflicts.
    fn read_tree(&self, _path: &DirRepoPath, id: &TreeId) -> StoreResult<Tree> {
        // Git can't look up the empty tree unless it physically exists, so
        // special-case it here.
        if id == &self.empty_tree_id {
            return Ok(Tree::default());
        }
        if id.0.len() != self.hash_length() {
            return Err(StoreError::NotFound);
        }
        let locked_repo = self.repo.lock().unwrap();
        let git_tree = locked_repo
            .find_tree(Oid::from_bytes(id.0.as_slice()).unwrap())
            .unwrap();
        let mut tree = Tree::default();
        for entry in git_tree.iter() {
            let name = entry.name().unwrap();
            let (name, value) = match entry.kind().unwrap() {
                git2::ObjectType::Tree => {
                    let id = TreeId(entry.id().as_bytes().to_vec());
                    (entry.name().unwrap(), TreeValue::Tree(id))
                }
                git2::ObjectType::Blob => match entry.filemode() {
                    0o100644 => {
                        let id = FileId(entry.id().as_bytes().to_vec());
                        if name.ends_with(CONFLICT_SUFFIX) {
                            // Conflicts are stored as regular blobs with a
                            // magic name suffix; strip it for the logical name.
                            (
                                &name[0..name.len() - CONFLICT_SUFFIX.len()],
                                TreeValue::Conflict(ConflictId(entry.id().as_bytes().to_vec())),
                            )
                        } else {
                            (
                                name,
                                TreeValue::Normal {
                                    id,
                                    executable: false,
                                },
                            )
                        }
                    }
                    0o100755 => {
                        let id = FileId(entry.id().as_bytes().to_vec());
                        (
                            name,
                            TreeValue::Normal {
                                id,
                                executable: true,
                            },
                        )
                    }
                    0o120000 => {
                        let id = SymlinkId(entry.id().as_bytes().to_vec());
                        (name, TreeValue::Symlink(id))
                    }
                    mode => panic!("unexpected file mode {:?}", mode),
                },
                git2::ObjectType::Commit => {
                    // A commit inside a tree is a git submodule.
                    let id = CommitId(entry.id().as_bytes().to_vec());
                    (name, TreeValue::GitSubmodule(id))
                }
                kind => panic!("unexpected object type {:?}", kind),
            };
            tree.set(name.to_string(), value);
        }
        Ok(tree)
    }
    /// Writes the store Tree as a git tree object. Inverse of `read_tree`:
    /// conflicts get the `.jjconflict` suffix appended to their name.
    fn write_tree(&self, _path: &DirRepoPath, contents: &Tree) -> StoreResult<TreeId> {
        let locked_repo = self.repo.lock().unwrap();
        let mut builder = locked_repo.treebuilder(None).unwrap();
        for entry in contents.entries() {
            let name = entry.name().to_owned();
            let (name, id, filemode) = match entry.value() {
                TreeValue::Normal {
                    id,
                    executable: false,
                } => (name, &id.0, 0o100644),
                TreeValue::Normal {
                    id,
                    executable: true,
                } => (name, &id.0, 0o100755),
                TreeValue::Symlink(id) => (name, &id.0, 0o120000),
                TreeValue::Tree(id) => (name, &id.0, 0o040000),
                TreeValue::GitSubmodule(id) => (name, &id.0, 0o160000),
                TreeValue::Conflict(id) => (name + CONFLICT_SUFFIX, &id.0, 0o100644),
            };
            builder
                .insert(name, Oid::from_bytes(id).unwrap(), filemode)
                .unwrap();
        }
        let oid = builder.write().unwrap();
        Ok(TreeId(oid.as_bytes().to_vec()))
    }
    /// Reads a git commit and augments it with jj-specific metadata from the
    /// notes ref, if present.
    fn read_commit(&self, id: &CommitId) -> StoreResult<Commit> {
        if id.0.len() != self.hash_length() {
            return Err(StoreError::NotFound);
        }
        let locked_repo = self.repo.lock().unwrap();
        let git_commit_id = Oid::from_bytes(id.0.as_slice())?;
        let commit = locked_repo.find_commit(git_commit_id)?;
        // Default change id: the first 16 bytes of the commit id. May be
        // overwritten below by the value stored in the note.
        let change_id = ChangeId(id.0.clone().as_slice()[0..16].to_vec());
        let parents: Vec<_> = commit
            .parent_ids()
            .map(|oid| CommitId(oid.as_bytes().to_vec()))
            .collect();
        let tree_id = TreeId(commit.tree_id().as_bytes().to_vec());
        let description = commit.message().unwrap_or("<no message>").to_owned();
        let author = signature_from_git(commit.author());
        let committer = signature_from_git(commit.committer());
        let mut commit = Commit {
            parents,
            predecessors: vec![],
            root_tree: tree_id,
            change_id,
            description,
            author,
            committer,
            is_open: false,
            is_pruned: false,
        };
        // A missing note is fine: plain git commits simply have no extra
        // jj metadata.
        let maybe_note = locked_repo.find_note(Some(NOTES_REF), git_commit_id).ok();
        if let Some(note) = maybe_note {
            deserialize_note(&mut commit, note.message().unwrap());
        }
        Ok(commit)
    }
    /// Writes a commit as a git commit object plus a git note carrying the
    /// jj-specific metadata.
    fn write_commit(&self, contents: &Commit) -> StoreResult<CommitId> {
        // TODO: We shouldn't have to create an in-memory index just to write an
        // object...
        let locked_repo = self.repo.lock().unwrap();
        let git_tree = locked_repo.find_tree(Oid::from_bytes(contents.root_tree.0.as_slice())?)?;
        let author = signature_to_git(&contents.author);
        let committer = signature_to_git(&contents.committer);
        let message = &contents.description;
        let mut parents = vec![];
        for parent_id in &contents.parents {
            let parent_git_commit =
                locked_repo.find_commit(Oid::from_bytes(parent_id.0.as_slice())?)?;
            parents.push(parent_git_commit);
        }
        let parent_refs: Vec<_> = parents.iter().collect();
        let git_id =
            locked_repo.commit(None, &author, &committer, &message, &git_tree, &parent_refs)?;
        let id = CommitId(git_id.as_bytes().to_vec());
        let note = serialize_note(contents);
        // TODO: Include the extra commit data in commit headers instead of a ref.
        // Unfortunately, it doesn't seem like libgit2-rs supports that. Perhaps
        // we'll have to serialize/deserialize the commit data ourselves.
        loop {
            let note_status = locked_repo.note(
                &committer,
                &committer,
                Some(NOTES_REF),
                git_id,
                &note,
                false,
            );
            match note_status {
                Err(err) if err.message().contains(NOTES_REF_LOCK) => {
                    // It seems that libgit2 doesn't retry when .git/refs/notes/jj/commits.lock
                    // already exists.
                    // TODO: Report this to libgit2.
                    let retry_delay = Duration::from_millis(10);
                    std::thread::sleep(retry_delay);
                }
                Err(err) => {
                    return Err(StoreError::from(err));
                }
                Ok(_) => {
                    break;
                }
            }
        }
        Ok(id)
    }
    /// Reads a conflict object, stored as a JSON blob (see `write_conflict`).
    fn read_conflict(&self, id: &ConflictId) -> StoreResult<Conflict> {
        // Conflicts share the blob namespace with files; the path argument is
        // ignored by read_file, hence the "unused" placeholder.
        let mut file = self.read_file(&FileRepoPath::from("unused"), &FileId(id.0.clone()))?;
        let mut data = String::new();
        file.read_to_string(&mut data)?;
        let json: serde_json::Value = serde_json::from_str(&data).unwrap();
        Ok(Conflict {
            removes: conflict_part_list_from_json(json.get("removes").unwrap()),
            adds: conflict_part_list_from_json(json.get("adds").unwrap()),
        })
    }
    /// Writes a conflict object as a JSON-encoded git blob.
    fn write_conflict(&self, conflict: &Conflict) -> StoreResult<ConflictId> {
        let json = serde_json::json!({
            "removes": conflict_part_list_to_json(&conflict.removes),
            "adds": conflict_part_list_to_json(&conflict.adds),
        });
        let json_string = json.to_string();
        let mut bytes = json_string.as_bytes();
        // TODO: add a ref pointing to it so it won't get GC'd
        let file_id = self.write_file(&FileRepoPath::from("unused"), &mut bytes)?;
        Ok(ConflictId(file_id.0))
    }
}
/// Serializes a list of conflict parts as a JSON array.
fn conflict_part_list_to_json(parts: &[ConflictPart]) -> serde_json::Value {
    let values: Vec<serde_json::Value> = parts.iter().map(conflict_part_to_json).collect();
    serde_json::Value::Array(values)
}
/// Deserializes a JSON array of conflict parts. Panics on malformed input,
/// matching the rest of the JSON conflict codec.
fn conflict_part_list_from_json(json: &serde_json::Value) -> Vec<ConflictPart> {
    let array = json.as_array().unwrap();
    array.iter().map(conflict_part_from_json).collect()
}
/// Serializes one conflict part as a JSON object with a single "value" key.
fn conflict_part_to_json(part: &ConflictPart) -> serde_json::Value {
    serde_json::json!({
        "value": tree_value_to_json(&part.value),
    })
}
/// Deserializes one conflict part; inverse of `conflict_part_to_json`.
fn conflict_part_from_json(json: &serde_json::Value) -> ConflictPart {
    ConflictPart {
        value: tree_value_from_json(json.get("value").unwrap()),
    }
}
/// Serializes a TreeValue as JSON, with ids hex-encoded. The variant is
/// encoded by which key is present; inverse of `tree_value_from_json`.
fn tree_value_to_json(value: &TreeValue) -> serde_json::Value {
    match value {
        TreeValue::Normal { id, executable } => serde_json::json!({
             "file": {
                 "id": id.hex(),
                 "executable": executable,
             },
        }),
        TreeValue::Symlink(id) => serde_json::json!({
             "symlink_id": id.hex(),
        }),
        TreeValue::Tree(id) => serde_json::json!({
             "tree_id": id.hex(),
        }),
        TreeValue::GitSubmodule(id) => serde_json::json!({
             "submodule_id": id.hex(),
        }),
        TreeValue::Conflict(id) => serde_json::json!({
             "conflict_id": id.hex(),
        }),
    }
}
/// Deserializes a TreeValue from JSON by probing which variant key is
/// present; inverse of `tree_value_to_json`. Panics on unrecognized input.
fn tree_value_from_json(json: &serde_json::Value) -> TreeValue {
    if let Some(json_file) = json.get("file") {
        TreeValue::Normal {
            id: FileId(bytes_vec_from_json(json_file.get("id").unwrap())),
            executable: json_file.get("executable").unwrap().as_bool().unwrap(),
        }
    } else if let Some(json_id) = json.get("symlink_id") {
        TreeValue::Symlink(SymlinkId(bytes_vec_from_json(json_id)))
    } else if let Some(json_id) = json.get("tree_id") {
        TreeValue::Tree(TreeId(bytes_vec_from_json(json_id)))
    } else if let Some(json_id) = json.get("submodule_id") {
        TreeValue::GitSubmodule(CommitId(bytes_vec_from_json(json_id)))
    } else if let Some(json_id) = json.get("conflict_id") {
        TreeValue::Conflict(ConflictId(bytes_vec_from_json(json_id)))
    } else {
        panic!("unexpected json value in conflict: {:#?}", json);
    }
}
/// Decodes a hex-encoded JSON string into raw bytes.
fn bytes_vec_from_json(value: &serde_json::Value) -> Vec<u8> {
    let hex_str = value.as_str().unwrap();
    hex::decode(hex_str).unwrap()
}
#[cfg(test)]
mod tests {
    use crate::store::{FileId, MillisSinceEpoch};

    use super::*;

    /// Verifies that a commit created with plain git (no jj note) round-trips
    /// through GitStore: metadata, trees, file modes, and timestamps.
    #[test]
    fn read_plain_git_commit() {
        let temp_dir = tempfile::tempdir().unwrap();
        let git_repo_path = temp_dir.path();
        // &Path is Copy-like; no need to clone the reference.
        let git_repo = git2::Repository::init(git_repo_path).unwrap();

        // Add a commit with some files in
        let blob1 = git_repo.blob(b"content1").unwrap();
        let blob2 = git_repo.blob(b"normal").unwrap();
        let mut dir_tree_builder = git_repo.treebuilder(None).unwrap();
        dir_tree_builder.insert("normal", blob1, 0o100644).unwrap();
        dir_tree_builder.insert("symlink", blob2, 0o120000).unwrap();
        let dir_tree_id = dir_tree_builder.write().unwrap();
        let mut root_tree_builder = git_repo.treebuilder(None).unwrap();
        root_tree_builder
            .insert("dir", dir_tree_id, 0o040000)
            .unwrap();
        let root_tree_id = root_tree_builder.write().unwrap();
        let git_author = git2::Signature::new(
            "git author",
            "git.author@example.com",
            &git2::Time::new(1000, 60),
        )
        .unwrap();
        let git_committer = git2::Signature::new(
            "git committer",
            "git.committer@example.com",
            &git2::Time::new(2000, -480),
        )
        .unwrap();
        let git_tree = git_repo.find_tree(root_tree_id).unwrap();
        let git_commit_id = git_repo
            .commit(
                None,
                &git_author,
                &git_committer,
                "git commit message",
                &git_tree,
                &[],
            )
            .unwrap();
        let commit_id = CommitId(git_commit_id.as_bytes().to_vec());

        let store = GitStore::load(git_repo_path.to_owned());
        let commit = store.read_commit(&commit_id).unwrap();
        // Without a note, the change id defaults to the commit id prefix.
        assert_eq!(
            &commit.change_id,
            &ChangeId(commit_id.0.as_slice()[0..16].to_vec())
        );
        assert_eq!(commit.parents, vec![]);
        assert_eq!(commit.predecessors, vec![]);
        assert_eq!(commit.root_tree.0.as_slice(), root_tree_id.as_bytes());
        assert_eq!(commit.is_open, false);
        assert_eq!(commit.is_pruned, false);
        assert_eq!(commit.description, "git commit message");
        assert_eq!(commit.author.name, "git author");
        assert_eq!(commit.author.email, "git.author@example.com");
        // Git stores seconds; the store reports milliseconds.
        assert_eq!(
            commit.author.timestamp.timestamp,
            MillisSinceEpoch(1000 * 1000)
        );
        assert_eq!(commit.author.timestamp.tz_offset, 60);
        assert_eq!(commit.committer.name, "git committer");
        assert_eq!(commit.committer.email, "git.committer@example.com");
        assert_eq!(
            commit.committer.timestamp.timestamp,
            MillisSinceEpoch(2000 * 1000)
        );
        assert_eq!(commit.committer.timestamp.tz_offset, -480);

        let root_tree = store
            .read_tree(
                &DirRepoPath::root(),
                &TreeId(root_tree_id.as_bytes().to_vec()),
            )
            .unwrap();
        let mut root_entries = root_tree.entries();
        let dir = root_entries.next().unwrap();
        assert_eq!(root_entries.next(), None);
        assert_eq!(dir.name(), "dir");
        assert_eq!(
            dir.value(),
            &TreeValue::Tree(TreeId(dir_tree_id.as_bytes().to_vec()))
        );

        let dir_tree = store
            .read_tree(
                &DirRepoPath::from("dir/"),
                &TreeId(dir_tree_id.as_bytes().to_vec()),
            )
            .unwrap();
        let mut files = dir_tree.entries();
        let normal_file = files.next().unwrap();
        let symlink = files.next().unwrap();
        assert_eq!(files.next(), None);
        assert_eq!(normal_file.name(), "normal");
        assert_eq!(
            normal_file.value(),
            &TreeValue::Normal {
                id: FileId(blob1.as_bytes().to_vec()),
                executable: false
            }
        );
        assert_eq!(symlink.name(), "symlink");
        assert_eq!(
            symlink.value(),
            &TreeValue::Symlink(SymlinkId(blob2.as_bytes().to_vec()))
        );
    }

    /// Writing two jj commits that map to the same git commit object must
    /// fail on the duplicate note rather than silently succeed.
    #[test]
    fn overlapping_git_commit_id() {
        let temp_dir = tempfile::tempdir().unwrap();
        let git_repo_path = temp_dir.path();
        git2::Repository::init(git_repo_path).unwrap();
        let store = GitStore::load(git_repo_path.to_owned());
        let signature = Signature {
            name: "Someone".to_string(),
            email: "someone@example.com".to_string(),
            timestamp: Timestamp {
                timestamp: MillisSinceEpoch(0),
                tz_offset: 0,
            },
        };
        let commit1 = Commit {
            parents: vec![],
            predecessors: vec![],
            root_tree: store.empty_tree_id().clone(),
            change_id: ChangeId(vec![]),
            description: "initial".to_string(),
            author: signature.clone(),
            committer: signature,
            is_open: false,
            is_pruned: false,
        };
        let commit_id1 = store.write_commit(&commit1).unwrap();
        // commit2 differs only in the note contents, so it produces the same
        // git commit object as commit1.
        let mut commit2 = commit1;
        commit2.predecessors.push(commit_id1.clone());
        let expected_error_message = format!("note for '{}' exists already", commit_id1.hex());
        match store.write_commit(&commit2) {
            Ok(_) => {
                // Fixed message: this branch is the *unexpected* success case.
                panic!("unexpectedly successfully wrote two commits with the same git commit object")
            }
            Err(StoreError::Other(message)) if message.contains(&expected_error_message) => {}
            Err(err) => panic!("unexpected error: {:?}", err),
        };
    }
}

1521
lib/src/index.rs Normal file

File diff suppressed because it is too large Load diff

43
lib/src/lib.rs Normal file
View file

@ -0,0 +1,43 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(get_mut_unchecked)]
pub mod commit;
pub mod commit_builder;
pub mod conflicts;
pub mod dag_walk;
pub mod evolution;
pub mod files;
pub mod git_store;
pub mod index;
pub mod local_store;
pub mod lock;
pub mod matchers;
pub mod op_store;
pub mod operation;
pub mod repo;
pub mod repo_path;
pub mod rewrite;
pub mod settings;
pub mod simple_op_store;
pub mod store;
pub mod store_wrapper;
pub mod testutils;
pub mod transaction;
pub mod tree;
pub mod tree_builder;
pub mod trees;
pub mod view;
pub mod working_copy;

396
lib/src/local_store.rs Normal file
View file

@ -0,0 +1,396 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::io::{ErrorKind, Read};
use std::path::PathBuf;
use blake2::{Blake2b, Digest};
use protobuf::{Message, ProtobufError};
use tempfile::{NamedTempFile, PersistError};
use crate::repo_path::{DirRepoPath, FileRepoPath};
use crate::store::{
ChangeId, Commit, CommitId, Conflict, ConflictId, ConflictPart, FileId, MillisSinceEpoch,
Signature, Store, StoreError, StoreResult, SymlinkId, Timestamp, Tree, TreeId, TreeValue,
};
// Wrap I/O failures as opaque store errors. NotFound is mapped separately by
// not_found_to_store_error() where callers care about the distinction.
impl From<std::io::Error> for StoreError {
    fn from(err: std::io::Error) -> Self {
        StoreError::Other(err.to_string())
    }
}
// Failures to persist a temp file into its final location become opaque
// store errors.
impl From<PersistError> for StoreError {
    fn from(err: PersistError) -> Self {
        StoreError::Other(err.to_string())
    }
}
// Protobuf parse/serialize failures become opaque store errors.
impl From<ProtobufError> for StoreError {
    fn from(err: ProtobufError) -> Self {
        StoreError::Other(err.to_string())
    }
}
/// A store keeping objects in local files: one content-addressed file per
/// object, under per-type subdirectories of `path`.
#[derive(Debug)]
pub struct LocalStore {
    path: PathBuf,
    // Cached id of the empty tree (Blake2b-512 of its serialized proto).
    empty_tree_id: TreeId,
}
impl LocalStore {
    /// Creates the on-disk directory layout under `store_path` and writes the
    /// empty tree. Panics if any directory already exists or can't be created.
    pub fn init(store_path: PathBuf) -> Self {
        fs::create_dir(store_path.join("commits")).unwrap();
        fs::create_dir(store_path.join("trees")).unwrap();
        fs::create_dir(store_path.join("files")).unwrap();
        fs::create_dir(store_path.join("symlinks")).unwrap();
        fs::create_dir(store_path.join("conflicts")).unwrap();
        let store = Self::load(store_path);
        let empty_tree_id = store
            .write_tree(&DirRepoPath::root(), &Tree::default())
            .unwrap();
        // Sanity check that the hard-coded hash in load() matches what
        // write_tree() actually produces.
        assert_eq!(empty_tree_id, store.empty_tree_id);
        store
    }

    /// Opens an existing store at `store_path`.
    pub fn load(store_path: PathBuf) -> Self {
        // Precomputed Blake2b-512 hash of the serialized empty Tree proto;
        // init() asserts that it stays in sync with write_tree().
        let empty_tree_id = TreeId(hex::decode("786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce").unwrap());
        LocalStore {
            path: store_path,
            empty_tree_id,
        }
    }

    // Path helpers: each object kind lives in its own subdirectory, keyed by
    // the hex form of its content hash.
    fn file_path(&self, id: &FileId) -> PathBuf {
        self.path.join("files").join(id.hex())
    }

    fn symlink_path(&self, id: &SymlinkId) -> PathBuf {
        self.path.join("symlinks").join(id.hex())
    }

    fn tree_path(&self, id: &TreeId) -> PathBuf {
        self.path.join("trees").join(id.hex())
    }

    fn commit_path(&self, id: &CommitId) -> PathBuf {
        self.path.join("commits").join(id.hex())
    }

    fn conflict_path(&self, id: &ConflictId) -> PathBuf {
        self.path.join("conflicts").join(id.hex())
    }
}
/// Maps a missing file to `StoreError::NotFound`; every other I/O error goes
/// through the generic conversion.
fn not_found_to_store_error(err: std::io::Error) -> StoreError {
    match err.kind() {
        ErrorKind::NotFound => StoreError::NotFound,
        _ => StoreError::from(err),
    }
}
impl Store for LocalStore {
fn hash_length(&self) -> usize {
64
}
fn read_file(&self, _path: &FileRepoPath, id: &FileId) -> StoreResult<Box<dyn Read>> {
let path = self.file_path(&id);
let file = File::open(path).map_err(not_found_to_store_error)?;
Ok(Box::new(zstd::Decoder::new(file)?))
}
fn write_file(&self, _path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult<FileId> {
let temp_file = NamedTempFile::new_in(&self.path)?;
let mut encoder = zstd::Encoder::new(temp_file.as_file(), 0)?;
let mut hasher = Blake2b::new();
loop {
let mut buff: Vec<u8> = Vec::with_capacity(1 << 14);
let bytes_read;
unsafe {
buff.set_len(1 << 14);
bytes_read = contents.read(&mut buff)?;
buff.set_len(bytes_read);
}
if bytes_read == 0 {
break;
}
encoder.write_all(&buff)?;
hasher.input(&buff);
}
encoder.finish()?;
let id = FileId(hasher.result().to_vec());
temp_file.persist(self.file_path(&id))?;
Ok(id)
}
fn read_symlink(&self, _path: &FileRepoPath, id: &SymlinkId) -> Result<String, StoreError> {
let path = self.symlink_path(&id);
let mut file = File::open(path).map_err(not_found_to_store_error)?;
let mut target = String::new();
file.read_to_string(&mut target).unwrap();
Ok(target)
}
fn write_symlink(&self, _path: &FileRepoPath, target: &str) -> Result<SymlinkId, StoreError> {
let mut temp_file = NamedTempFile::new_in(&self.path)?;
temp_file.write_all(target.as_bytes()).unwrap();
let mut hasher = Blake2b::new();
hasher.input(&target.as_bytes());
let id = SymlinkId(hasher.result().to_vec());
temp_file.persist(self.symlink_path(&id))?;
Ok(id)
}
fn empty_tree_id(&self) -> &TreeId {
&self.empty_tree_id
}
fn read_tree(&self, _path: &DirRepoPath, id: &TreeId) -> StoreResult<Tree> {
let path = self.tree_path(&id);
let mut file = File::open(path).map_err(not_found_to_store_error)?;
let proto: protos::store::Tree = protobuf::parse_from_reader(&mut file)?;
Ok(tree_from_proto(&proto))
}
fn write_tree(&self, _path: &DirRepoPath, tree: &Tree) -> StoreResult<TreeId> {
let temp_file = NamedTempFile::new_in(&self.path)?;
let proto = tree_to_proto(tree);
let mut proto_bytes: Vec<u8> = Vec::new();
proto.write_to_writer(&mut proto_bytes)?;
temp_file.as_file().write_all(&proto_bytes)?;
let id = TreeId(Blake2b::digest(&proto_bytes).to_vec());
temp_file.persist(self.tree_path(&id))?;
Ok(id)
}
fn read_commit(&self, id: &CommitId) -> StoreResult<Commit> {
let path = self.commit_path(&id);
let mut file = File::open(path).map_err(not_found_to_store_error)?;
let proto: protos::store::Commit = protobuf::parse_from_reader(&mut file)?;
Ok(commit_from_proto(&proto))
}
fn write_commit(&self, commit: &Commit) -> StoreResult<CommitId> {
let temp_file = NamedTempFile::new_in(&self.path)?;
let proto = commit_to_proto(commit);
let mut proto_bytes: Vec<u8> = Vec::new();
proto.write_to_writer(&mut proto_bytes)?;
temp_file.as_file().write_all(&proto_bytes)?;
let id = CommitId(Blake2b::digest(&proto_bytes).to_vec());
temp_file.persist(self.commit_path(&id))?;
Ok(id)
}
fn read_conflict(&self, id: &ConflictId) -> StoreResult<Conflict> {
let path = self.conflict_path(&id);
let mut file = File::open(path).map_err(not_found_to_store_error)?;
let proto: protos::store::Conflict = protobuf::parse_from_reader(&mut file)?;
Ok(conflict_from_proto(&proto))
}
fn write_conflict(&self, conflict: &Conflict) -> StoreResult<ConflictId> {
let temp_file = NamedTempFile::new_in(&self.path)?;
let proto = conflict_to_proto(conflict);
let mut proto_bytes: Vec<u8> = Vec::new();
proto.write_to_writer(&mut proto_bytes)?;
temp_file.as_file().write_all(&proto_bytes)?;
let id = ConflictId(Blake2b::digest(&proto_bytes).to_vec());
temp_file.persist(self.conflict_path(&id))?;
Ok(id)
}
}
/// Converts a Commit to its protobuf representation; inverse of
/// `commit_from_proto`.
pub fn commit_to_proto(commit: &Commit) -> protos::store::Commit {
    let mut proto = protos::store::Commit::new();
    for parent in &commit.parents {
        proto.parents.push(parent.0.clone());
    }
    for predecessor in &commit.predecessors {
        proto.predecessors.push(predecessor.0.clone());
    }
    proto.set_root_tree(commit.root_tree.0.clone());
    proto.set_change_id(commit.change_id.0.clone());
    proto.set_description(commit.description.clone());
    proto.set_author(signature_to_proto(&commit.author));
    proto.set_committer(signature_to_proto(&commit.committer));
    proto.set_is_open(commit.is_open);
    proto.set_is_pruned(commit.is_pruned);
    proto
}
/// Converts a protobuf-encoded commit back to a Commit; inverse of
/// `commit_to_proto`.
fn commit_from_proto(proto: &protos::store::Commit) -> Commit {
    let commit_id_from_proto = |parent: &Vec<u8>| CommitId(parent.clone());
    let parents = proto.parents.iter().map(commit_id_from_proto).collect();
    let predecessors = proto
        .predecessors
        .iter()
        .map(commit_id_from_proto)
        .collect();
    let root_tree = TreeId(proto.root_tree.to_vec());
    let change_id = ChangeId(proto.change_id.to_vec());
    Commit {
        parents,
        predecessors,
        root_tree,
        change_id,
        description: proto.description.clone(),
        author: signature_from_proto(proto.author.get_ref()),
        committer: signature_from_proto(proto.committer.get_ref()),
        is_open: proto.is_open,
        is_pruned: proto.is_pruned,
    }
}
/// Converts a Tree to its protobuf representation; inverse of
/// `tree_from_proto`.
fn tree_to_proto(tree: &Tree) -> protos::store::Tree {
    let mut proto = protos::store::Tree::new();
    for entry in tree.entries() {
        let mut proto_entry = protos::store::Tree_Entry::new();
        proto_entry.set_name(entry.name().to_owned());
        proto_entry.set_value(tree_value_to_proto(entry.value()));
        proto.entries.push(proto_entry);
    }
    proto
}
/// Converts a protobuf-encoded tree back to a Tree; inverse of
/// `tree_to_proto`.
fn tree_from_proto(proto: &protos::store::Tree) -> Tree {
    let mut tree = Tree::default();
    for proto_entry in proto.entries.iter() {
        let value = tree_value_from_proto(proto_entry.value.as_ref().unwrap());
        tree.set(proto_entry.name.to_string(), value);
    }
    tree
}
/// Converts a TreeValue to its protobuf oneof representation; inverse of
/// `tree_value_from_proto`. Panics for git submodules, which the local store
/// cannot represent.
fn tree_value_to_proto(value: &TreeValue) -> protos::store::TreeValue {
    let mut proto = protos::store::TreeValue::new();
    match value {
        TreeValue::Normal { id, executable } => {
            let mut file = protos::store::TreeValue_NormalFile::new();
            file.set_id(id.0.clone());
            file.set_executable(*executable);
            proto.set_normal_file(file);
        }
        TreeValue::Symlink(id) => {
            proto.set_symlink_id(id.0.clone());
        }
        TreeValue::GitSubmodule(_id) => {
            panic!("cannot store git submodules");
        }
        TreeValue::Tree(id) => {
            proto.set_tree_id(id.0.clone());
        }
        TreeValue::Conflict(id) => {
            proto.set_conflict_id(id.0.clone());
        }
    };
    proto
}
/// Converts a protobuf oneof back to a TreeValue; inverse of
/// `tree_value_to_proto`. There is no submodule arm because submodules are
/// never written (see the panic in tree_value_to_proto).
fn tree_value_from_proto(proto: &protos::store::TreeValue) -> TreeValue {
    match proto.value.as_ref().unwrap() {
        protos::store::TreeValue_oneof_value::tree_id(id) => TreeValue::Tree(TreeId(id.clone())),
        protos::store::TreeValue_oneof_value::normal_file(
            protos::store::TreeValue_NormalFile { id, executable, .. },
        ) => TreeValue::Normal {
            id: FileId(id.clone()),
            executable: *executable,
        },
        protos::store::TreeValue_oneof_value::symlink_id(id) => {
            TreeValue::Symlink(SymlinkId(id.clone()))
        }
        protos::store::TreeValue_oneof_value::conflict_id(id) => {
            TreeValue::Conflict(ConflictId(id.clone()))
        }
    }
}
/// Converts a Signature to its protobuf representation; inverse of
/// `signature_from_proto`.
fn signature_to_proto(signature: &Signature) -> protos::store::Commit_Signature {
    let mut proto = protos::store::Commit_Signature::new();
    proto.set_name(signature.name.clone());
    proto.set_email(signature.email.clone());
    let mut timestamp_proto = protos::store::Commit_Timestamp::new();
    timestamp_proto.set_millis_since_epoch(signature.timestamp.timestamp.0);
    timestamp_proto.set_tz_offset(signature.timestamp.tz_offset);
    proto.set_timestamp(timestamp_proto);
    proto
}
/// Reconstructs a `Signature` from its protobuf form.
fn signature_from_proto(proto: &protos::store::Commit_Signature) -> Signature {
    let timestamp_proto = proto.get_timestamp();
    let timestamp = Timestamp {
        timestamp: MillisSinceEpoch(timestamp_proto.millis_since_epoch),
        tz_offset: timestamp_proto.tz_offset,
    };
    Signature {
        name: proto.name.clone(),
        email: proto.email.clone(),
        timestamp,
    }
}
/// Converts a `Conflict` to its protobuf form, preserving the order of the
/// removed and added parts.
fn conflict_to_proto(conflict: &Conflict) -> protos::store::Conflict {
    let mut proto = protos::store::Conflict::new();
    for part in &conflict.removes {
        proto.removes.push(conflict_part_to_proto(part));
    }
    for part in &conflict.adds {
        proto.adds.push(conflict_part_to_proto(part));
    }
    proto
}
fn conflict_from_proto(proto: &protos::store::Conflict) -> Conflict {
let mut conflict = Conflict::default();
for part in &proto.removes {
conflict.removes.push(conflict_part_from_proto(part))
}
for part in &proto.adds {
conflict.adds.push(conflict_part_from_proto(part))
}
conflict
}
/// Reconstructs a single conflict part from its protobuf form.
fn conflict_part_from_proto(proto: &protos::store::Conflict_Part) -> ConflictPart {
    let value = tree_value_from_proto(proto.content.as_ref().unwrap());
    ConflictPart { value }
}
/// Converts a single conflict part to its protobuf form.
fn conflict_part_to_proto(part: &ConflictPart) -> protos::store::Conflict_Part {
    let mut part_proto = protos::store::Conflict_Part::new();
    part_proto.set_content(tree_value_to_proto(&part.value));
    part_proto
}

108
lib/src/lock.rs Normal file
View file

@ -0,0 +1,108 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::{File, OpenOptions};
use std::path::PathBuf;
use std::time::Duration;
/// A simple cross-process lock based on atomic creation of a lock file.
///
/// Holding the returned value means holding the lock; dropping it deletes
/// the lock file and thereby releases the lock.
pub struct FileLock {
    path: PathBuf,
    _file: File,
}

impl FileLock {
    /// Blocks until the lock file at `path` can be created, then returns the
    /// held lock.
    ///
    /// Panics on any I/O error other than the file already existing.
    pub fn lock(path: PathBuf) -> FileLock {
        let mut options = OpenOptions::new();
        options.create_new(true).write(true);
        loop {
            match options.open(&path) {
                Ok(file) => return FileLock { path, _file: file },
                Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
                    // Another process/thread holds the lock; retry shortly.
                    std::thread::sleep(Duration::from_millis(10));
                }
                Err(err) => panic!(
                    "failed to create lock file {}: {}",
                    path.to_string_lossy(),
                    err
                ),
            }
        }
    }
}

impl Drop for FileLock {
    fn drop(&mut self) {
        // Releasing the lock means deleting the lock file.
        std::fs::remove_file(&self.path).expect("failed to delete lock file");
    }
}
#[cfg(test)]
mod tests {
    use std::env;
    use std::thread;
    use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
    use super::*;
    // Taking and dropping the lock should create and delete the lock file.
    #[test]
    fn lock_basic() {
        let number: u32 = rand::random();
        let lock_path = env::temp_dir().join(format!("test-{}.lock", number));
        assert!(!lock_path.exists());
        {
            let _lock = FileLock::lock(lock_path.clone());
            assert!(lock_path.exists());
        }
        assert!(!lock_path.exists());
    }
    // 100 threads each do a read-modify-write of a shared counter file while
    // holding the lock. If mutual exclusion holds, no increment is lost.
    #[test]
    fn lock_concurrent() {
        let number: u32 = rand::random();
        let data_path = env::temp_dir().join(format!("test-{}", number));
        let lock_path = env::temp_dir().join(format!("test-{}.lock", number));
        let mut data_file = OpenOptions::new()
            .create(true)
            .write(true)
            .open(data_path.clone())
            .unwrap();
        data_file.write_u32::<LittleEndian>(0).unwrap();
        let mut threads = vec![];
        for _ in 0..100 {
            let data_path = data_path.clone();
            let lock_path = lock_path.clone();
            let handle = thread::spawn(move || {
                let _lock = FileLock::lock(lock_path);
                let mut data_file = OpenOptions::new()
                    .read(true)
                    .open(data_path.clone())
                    .unwrap();
                let value = data_file.read_u32::<LittleEndian>().unwrap();
                // Sleep while holding the lock to widen the race window in
                // case the lock were broken.
                thread::sleep(Duration::from_millis(1));
                let mut data_file = OpenOptions::new().write(true).open(data_path).unwrap();
                data_file.write_u32::<LittleEndian>(value + 1).unwrap();
            });
            threads.push(handle);
        }
        for thread in threads {
            thread.join().ok().unwrap();
        }
        let mut data_file = OpenOptions::new().read(true).open(data_path).unwrap();
        let value = data_file.read_u32::<LittleEndian>().unwrap();
        assert_eq!(value, 100);
    }
}

233
lib/src/matchers.rs Normal file
View file

@ -0,0 +1,233 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use std::collections::HashMap;
use std::collections::HashSet;
use crate::repo_path::DirRepoPath;
use crate::repo_path::DirRepoPathComponent;
use crate::repo_path::FileRepoPath;
use crate::repo_path::FileRepoPathComponent;
/// What a matcher wants visited inside a given directory: which
/// subdirectories to descend into and which files to consider.
#[derive(PartialEq, Eq, Debug)]
pub struct Visit<'a> {
    dirs: VisitDirs<'a>,
    files: VisitFiles<'a>,
}
/// The subdirectories to visit: either all of them, or an explicit set.
#[derive(PartialEq, Eq, Debug)]
pub enum VisitDirs<'a> {
    All,
    Set(&'a HashSet<DirRepoPathComponent>),
}
/// The files to visit: either all of them, or an explicit set.
#[derive(PartialEq, Eq, Debug)]
pub enum VisitFiles<'a> {
    All,
    Set(&'a HashSet<FileRepoPathComponent>),
}
/// Decides which files match and which parts of a directory tree are worth
/// visiting to find matches.
pub trait Matcher {
    fn matches(&self, file: &FileRepoPath) -> bool;
    fn visit(&self, dir: &DirRepoPath) -> Visit;
}
/// A matcher that matches every file.
#[derive(PartialEq, Eq, Debug)]
pub struct AlwaysMatcher;
impl Matcher for AlwaysMatcher {
    fn matches(&self, _file: &FileRepoPath) -> bool {
        true
    }
    fn visit(&self, _dir: &DirRepoPath) -> Visit {
        // Everything matches, so every subdirectory and file is worth
        // visiting.
        Visit {
            dirs: VisitDirs::All,
            files: VisitFiles::All,
        }
    }
}
/// A matcher that matches an explicit set of file paths.
#[derive(PartialEq, Eq, Debug)]
pub struct FilesMatcher {
    files: HashSet<FileRepoPath>,
    dirs: Dirs,
}

impl FilesMatcher {
    /// Builds the matcher, indexing ancestor directories so `visit()` can
    /// report which children are relevant.
    fn new(files: HashSet<FileRepoPath>) -> Self {
        let mut dirs = Dirs::new();
        for file in &files {
            dirs.add_file(file);
        }
        FilesMatcher { files, dirs }
    }
}

impl Matcher for FilesMatcher {
    fn matches(&self, file: &FileRepoPath) -> bool {
        self.files.contains(file)
    }

    fn visit(&self, dir: &DirRepoPath) -> Visit {
        Visit {
            dirs: VisitDirs::Set(self.dirs.get_dirs(dir)),
            files: VisitFiles::Set(self.dirs.get_files(dir)),
        }
    }
}
/// Keeps track of which subdirectories and files of each directory need to be
/// visited.
#[derive(PartialEq, Eq, Debug)]
struct Dirs {
    /// For each known directory, the child directories that need visiting.
    dirs: HashMap<DirRepoPath, HashSet<DirRepoPathComponent>>,
    /// For each known directory, the files in it that need visiting.
    files: HashMap<DirRepoPath, HashSet<FileRepoPathComponent>>,
    // Shared empty sets so the getters can return a reference even for
    // directories with no recorded children.
    empty_dirs: HashSet<DirRepoPathComponent>,
    empty_files: HashSet<FileRepoPathComponent>,
}

impl Dirs {
    fn new() -> Self {
        Dirs {
            dirs: HashMap::new(),
            files: HashMap::new(),
            empty_dirs: HashSet::new(),
            empty_files: HashSet::new(),
        }
    }

    /// Records `dir` and all of its ancestors, registering each directory as
    /// a child of its parent.
    fn add_dir(&mut self, mut dir: DirRepoPath) {
        let mut maybe_child = None;
        loop {
            let was_present = self.dirs.contains_key(&dir);
            let children = self.dirs.entry(dir.clone()).or_default();
            if let Some(child) = maybe_child {
                children.insert(child);
            }
            // If this directory was already recorded, its ancestors were
            // recorded along with it, so we can stop walking up.
            if was_present {
                break;
            }
            match dir.split() {
                None => break,
                Some((new_dir, new_child)) => {
                    dir = new_dir;
                    maybe_child = Some(new_child);
                }
            };
        }
    }

    /// Records a file and all its ancestor directories.
    fn add_file(&mut self, file: &FileRepoPath) {
        let (dir, basename) = file.split();
        self.add_dir(dir.clone());
        self.files
            .entry(dir.clone())
            .or_default()
            .insert(basename.clone());
    }

    /// The recorded child directories of `dir` (empty set if unknown).
    fn get_dirs(&self, dir: &DirRepoPath) -> &HashSet<DirRepoPathComponent> {
        self.dirs.get(dir).unwrap_or(&self.empty_dirs)
    }

    /// The recorded files in `dir` (empty set if unknown).
    fn get_files(&self, dir: &DirRepoPath) -> &HashSet<FileRepoPathComponent> {
        self.files.get(dir).unwrap_or(&self.empty_files)
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use super::*;
    use crate::repo_path::DirRepoPath;
    use crate::repo_path::DirRepoPathComponent;
    use crate::repo_path::FileRepoPath;
    use crate::repo_path::FileRepoPathComponent;
    // An empty Dirs reports no children for any directory.
    #[test]
    fn dirs_empty() {
        let dirs = Dirs::new();
        assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &HashSet::new());
    }
    // Adding only the root records no child directories.
    #[test]
    fn dirs_root() {
        let mut dirs = Dirs::new();
        dirs.add_dir(DirRepoPath::root());
        assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &HashSet::new());
    }
    // Adding "dir/" makes it a recorded child of the root.
    #[test]
    fn dirs_dir() {
        let mut dirs = Dirs::new();
        dirs.add_dir(DirRepoPath::from("dir/"));
        let mut expected_root_dirs = HashSet::new();
        expected_root_dirs.insert(DirRepoPathComponent::from("dir"));
        assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &expected_root_dirs);
    }
    // Adding a file records its ancestor directory as a child of the root,
    // but the file itself only under its own directory (not the root).
    #[test]
    fn dirs_file() {
        let mut dirs = Dirs::new();
        dirs.add_file(&FileRepoPath::from("dir/file"));
        let mut expected_root_dirs = HashSet::new();
        expected_root_dirs.insert(DirRepoPathComponent::from("dir"));
        assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &expected_root_dirs);
        assert_eq!(dirs.get_files(&DirRepoPath::root()), &HashSet::new());
    }
    // An empty FilesMatcher matches nothing and wants nothing visited.
    #[test]
    fn filesmatcher_empty() {
        let m = FilesMatcher::new(HashSet::new());
        assert_eq!(m.matches(&FileRepoPath::from("file")), false);
        assert_eq!(m.matches(&FileRepoPath::from("dir/file")), false);
        assert_eq!(
            m.visit(&DirRepoPath::root()),
            Visit {
                dirs: VisitDirs::Set(&HashSet::new()),
                files: VisitFiles::Set(&HashSet::new()),
            }
        );
    }
    // With files in subdirectories, visiting the root reports only the
    // top-level directory and file entries.
    #[test]
    fn filesmatcher_nonempty() {
        let mut files = HashSet::new();
        files.insert(FileRepoPath::from("dir1/subdir1/file1"));
        files.insert(FileRepoPath::from("dir1/subdir1/file2"));
        files.insert(FileRepoPath::from("dir1/subdir2/file3"));
        files.insert(FileRepoPath::from("file4"));
        let m = FilesMatcher::new(files);
        let expected_root_files = vec![FileRepoPathComponent::from("file4")]
            .into_iter()
            .collect();
        let expected_root_dirs = vec![DirRepoPathComponent::from("dir1")]
            .into_iter()
            .collect();
        assert_eq!(
            m.visit(&DirRepoPath::root()),
            Visit {
                dirs: VisitDirs::Set(&expected_root_dirs),
                files: VisitFiles::Set(&expected_root_files),
            }
        );
    }
}

131
lib/src/op_store.rs Normal file
View file

@ -0,0 +1,131 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::store::{CommitId, Timestamp};
use std::collections::HashSet;
use std::fmt::{Debug, Error, Formatter};
/// Identifier for a stored `View`.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct ViewId(pub Vec<u8>);
impl Debug for ViewId {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        // Show the hex form rather than the raw bytes.
        f.debug_tuple("ViewId").field(&self.hex()).finish()
    }
}
impl ViewId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Identifier for a stored `Operation`.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct OperationId(pub Vec<u8>);
impl Debug for OperationId {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        // Show the hex form rather than the raw bytes.
        f.debug_tuple("OperationId").field(&self.hex()).finish()
    }
}
impl OperationId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Represents the way the repo looks at a given time, just like how a Tree
/// object represents how the file system looks at a given time.
#[derive(Clone)]
pub struct View {
    /// All head commits
    pub head_ids: HashSet<CommitId>,
    // The commit that *should be* checked out in the (default) working copy. Note that the
    // working copy (.jj/working_copy/) has the source of truth about which commit *is* checked out
    // (to be precise: the commit to which we most recently completed a checkout to).
    // TODO: Allow multiple working copies
    pub checkout: CommitId,
}
impl View {
    /// Creates a view with no heads and the given commit as the intended
    /// checkout.
    pub fn new(checkout: CommitId) -> Self {
        Self {
            head_ids: HashSet::new(),
            checkout,
        }
    }
}
/// Represents an operation (transaction) on the repo view, just like how a
/// Commit object represents an operation on the tree.
///
/// Operations and views are not meant to be exchanged between repos or users;
/// they represent local state and history.
///
/// The operation history will almost always be linear. It will only have
/// forks when parallel operations occurred. The parent is determined when
/// the transaction starts. When the transaction commits, a lock will be
/// taken and it will be checked that the current head of the operation
/// graph is unchanged. If the current head has changed, there has been
/// concurrent operation.
#[derive(Clone)]
pub struct Operation {
    /// The view this operation resulted in.
    pub view_id: ViewId,
    /// The operation(s) this one was based on.
    pub parents: Vec<OperationId>,
    pub metadata: OperationMetadata,
}
/// Information about when, where, and why an operation was performed.
#[derive(Clone)]
pub struct OperationMetadata {
    pub start_time: Timestamp,
    pub end_time: Timestamp,
    // Whatever is useful to the user, such as exact command line call
    pub description: String,
    /// Machine the operation ran on (from `whoami`).
    pub hostname: String,
    /// User the operation ran as (from `whoami`).
    pub username: String,
}
impl OperationMetadata {
    /// Creates metadata attributed to the current host and user, with both
    /// timestamps set to now. Presumably `end_time` is updated when the
    /// operation finishes — TODO confirm against callers.
    pub fn new(description: String) -> Self {
        let timestamp = Timestamp::now();
        let hostname = whoami::hostname();
        let username = whoami::username();
        OperationMetadata {
            start_time: timestamp.clone(),
            end_time: timestamp,
            description,
            hostname,
            username,
        }
    }
}
/// Error type for `OpStore` reads and writes.
#[derive(Debug)]
pub enum OpStoreError {
    NotFound,
    Other(String),
}
pub type OpStoreResult<T> = Result<T, OpStoreError>;
/// Storage backend for views and operations, looked up by id.
pub trait OpStore: Send + Sync + Debug {
    fn read_view(&self, id: &ViewId) -> OpStoreResult<View>;
    fn write_view(&self, contents: &View) -> OpStoreResult<ViewId>;
    fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation>;
    fn write_operation(&self, contents: &Operation) -> OpStoreResult<OperationId>;
}

162
lib/src/operation.rs Normal file
View file

@ -0,0 +1,162 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::op_store;
use crate::op_store::{OpStore, OperationId, ViewId};
use crate::store::CommitId;
use std::cmp::Ordering;
use std::collections::HashSet;
use std::fmt::{Debug, Error, Formatter};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
/// A wrapper around an `op_store::Operation` that also knows its id and the
/// `OpStore` it was loaded from, so related objects (parent operations, the
/// resulting view) can be loaded on demand.
#[derive(Clone)]
pub struct Operation {
    op_store: Arc<dyn OpStore>,
    id: OperationId,
    data: op_store::Operation,
}

impl Debug for Operation {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        // The id is enough to identify the operation; omit the data.
        f.debug_struct("Operation").field("id", &self.id).finish()
    }
}

// Equality, ordering, and hashing are all delegated to the id.
impl PartialEq for Operation {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}

impl Eq for Operation {}

impl Ord for Operation {
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}

impl PartialOrd for Operation {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two impls cannot disagree
        // (clippy: non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}

impl Hash for Operation {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state)
    }
}

impl Operation {
    pub fn new(op_store: Arc<dyn OpStore>, id: OperationId, data: op_store::Operation) -> Self {
        Operation { op_store, id, data }
    }

    pub fn op_store(&self) -> Arc<dyn OpStore> {
        self.op_store.clone()
    }

    pub fn id(&self) -> &OperationId {
        &self.id
    }

    /// Loads this operation's parent operations from the store.
    ///
    /// Panics if a parent is missing from the store.
    pub fn parents(&self) -> Vec<Operation> {
        self.data
            .parents
            .iter()
            .map(|parent_id| {
                let data = self.op_store.read_operation(parent_id).unwrap();
                Operation::new(self.op_store.clone(), parent_id.clone(), data)
            })
            .collect()
    }

    /// Loads the view this operation resulted in.
    ///
    /// Panics if the view is missing from the store.
    pub fn view(&self) -> View {
        let data = self.op_store.read_view(&self.data.view_id).unwrap();
        View::new(self.op_store.clone(), self.data.view_id.clone(), data)
    }

    pub fn store_operation(&self) -> &op_store::Operation {
        &self.data
    }
}
/// A wrapper around an `op_store::View` that also knows its id and the
/// `OpStore` it was loaded from.
#[derive(Clone)]
pub struct View {
    op_store: Arc<dyn OpStore>,
    id: ViewId,
    data: op_store::View,
}

impl Debug for View {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        // The id is enough to identify the view; omit the data.
        f.debug_struct("View").field("id", &self.id).finish()
    }
}

// Equality, ordering, and hashing are all delegated to the id.
impl PartialEq for View {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}

impl Eq for View {}

impl Ord for View {
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}

impl PartialOrd for View {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two impls cannot disagree
        // (clippy: non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}

impl Hash for View {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state)
    }
}

impl View {
    pub fn new(op_store: Arc<dyn OpStore>, id: ViewId, data: op_store::View) -> Self {
        View { op_store, id, data }
    }

    pub fn op_store(&self) -> Arc<dyn OpStore> {
        self.op_store.clone()
    }

    pub fn id(&self) -> &ViewId {
        &self.id
    }

    pub fn store_view(&self) -> &op_store::View {
        &self.data
    }

    /// Consumes the wrapper, returning the underlying stored view.
    pub fn take_store_view(self) -> op_store::View {
        self.data
    }

    pub fn heads(&self) -> &HashSet<CommitId> {
        &self.data.head_ids
    }
}

308
lib/src/repo.rs Normal file
View file

@ -0,0 +1,308 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{Debug, Formatter};
use std::fs;
use std::fs::File;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::sync::{Arc, Mutex, MutexGuard};
use thiserror::Error;
use crate::commit_builder::{new_change_id, signature};
use crate::evolution::{Evolution, ReadonlyEvolution};
use crate::git_store::GitStore;
use crate::index::Index;
use crate::local_store::LocalStore;
use crate::operation::Operation;
use crate::settings::{RepoSettings, UserSettings};
use crate::store;
use crate::store::{Store, StoreError};
use crate::store_wrapper::StoreWrapper;
use crate::transaction::Transaction;
use crate::view::{ReadonlyView, View};
use crate::working_copy::WorkingCopy;
/// Error type for repo-level object lookups and operations.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum RepoError {
    #[error("Object not found")]
    NotFound,
    #[error("Error: {0}")]
    Other(String),
}
impl From<StoreError> for RepoError {
    /// Store errors map one-to-one onto repo errors.
    fn from(err: StoreError) -> Self {
        match err {
            StoreError::NotFound => Self::NotFound,
            StoreError::Other(description) => Self::Other(description),
        }
    }
}
pub type RepoResult<T> = Result<T, RepoError>;
/// Read-only access to a repo: its object store, its current view, and its
/// evolution state.
pub trait Repo: Sync {
    fn store(&self) -> &Arc<StoreWrapper>;
    fn view(&self) -> &dyn View;
    fn evolution(&self) -> &dyn Evolution;
}
/// A repo loaded at a particular operation, with lazily-created index and
/// evolution state.
pub struct ReadonlyRepo {
    /// Path to the .jj/ directory.
    repo_path: PathBuf,
    /// Path to the working-copy root (the parent of .jj/).
    wc_path: PathBuf,
    store: Arc<StoreWrapper>,
    settings: RepoSettings,
    // Lazily initialized (see `index()`). The 'static lifetime is fabricated:
    // the index actually borrows from this repo (see the transmutes in
    // `index()`), so it must never outlive it.
    index: Mutex<Option<Arc<Index<'static>>>>,
    working_copy: Arc<Mutex<WorkingCopy>>,
    view: ReadonlyView,
    // Always Some after construction; Option only so the self-referential
    // value can be filled in afterwards by `init_cycles()`.
    evolution: Option<ReadonlyEvolution<'static>>,
}
impl Debug for ReadonlyRepo {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        // Deliberately omits the larger fields (index, working copy, view,
        // evolution).
        f.debug_struct("Repo")
            .field("repo_path", &self.repo_path)
            .field("wc_path", &self.wc_path)
            .field("store", &self.store)
            .finish()
    }
}
impl ReadonlyRepo {
    /// Creates a new repo backed by jj's own local store, with the working
    /// copy rooted at `wc_path`.
    pub fn init_local(settings: &UserSettings, wc_path: PathBuf) -> Arc<ReadonlyRepo> {
        let repo_path = wc_path.join(".jj");
        fs::create_dir(repo_path.clone()).unwrap();
        let store_path = repo_path.join("store");
        fs::create_dir(&store_path).unwrap();
        let store = Box::new(LocalStore::init(store_path));
        ReadonlyRepo::init(settings, repo_path, wc_path, store)
    }

    /// Creates a new repo backed by an existing git repo at `git_store_path`.
    /// The `.jj/store` file records the path to the git repo.
    pub fn init_git(
        settings: &UserSettings,
        wc_path: PathBuf,
        git_store_path: PathBuf,
    ) -> Arc<ReadonlyRepo> {
        let repo_path = wc_path.join(".jj");
        fs::create_dir(repo_path.clone()).unwrap();
        let store_path = repo_path.join("store");
        let git_store_path = fs::canonicalize(git_store_path).unwrap();
        let mut store_file = File::create(store_path).unwrap();
        store_file
            .write_all((String::from("git: ") + git_store_path.to_str().unwrap()).as_bytes())
            .unwrap();
        let store = Box::new(GitStore::load(git_store_path));
        ReadonlyRepo::init(settings, repo_path, wc_path, store)
    }

    /// Shared initialization: creates the working copy and view, writes an
    /// initial open commit, and checks it out.
    fn init(
        user_settings: &UserSettings,
        repo_path: PathBuf,
        wc_path: PathBuf,
        store: Box<dyn Store>,
    ) -> Arc<ReadonlyRepo> {
        let repo_settings = user_settings.with_repo(&repo_path).unwrap();
        let store = StoreWrapper::new(store);
        fs::create_dir(repo_path.join("working_copy")).unwrap();
        let working_copy = WorkingCopy::init(store.clone(), repo_path.join("working_copy"));
        fs::create_dir(repo_path.join("view")).unwrap();
        let signature = signature(user_settings);
        // The initial commit: empty and open, so the working copy has
        // something to start out checked out to.
        let checkout_commit = store::Commit {
            parents: vec![],
            predecessors: vec![],
            root_tree: store.empty_tree_id().clone(),
            change_id: new_change_id(),
            description: "".to_string(),
            author: signature.clone(),
            committer: signature,
            is_open: true,
            is_pruned: false,
        };
        let checkout_commit = store.write_commit(checkout_commit);
        let view = ReadonlyView::init(
            store.clone(),
            repo_path.join("view"),
            checkout_commit.id().clone(),
        );
        let repo = ReadonlyRepo {
            repo_path: repo_path.clone(),
            wc_path,
            store,
            settings: repo_settings,
            index: Mutex::new(None),
            working_copy: Arc::new(Mutex::new(working_copy)),
            view,
            evolution: None,
        };
        let mut repo = Arc::new(repo);
        let repo_ref: &ReadonlyRepo = repo.as_ref();
        // NOTE(review): the evolution (and later the index) borrow the repo
        // they are stored inside, which Rust cannot express; the 'static
        // lifetime is fabricated here and soundness relies on those fields
        // never outliving the repo.
        let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) };
        fs::create_dir(repo_path.join("index")).unwrap();
        Index::init(repo_path.join("index"));
        let evolution = ReadonlyEvolution::new(static_lifetime_repo);
        ReadonlyRepo::init_cycles(&mut repo, evolution);
        repo.working_copy_locked()
            .check_out(&repo, checkout_commit)
            .expect("failed to check out root commit");
        repo
    }

    /// Loads an existing repo rooted at `wc_path`, auto-detecting the store:
    /// `.jj/store` is either a directory (local store) or a file containing
    /// "git: <path>" (git-backed store).
    pub fn load(user_settings: &UserSettings, wc_path: PathBuf) -> Arc<ReadonlyRepo> {
        let repo_path = wc_path.join(".jj");
        let store_path = repo_path.join("store");
        let store: Box<dyn Store>;
        if store_path.is_dir() {
            store = Box::new(LocalStore::load(store_path));
        } else {
            let mut store_file = File::open(store_path).unwrap();
            let mut buf = Vec::new();
            store_file.read_to_end(&mut buf).unwrap();
            let contents = String::from_utf8(buf).unwrap();
            assert!(contents.starts_with("git: "));
            let git_store_path_str = contents[5..].to_string();
            let git_store_path = PathBuf::from(git_store_path_str);
            store = Box::new(GitStore::load(git_store_path));
        }
        let store = StoreWrapper::new(store);
        let repo_settings = user_settings.with_repo(&repo_path).unwrap();
        let working_copy = WorkingCopy::load(store.clone(), repo_path.join("working_copy"));
        let view = ReadonlyView::load(store.clone(), repo_path.join("view"));
        let repo = ReadonlyRepo {
            repo_path,
            wc_path,
            store,
            settings: repo_settings,
            index: Mutex::new(None),
            working_copy: Arc::new(Mutex::new(working_copy)),
            view,
            evolution: None,
        };
        let mut repo = Arc::new(repo);
        let repo_ref: &ReadonlyRepo = repo.as_ref();
        // See the note in `init()` about this fabricated 'static lifetime.
        let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) };
        let evolution = ReadonlyEvolution::new(static_lifetime_repo);
        ReadonlyRepo::init_cycles(&mut repo, evolution);
        repo
    }

    /// Stores the self-referential evolution value. Must run while the Arc is
    /// still unique, or `Arc::get_mut` panics.
    fn init_cycles(mut repo: &mut Arc<ReadonlyRepo>, evolution: ReadonlyEvolution<'static>) {
        let mut repo_ref_mut = Arc::get_mut(&mut repo).unwrap();
        repo_ref_mut.evolution = Some(evolution);
    }

    pub fn repo_path(&self) -> &PathBuf {
        &self.repo_path
    }

    pub fn working_copy_path(&self) -> &PathBuf {
        &self.wc_path
    }

    /// Returns the commit index, loading it lazily on first access.
    pub fn index<'r>(&'r self) -> Arc<Index<'r>> {
        let mut locked_index = self.index.lock().unwrap();
        if locked_index.is_none() {
            let repo_ref: &ReadonlyRepo = self;
            let op_id = self.view.base_op_head_id().clone();
            // See the note in `init()` about this fabricated 'static
            // lifetime.
            let static_lifetime_repo: &'static ReadonlyRepo =
                unsafe { std::mem::transmute(repo_ref) };
            locked_index.replace(Arc::new(Index::load(
                static_lifetime_repo,
                self.repo_path.join("index"),
                op_id,
            )));
        }
        let index: Arc<Index<'static>> = locked_index.as_ref().unwrap().clone();
        // cast to lifetime of self
        let index: Arc<Index<'r>> = unsafe { std::mem::transmute(index) };
        index
    }

    /// Rebuilds the index from scratch and returns the new index.
    pub fn reindex(&mut self) -> Arc<Index> {
        Index::reinit(self.repo_path.join("index"));
        {
            // Drop the cached index so `index()` reloads it.
            let mut locked_index = self.index.lock().unwrap();
            locked_index.take();
        }
        self.index()
    }

    pub fn working_copy(&self) -> &Arc<Mutex<WorkingCopy>> {
        &self.working_copy
    }

    pub fn working_copy_locked(&self) -> MutexGuard<WorkingCopy> {
        self.working_copy.as_ref().lock().unwrap()
    }

    pub fn store(&self) -> &Arc<StoreWrapper> {
        &self.store
    }

    pub fn settings(&self) -> &RepoSettings {
        &self.settings
    }

    pub fn start_transaction(&self, description: &str) -> Transaction {
        Transaction::new(
            &self,
            &self.view,
            &self.evolution.as_ref().unwrap(),
            description,
        )
    }

    /// Reloads the view and invalidates the cached index and evolution state,
    /// e.g. after the repo may have changed on disk.
    pub fn reload(&mut self) {
        self.view.reload();
        let repo_ref: &ReadonlyRepo = self;
        // See the note in `init()` about this fabricated 'static lifetime.
        let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) };
        {
            let mut locked_index = self.index.lock().unwrap();
            locked_index.take();
        }
        self.evolution = Some(ReadonlyEvolution::new(static_lifetime_repo));
    }

    /// Like `reload()`, but loads the view as of the given operation.
    pub fn reload_at(&mut self, operation: &Operation) {
        self.view.reload_at(operation);
        let repo_ref: &ReadonlyRepo = self;
        // See the note in `init()` about this fabricated 'static lifetime.
        let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) };
        {
            let mut locked_index = self.index.lock().unwrap();
            locked_index.take();
        }
        self.evolution = Some(ReadonlyEvolution::new(static_lifetime_repo));
    }
}
impl Repo for ReadonlyRepo {
    fn store(&self) -> &Arc<StoreWrapper> {
        &self.store
    }
    fn view(&self) -> &dyn View {
        &self.view
    }
    fn evolution(&self) -> &dyn Evolution {
        // Always Some after construction (set by `init_cycles`).
        self.evolution.as_ref().unwrap()
    }
}

518
lib/src/repo_path.rs Normal file
View file

@ -0,0 +1,518 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{Debug, Error, Formatter};
/// A single name within a repo path, i.e. one path segment with no slashes.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct RepoPathComponent {
    value: String,
}

impl RepoPathComponent {
    /// The component as a plain string slice.
    pub fn value(&self) -> &str {
        &self.value
    }
}

impl From<&str> for RepoPathComponent {
    fn from(value: &str) -> Self {
        // Components are single segments, so a separator is a caller bug.
        assert!(!value.contains('/'));
        Self {
            value: value.to_string(),
        }
    }
}
// Does not include a trailing slash
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct DirRepoPathComponent {
    value: String,
}

impl DirRepoPathComponent {
    /// The component as a plain string slice.
    pub fn value(&self) -> &str {
        &self.value
    }
}

impl From<&str> for DirRepoPathComponent {
    fn from(value: &str) -> Self {
        // Components are single segments, so a separator is a caller bug.
        assert!(!value.contains('/'));
        Self {
            value: value.to_string(),
        }
    }
}
/// The basename of a file path: a single non-empty segment with no slashes.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct FileRepoPathComponent {
    value: String,
}

impl FileRepoPathComponent {
    /// The component as a plain string slice.
    pub fn value(&self) -> &str {
        &self.value
    }
}

impl From<&str> for FileRepoPathComponent {
    fn from(value: &str) -> Self {
        // File names are single, non-empty segments.
        assert!(!value.contains('/'));
        assert!(!value.is_empty());
        Self {
            value: value.to_string(),
        }
    }
}
/// A path to either a file or a directory, split into a directory part and a
/// basename. The root path has an empty basename.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RepoPath {
    dir: DirRepoPath,
    basename: RepoPathComponent,
}

impl Debug for RepoPath {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "{:?}", self.to_internal_string())
    }
}

impl RepoPath {
    /// The root path: root directory part and an empty basename.
    pub fn root() -> Self {
        RepoPath {
            dir: DirRepoPath::root(),
            basename: RepoPathComponent {
                value: String::new(),
            },
        }
    }

    pub fn new(dir: DirRepoPath, basename: RepoPathComponent) -> Self {
        RepoPath { dir, basename }
    }

    /// The full string form used internally, not for presenting to users (where
    /// we may want to use the platform's separator).
    pub fn to_internal_string(&self) -> String {
        self.dir.to_internal_string() + self.basename.value()
    }

    /// Reinterprets this path as a file path.
    pub fn to_file_repo_path(&self) -> FileRepoPath {
        let basename = FileRepoPathComponent {
            value: self.basename.value.clone(),
        };
        FileRepoPath {
            dir: self.dir.clone(),
            basename,
        }
    }

    /// Reinterprets this path as a directory path.
    pub fn to_dir_repo_path(&self) -> DirRepoPath {
        if self.is_root() {
            DirRepoPath::root()
        } else {
            let component = DirRepoPathComponent {
                value: self.basename.value.clone(),
            };
            self.dir.join(&component)
        }
    }

    pub fn is_root(&self) -> bool {
        self.basename.value.is_empty() && self.dir.is_root()
    }

    /// The directory part, or `None` for the root path.
    pub fn dir(&self) -> Option<&DirRepoPath> {
        if !self.is_root() {
            Some(&self.dir)
        } else {
            None
        }
    }

    /// Splits into directory and basename; `None` for the root path.
    pub fn split(&self) -> Option<(&DirRepoPath, &RepoPathComponent)> {
        if !self.is_root() {
            Some((&self.dir, &self.basename))
        } else {
            None
        }
    }
}
impl From<&str> for RepoPath {
    /// Parses an internal string form ("dir/subdir/name"); must not end with
    /// a slash.
    fn from(value: &str) -> Self {
        assert!(!value.ends_with('/'));
        if let Some(slash_pos) = value.rfind('/') {
            RepoPath {
                dir: DirRepoPath::from(&value[..=slash_pos]),
                basename: RepoPathComponent::from(&value[slash_pos + 1..]),
            }
        } else {
            RepoPath {
                dir: DirRepoPath::root(),
                basename: RepoPathComponent::from(value),
            }
        }
    }
}
// Includes a trailing slash
/// A directory path, stored as the list of its components.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DirRepoPath {
    value: Vec<DirRepoPathComponent>,
}

impl Debug for DirRepoPath {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.write_fmt(format_args!("{:?}", &self.to_internal_string()))
    }
}

impl DirRepoPath {
    pub fn root() -> Self {
        DirRepoPath { value: Vec::new() }
    }

    pub fn is_root(&self) -> bool {
        self.components().is_empty()
    }

    /// The full string form used internally, not for presenting to users (where
    /// we may want to use the platform's separator).
    pub fn to_internal_string(&self) -> String {
        let mut result = String::new();
        for component in &self.value {
            result.push_str(component.value());
            result.push('/');
        }
        result
    }

    /// Whether `other` is `self` or a descendant of it.
    pub fn contains_dir(&self, other: &DirRepoPath) -> bool {
        other.value.starts_with(&self.value)
    }

    /// Whether the file `other` is inside `self` (at any depth).
    pub fn contains_file(&self, other: &FileRepoPath) -> bool {
        other.dir.value.starts_with(&self.value)
    }

    // TODO: consider making this return a Option<DirRepoPathSlice> or similar,
    // where the slice would borrow from this instance.
    /// The parent directory, or `None` for the root.
    pub fn parent(&self) -> Option<DirRepoPath> {
        match self.value.len() {
            0 => None,
            n => Some(DirRepoPath {
                value: self.value[..n - 1].to_vec(),
            }),
        }
    }

    /// Splits into parent directory and last component; `None` for the root.
    pub fn split(&self) -> Option<(DirRepoPath, DirRepoPathComponent)> {
        match self.value.len() {
            0 => None,
            n => Some((
                DirRepoPath {
                    value: self.value[..n - 1].to_vec(),
                },
                self.value[n - 1].clone(),
            )),
        }
    }

    pub fn components(&self) -> &Vec<DirRepoPathComponent> {
        &self.value
    }
}
impl From<&str> for DirRepoPath {
    /// Parses an internal string form: either empty (the root) or ending
    /// with a slash.
    fn from(value: &str) -> Self {
        assert!(value.is_empty() || value.ends_with('/'));
        let mut parts: Vec<&str> = value.split('/').collect();
        // remove the trailing empty string
        parts.pop();
        let components = parts
            .iter()
            .map(|part| DirRepoPathComponent {
                value: part.to_string(),
            })
            .collect();
        DirRepoPath { value: components }
    }
}
/// A path to a file, split into its directory and its basename.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct FileRepoPath {
    dir: DirRepoPath,
    basename: FileRepoPathComponent,
}

impl Debug for FileRepoPath {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "{:?}", self.to_internal_string())
    }
}

impl FileRepoPath {
    /// The full string form used internally, not for presenting to users (where
    /// we may want to use the platform's separator).
    pub fn to_internal_string(&self) -> String {
        self.dir.to_internal_string() + self.basename.value()
    }

    /// The directory the file is in.
    pub fn dir(&self) -> &DirRepoPath {
        &self.dir
    }

    /// Borrows the directory and basename parts.
    pub fn split(&self) -> (&DirRepoPath, &FileRepoPathComponent) {
        (&self.dir, &self.basename)
    }

    /// Converts to the generic `RepoPath` form.
    pub fn to_repo_path(&self) -> RepoPath {
        let basename = RepoPathComponent {
            value: self.basename.value.clone(),
        };
        RepoPath {
            dir: self.dir.clone(),
            basename,
        }
    }
}
impl From<&str> for FileRepoPath {
    /// Parses an internal string form; file paths never end with a slash.
    fn from(value: &str) -> Self {
        assert!(!value.ends_with('/'));
        if let Some(slash_pos) = value.rfind('/') {
            FileRepoPath {
                dir: DirRepoPath::from(&value[..=slash_pos]),
                basename: FileRepoPathComponent::from(&value[slash_pos + 1..]),
            }
        } else {
            FileRepoPath {
                dir: DirRepoPath::root(),
                basename: FileRepoPathComponent::from(value),
            }
        }
    }
}
/// Joining a directory with a path component; the component type determines
/// the type of path produced (directory, file, or generic).
pub trait RepoPathJoin<T> {
    type Result;
    fn join(&self, entry: &T) -> Self::Result;
}
impl RepoPathJoin<DirRepoPathComponent> for DirRepoPath {
    type Result = DirRepoPath;

    /// Appends a directory component, producing a deeper directory path.
    fn join(&self, entry: &DirRepoPathComponent) -> DirRepoPath {
        let mut value = self.value.clone();
        value.push(entry.clone());
        DirRepoPath { value }
    }
}
impl RepoPathJoin<FileRepoPathComponent> for DirRepoPath {
    type Result = FileRepoPath;

    /// Appends a file component, producing a file path in this directory.
    fn join(&self, entry: &FileRepoPathComponent) -> FileRepoPath {
        let dir = self.clone();
        let basename = entry.clone();
        FileRepoPath { dir, basename }
    }
}
impl RepoPathJoin<RepoPathComponent> for DirRepoPath {
    type Result = RepoPath;

    /// Appends a generic component, producing a generic repo path.
    fn join(&self, entry: &RepoPathComponent) -> RepoPath {
        let dir = self.clone();
        let basename = entry.clone();
        RepoPath { dir, basename }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Boolean comparisons rewritten from `assert_eq!(expr, true/false)` to
    // `assert!(expr)` / `assert!(!expr)` (clippy: bool_assert_comparison).

    #[test]
    fn is_root() {
        assert!(RepoPath::root().is_root());
        assert!(RepoPath::from("").is_root());
        assert!(!RepoPath::from("foo").is_root());
        assert!(DirRepoPath::root().is_root());
        assert!(DirRepoPath::from("").is_root());
        assert!(!DirRepoPath::from("foo/").is_root());
    }

    #[test]
    fn value() {
        assert_eq!(RepoPath::root().to_internal_string(), "");
        assert_eq!(RepoPath::from("dir").to_internal_string(), "dir");
        assert_eq!(RepoPath::from("file").to_internal_string(), "file");
        assert_eq!(RepoPath::from("dir/file").to_internal_string(), "dir/file");
        assert_eq!(DirRepoPath::root().to_internal_string(), "");
        assert_eq!(DirRepoPath::from("dir/").to_internal_string(), "dir/");
        assert_eq!(
            DirRepoPath::from("dir/subdir/").to_internal_string(),
            "dir/subdir/"
        );
        assert_eq!(FileRepoPath::from("file").to_internal_string(), "file");
        assert_eq!(
            FileRepoPath::from("dir/file").to_internal_string(),
            "dir/file"
        );
    }

    #[test]
    fn order() {
        assert!(DirRepoPath::root() < DirRepoPath::from("dir/"));
        assert!(DirRepoPath::from("dir/") < DirRepoPath::from("dirx/"));
        // '#' < '/'
        assert!(DirRepoPath::from("dir/") < DirRepoPath::from("dir#/"));
        assert!(DirRepoPath::from("dir/") < DirRepoPath::from("dir/sub/"));
        assert!(FileRepoPath::from("abc") < FileRepoPath::from("dir/file"));
        assert!(FileRepoPath::from("dir") < FileRepoPath::from("dir/file"));
        assert!(FileRepoPath::from("dis") < FileRepoPath::from("dir/file"));
        assert!(FileRepoPath::from("xyz") < FileRepoPath::from("dir/file"));
        assert!(FileRepoPath::from("dir1/xyz") < FileRepoPath::from("dir2/abc"));
    }

    #[test]
    fn join() {
        let root = DirRepoPath::root();
        let dir_component = DirRepoPathComponent::from("dir");
        let subdir_component = DirRepoPathComponent::from("subdir");
        let file_component = FileRepoPathComponent::from("file");
        assert_eq!(root.join(&file_component), FileRepoPath::from("file"));
        let dir = root.join(&dir_component);
        assert_eq!(dir, DirRepoPath::from("dir/"));
        assert_eq!(dir.join(&file_component), FileRepoPath::from("dir/file"));
        let subdir = dir.join(&subdir_component);
        assert_eq!(subdir, DirRepoPath::from("dir/subdir/"));
        assert_eq!(
            subdir.join(&file_component),
            FileRepoPath::from("dir/subdir/file")
        );
    }

    #[test]
    fn parent() {
        let root = DirRepoPath::root();
        let dir_component = DirRepoPathComponent::from("dir");
        let subdir_component = DirRepoPathComponent::from("subdir");
        let dir = root.join(&dir_component);
        let subdir = dir.join(&subdir_component);
        assert_eq!(root.parent(), None);
        assert_eq!(dir.parent(), Some(root));
        assert_eq!(subdir.parent(), Some(dir));
    }

    #[test]
    fn split_dir() {
        let root = DirRepoPath::root();
        let dir_component = DirRepoPathComponent::from("dir");
        let subdir_component = DirRepoPathComponent::from("subdir");
        let dir = root.join(&dir_component);
        let subdir = dir.join(&subdir_component);
        assert_eq!(root.split(), None);
        assert_eq!(dir.split(), Some((root, dir_component)));
        assert_eq!(subdir.split(), Some((dir, subdir_component)));
    }

    #[test]
    fn split_file() {
        let root = DirRepoPath::root();
        let dir_component = DirRepoPathComponent::from("dir");
        let file_component = FileRepoPathComponent::from("file");
        let dir = root.join(&dir_component);
        assert_eq!(
            root.join(&file_component).split(),
            (&root, &file_component.clone())
        );
        assert_eq!(dir.join(&file_component).split(), (&dir, &file_component));
    }

    #[test]
    fn dir() {
        let root = DirRepoPath::root();
        let dir_component = DirRepoPathComponent::from("dir");
        let file_component = FileRepoPathComponent::from("file");
        let dir = root.join(&dir_component);
        assert_eq!(root.join(&file_component).dir(), &root);
        assert_eq!(dir.join(&file_component).dir(), &dir);
    }

    #[test]
    fn components() {
        assert_eq!(DirRepoPath::root().components(), &vec![]);
        assert_eq!(
            DirRepoPath::from("dir/").components(),
            &vec![DirRepoPathComponent::from("dir")]
        );
        assert_eq!(
            DirRepoPath::from("dir/subdir/").components(),
            &vec![
                DirRepoPathComponent::from("dir"),
                DirRepoPathComponent::from("subdir")
            ]
        );
    }

    #[test]
    fn convert() {
        assert_eq!(RepoPath::root().to_dir_repo_path(), DirRepoPath::root());
        assert_eq!(
            RepoPath::from("dir").to_dir_repo_path(),
            DirRepoPath::from("dir/")
        );
        assert_eq!(
            RepoPath::from("dir/subdir").to_dir_repo_path(),
            DirRepoPath::from("dir/subdir/")
        );
        assert_eq!(
            RepoPath::from("file").to_file_repo_path(),
            FileRepoPath::from("file")
        );
        assert_eq!(
            RepoPath::from("dir/file").to_file_repo_path(),
            FileRepoPath::from("dir/file")
        );
    }
}

86
lib/src/rewrite.rs Normal file
View file

@ -0,0 +1,86 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::commit::Commit;
use crate::commit_builder::CommitBuilder;
use crate::dag_walk::common_ancestor;
use crate::repo_path::DirRepoPath;
use crate::settings::UserSettings;
use crate::store_wrapper::StoreWrapper;
use crate::transaction::Transaction;
use crate::tree::Tree;
use crate::trees::merge_trees;
/// Merges the trees of `commits`, returning the resulting root tree.
/// An empty slice yields the store's empty tree.
pub fn merge_commit_trees(store: &StoreWrapper, commits: &[Commit]) -> Tree {
    if commits.is_empty() {
        return store
            .get_tree(&DirRepoPath::root(), store.empty_tree_id())
            .unwrap();
    }
    // Fold the remaining commits into the first one's tree, left to right:
    // each step merges relative to the common ancestor of the new commit and
    // the commits already folded in.
    let mut new_tree = commits[0].tree();
    for i in 1..commits.len() {
        let other_commit = &commits[i];
        let ancestor = common_ancestor(&commits[0..i], vec![other_commit]);
        let merged_id = merge_trees(&new_tree, &ancestor.tree(), &other_commit.tree()).unwrap();
        new_tree = store.get_tree(&DirRepoPath::root(), &merged_id).unwrap();
    }
    new_tree
}
/// Rewrites `old_commit` onto `new_parents`: its changes relative to its old
/// merged parent tree are replayed on top of the new parents' merged tree.
pub fn rebase_commit(
    settings: &UserSettings,
    tx: &mut Transaction,
    old_commit: &Commit,
    new_parents: &[Commit],
) -> Commit {
    let store = tx.store();
    let old_base_tree = merge_commit_trees(store, &old_commit.parents());
    // `new_parents` is already `&[Commit]`; the original `&new_parents` was a
    // needless double borrow (clippy: needless_borrow). Same for `old_commit`
    // below.
    let new_base_tree = merge_commit_trees(store, new_parents);
    // TODO: pass in labels for the merge parts
    let new_tree_id = merge_trees(&new_base_tree, &old_base_tree, &old_commit.tree()).unwrap();
    let new_parent_ids = new_parents
        .iter()
        .map(|commit| commit.id().clone())
        .collect();
    CommitBuilder::for_rewrite_from(settings, store, old_commit)
        .set_parents(new_parent_ids)
        .set_tree(new_tree_id)
        .write_to_transaction(tx)
}
/// Creates a new commit on `new_parents` that undoes the changes made by
/// `old_commit` (note the merge base and side are swapped relative to
/// `rebase_commit`, which applies the inverse diff).
pub fn back_out_commit(
    settings: &UserSettings,
    tx: &mut Transaction,
    old_commit: &Commit,
    new_parents: &[Commit],
) -> Commit {
    let store = tx.store();
    let old_base_tree = merge_commit_trees(store, &old_commit.parents());
    // `new_parents` is already `&[Commit]`; the original `&new_parents` was a
    // needless double borrow (clippy: needless_borrow).
    let new_base_tree = merge_commit_trees(store, new_parents);
    // TODO: pass in labels for the merge parts
    let new_tree_id = merge_trees(&new_base_tree, &old_commit.tree(), &old_base_tree).unwrap();
    let new_parent_ids = new_parents
        .iter()
        .map(|commit| commit.id().clone())
        .collect();
    // TODO: i18n the description based on repo language
    CommitBuilder::for_new_commit(settings, store, new_tree_id)
        .set_parents(new_parent_ids)
        .set_description(format!(
            "backout of commit {}",
            hex::encode(&old_commit.id().0)
        ))
        .write_to_transaction(tx)
}

70
lib/src/settings.rs Normal file
View file

@ -0,0 +1,70 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
/// User-level settings, loaded from the user's `.jjconfig` (see `for_user`).
#[derive(Debug, Clone)]
pub struct UserSettings {
    config: config::Config,
}
/// Per-repo settings: user settings overlaid with the repo's `config` file
/// (see `UserSettings::with_repo`).
#[derive(Debug, Clone)]
pub struct RepoSettings {
    config: config::Config,
}
impl UserSettings {
    /// Builds settings from an already-constructed config.
    pub fn from_config(config: config::Config) -> Self {
        UserSettings { config }
    }

    /// Loads user settings from `~/.jjconfig` (TOML; missing file is fine).
    ///
    /// # Errors
    /// Returns any error produced while reading or merging the config file.
    pub fn for_user() -> Result<Self, config::ConfigError> {
        let mut config = config::Config::new();
        if let Some(home_dir) = dirs::home_dir() {
            config.merge(
                config::File::from(home_dir.join(".jjconfig"))
                    .required(false)
                    .format(config::FileFormat::Toml),
            )?;
        }
        Ok(UserSettings { config })
    }

    /// Layers the repo's `config` file (TOML; missing file is fine) on top of
    /// these settings. The parameter is `&Path` rather than `&PathBuf`
    /// (clippy: ptr_arg); callers passing `&PathBuf` still work via deref
    /// coercion.
    pub fn with_repo(
        &self,
        repo_path: &std::path::Path,
    ) -> Result<RepoSettings, config::ConfigError> {
        let mut config = self.config.clone();
        config.merge(
            config::File::from(repo_path.join("config"))
                .required(false)
                .format(config::FileFormat::Toml),
        )?;
        Ok(RepoSettings { config })
    }

    /// The configured user name. Panics if `user.name` is not set.
    pub fn user_name(&self) -> String {
        self.config.get_str("user.name").expect("no user.name set")
    }

    /// The configured user email. Panics if `user.email` is not set.
    pub fn user_email(&self) -> String {
        self.config
            .get_str("user.email")
            .expect("no user.email set")
    }

    /// The underlying merged config.
    pub fn config(&self) -> &config::Config {
        &self.config
    }
}

208
lib/src/simple_op_store.rs Normal file
View file

@ -0,0 +1,208 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::fs;
use std::fs::File;
use std::io::ErrorKind;
use std::io::Write;
use std::path::PathBuf;
use blake2::{Blake2b, Digest};
use protobuf::{Message, ProtobufError};
use tempfile::{NamedTempFile, PersistError};
use crate::op_store::{
OpStore, OpStoreError, OpStoreResult, Operation, OperationId, OperationMetadata, View, ViewId,
};
use crate::store::{CommitId, MillisSinceEpoch, Timestamp};
impl From<std::io::Error> for OpStoreError {
    /// I/O failures surface as generic `Other` store errors.
    fn from(err: std::io::Error) -> Self {
        let message = err.to_string();
        OpStoreError::Other(message)
    }
}
impl From<PersistError> for OpStoreError {
    /// Temp-file persist failures surface as generic `Other` store errors.
    fn from(err: PersistError) -> Self {
        let message = err.to_string();
        OpStoreError::Other(message)
    }
}
impl From<ProtobufError> for OpStoreError {
    /// Proto (de)serialization failures surface as generic `Other` errors.
    fn from(err: ProtobufError) -> Self {
        let message = err.to_string();
        OpStoreError::Other(message)
    }
}
/// A file-based `OpStore` that keeps each view and operation as a proto file
/// under `views/` and `operations/` in `path`.
#[derive(Debug)]
pub struct SimpleOpStore {
    path: PathBuf,
}
impl SimpleOpStore {
    /// Creates the on-disk layout ("views" and "operations" subdirectories)
    /// and returns a store rooted at `store_path`. Panics if the directories
    /// cannot be created.
    pub fn init(store_path: PathBuf) -> Self {
        for subdir in &["views", "operations"] {
            fs::create_dir(store_path.join(subdir)).unwrap();
        }
        Self::load(store_path)
    }

    /// Opens an existing store rooted at `store_path`.
    pub fn load(store_path: PathBuf) -> Self {
        SimpleOpStore { path: store_path }
    }

    /// File holding the view with the given id.
    fn view_path(&self, id: &ViewId) -> PathBuf {
        self.path.join("views").join(id.hex())
    }

    /// File holding the operation with the given id.
    fn operation_path(&self, id: &OperationId) -> PathBuf {
        self.path.join("operations").join(id.hex())
    }
}
/// Maps a missing file to `OpStoreError::NotFound`; everything else becomes a
/// generic store error.
fn not_found_to_store_error(err: std::io::Error) -> OpStoreError {
    match err.kind() {
        ErrorKind::NotFound => OpStoreError::NotFound,
        _ => OpStoreError::from(err),
    }
}
impl OpStore for SimpleOpStore {
fn read_view(&self, id: &ViewId) -> OpStoreResult<View> {
let path = self.view_path(&id);
let mut file = File::open(path).map_err(not_found_to_store_error)?;
let proto: protos::op_store::View = protobuf::parse_from_reader(&mut file)?;
Ok(view_from_proto(&proto))
}
fn write_view(&self, view: &View) -> OpStoreResult<ViewId> {
let temp_file = NamedTempFile::new_in(&self.path)?;
let proto = view_to_proto(view);
let mut proto_bytes: Vec<u8> = Vec::new();
proto.write_to_writer(&mut proto_bytes)?;
temp_file.as_file().write_all(&proto_bytes)?;
let id = ViewId(Blake2b::digest(&proto_bytes).to_vec());
temp_file.persist(self.view_path(&id))?;
Ok(id)
}
fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
let path = self.operation_path(&id);
let mut file = File::open(path).map_err(not_found_to_store_error)?;
let proto: protos::op_store::Operation = protobuf::parse_from_reader(&mut file)?;
Ok(operation_from_proto(&proto))
}
fn write_operation(&self, operation: &Operation) -> OpStoreResult<OperationId> {
let temp_file = NamedTempFile::new_in(&self.path)?;
let proto = operation_to_proto(operation);
let mut proto_bytes: Vec<u8> = Vec::new();
proto.write_to_writer(&mut proto_bytes)?;
temp_file.as_file().write_all(&proto_bytes)?;
let id = OperationId(Blake2b::digest(&proto_bytes).to_vec());
temp_file.persist(self.operation_path(&id))?;
Ok(id)
}
}
/// Converts an in-memory `Timestamp` to its proto representation.
fn timestamp_to_proto(timestamp: &Timestamp) -> protos::op_store::Timestamp {
    let mut proto = protos::op_store::Timestamp::new();
    proto.set_tz_offset(timestamp.tz_offset);
    proto.set_millis_since_epoch(timestamp.timestamp.0);
    proto
}
/// Converts a proto timestamp back to the in-memory `Timestamp`.
fn timestamp_from_proto(proto: &protos::op_store::Timestamp) -> Timestamp {
    let timestamp = MillisSinceEpoch(proto.millis_since_epoch);
    let tz_offset = proto.tz_offset;
    Timestamp {
        timestamp,
        tz_offset,
    }
}
/// Converts in-memory operation metadata to its proto representation.
fn operation_metadata_to_proto(
    metadata: &OperationMetadata,
) -> protos::op_store::OperationMetadata {
    let mut proto = protos::op_store::OperationMetadata::new();
    proto.set_username(metadata.username.clone());
    proto.set_hostname(metadata.hostname.clone());
    proto.set_description(metadata.description.clone());
    proto.set_end_time(timestamp_to_proto(&metadata.end_time));
    proto.set_start_time(timestamp_to_proto(&metadata.start_time));
    proto
}
/// Converts proto operation metadata back to the in-memory form.
fn operation_metadata_from_proto(proto: &protos::op_store::OperationMetadata) -> OperationMetadata {
    OperationMetadata {
        start_time: timestamp_from_proto(proto.get_start_time()),
        end_time: timestamp_from_proto(proto.get_end_time()),
        description: proto.get_description().to_owned(),
        hostname: proto.get_hostname().to_owned(),
        username: proto.get_username().to_owned(),
    }
}
/// Converts an in-memory `Operation` to its proto representation.
fn operation_to_proto(operation: &Operation) -> protos::op_store::Operation {
    let mut proto = protos::op_store::Operation::new();
    proto.set_metadata(operation_metadata_to_proto(&operation.metadata));
    proto.set_view_id(operation.view_id.0.clone());
    for parent in &operation.parents {
        proto.parents.push(parent.0.clone());
    }
    proto
}
/// Converts a proto `Operation` back to the in-memory form.
fn operation_from_proto(proto: &protos::op_store::Operation) -> Operation {
    Operation {
        view_id: ViewId(proto.view_id.to_vec()),
        parents: proto
            .parents
            .iter()
            .map(|parent_bytes| OperationId(parent_bytes.clone()))
            .collect(),
        metadata: operation_metadata_from_proto(proto.get_metadata()),
    }
}
fn view_to_proto(view: &View) -> protos::op_store::View {
let mut proto = protos::op_store::View::new();
proto.checkout = view.checkout.0.clone();
for head_id in &view.head_ids {
proto.head_ids.push(head_id.0.clone());
}
proto
}
/// Converts a proto `View` back to the in-memory form.
fn view_from_proto(proto: &protos::op_store::View) -> View {
    let mut view = View::new(CommitId(proto.checkout.to_vec()));
    for head_id_bytes in &proto.head_ids {
        view.head_ids.insert(CommitId(head_id_bytes.clone()));
    }
    view
}

354
lib/src/store.rs Normal file
View file

@ -0,0 +1,354 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use std::fmt::{Debug, Error, Formatter};
use std::io::Read;
use std::result::Result;
use std::vec::Vec;
use crate::repo_path::DirRepoPath;
use crate::repo_path::FileRepoPath;
use std::borrow::Borrow;
use thiserror::Error;
/// Identifies a commit in the store by its hash bytes.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct CommitId(pub Vec<u8>);
impl Debug for CommitId {
    // Shows the id in hex rather than as raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.debug_tuple("CommitId").field(&self.hex()).finish()
    }
}
impl CommitId {
    /// Parses a hex string into an id. Panics on invalid hex.
    pub fn from_hex(hex: &str) -> Self {
        CommitId(hex::decode(hex).unwrap())
    }
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Identifies a change by its id bytes.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct ChangeId(pub Vec<u8>);
impl Debug for ChangeId {
    // Shows the id in hex rather than as raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.debug_tuple("ChangeId").field(&self.hex()).finish()
    }
}
impl ChangeId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Identifies a tree in the store by its hash bytes.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct TreeId(pub Vec<u8>);
impl Debug for TreeId {
    // Shows the id in hex rather than as raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.debug_tuple("TreeId").field(&self.hex()).finish()
    }
}
impl TreeId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Identifies stored file content by its hash bytes.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct FileId(pub Vec<u8>);
impl Debug for FileId {
    // Shows the id in hex rather than as raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.debug_tuple("FileId").field(&self.hex()).finish()
    }
}
impl FileId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Identifies a stored symlink target by its hash bytes.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct SymlinkId(pub Vec<u8>);
impl Debug for SymlinkId {
    // Shows the id in hex rather than as raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.debug_tuple("SymlinkId").field(&self.hex()).finish()
    }
}
impl SymlinkId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// Identifies a stored conflict by its hash bytes.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
pub struct ConflictId(pub Vec<u8>);
impl Debug for ConflictId {
    // Shows the id in hex rather than as raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.debug_tuple("ConflictId").field(&self.hex()).finish()
    }
}
impl ConflictId {
    /// The id as a lowercase hex string.
    pub fn hex(&self) -> String {
        hex::encode(&self.0)
    }
}
/// A commit's phase (public or draft).
/// NOTE(review): not referenced by the code visible in this file — confirm
/// intended semantics against its users.
pub enum Phase {
    Public,
    Draft,
}
/// A point in time, as milliseconds since the UNIX epoch.
#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub struct MillisSinceEpoch(pub u64);
/// A timestamp plus the time zone offset it was recorded in.
#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub struct Timestamp {
    pub timestamp: MillisSinceEpoch,
    // time zone offset in minutes (local minus UTC; see `Timestamp::now`)
    pub tz_offset: i32,
}
impl Timestamp {
pub fn now() -> Self {
let now = chrono::offset::Local::now();
Self {
timestamp: MillisSinceEpoch(now.timestamp_millis() as u64),
tz_offset: now.offset().local_minus_utc() / 60,
}
}
}
/// An author/committer identity with the time of the action.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Signature {
    pub name: String,
    pub email: String,
    pub timestamp: Timestamp,
}
/// Raw commit data as represented by the storage layer.
///
/// NOTE(review): the exact semantics of `predecessors`, `is_open`, and
/// `is_pruned` are not visible in this file — confirm against the
/// higher-level `Commit` wrapper before documenting them as fact.
#[derive(Debug, Clone)]
pub struct Commit {
    pub parents: Vec<CommitId>,
    pub predecessors: Vec<CommitId>,
    pub root_tree: TreeId,
    pub change_id: ChangeId,
    pub description: String,
    pub author: Signature,
    pub committer: Signature,
    pub is_open: bool,
    pub is_pruned: bool,
}
/// One side (positive or negative state) of a conflict.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct ConflictPart {
    // TODO: Store e.g. CommitId here too? Labels (theirs/ours/base)? Would those still be
    // useful e.g. after rebasing this conflict?
    pub value: TreeValue,
}
/// An unresolved merge, as a set of states to add and remove.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Conflict {
    // A conflict is represented by a list of positive and negative states that need to be applied.
    // In a simple 3-way merge of B and C with merge base A, the conflict will be { add: [B, C],
    // remove: [A] }. Also note that a conflict of the form { add: [A], remove: [] } is the
    // same as non-conflict A.
    pub removes: Vec<ConflictPart>,
    pub adds: Vec<ConflictPart>,
}
impl Conflict {
    /// If this conflict has a 3-way form, returns it as (left, base, right);
    /// otherwise returns `None`. Covers modify/modify, add/add, and
    /// modify/delete shapes.
    pub fn to_three_way(
        &self,
    ) -> Option<(
        Option<ConflictPart>,
        Option<ConflictPart>,
        Option<ConflictPart>,
    )> {
        match (self.removes.len(), self.adds.len()) {
            // Regular (modify/modify) 3-way conflict
            (1, 2) => Some((
                Some(self.adds[0].clone()),
                Some(self.removes[0].clone()),
                Some(self.adds[1].clone()),
            )),
            // Add/add conflict
            (0, 2) => Some((Some(self.adds[0].clone()), None, Some(self.adds[1].clone()))),
            // Modify/delete conflict
            (1, 1) => Some((
                Some(self.adds[0].clone()),
                Some(self.removes[0].clone()),
                None,
            )),
            _ => None,
        }
    }
}
impl Default for Conflict {
    /// An empty conflict: nothing to add, nothing to remove.
    fn default() -> Self {
        Conflict {
            removes: vec![],
            adds: vec![],
        }
    }
}
/// Errors returned by `Store` implementations.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum StoreError {
    #[error("Object not found")]
    NotFound,
    #[error("Error: {0}")]
    Other(String),
}
/// Shorthand for results produced by `Store` operations.
pub type StoreResult<T> = Result<T, StoreError>;
/// The value of a single tree entry.
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub enum TreeValue {
    // A regular file; `executable` is the permission bit.
    Normal { id: FileId, executable: bool },
    // A symlink, stored by the id of its target string.
    Symlink(SymlinkId),
    // A subdirectory.
    Tree(TreeId),
    // A Git submodule, referencing a commit.
    GitSubmodule(CommitId),
    // An unresolved conflict, stored out of line by id.
    Conflict(ConflictId),
}
/// A borrowed (name, value) pair from a `Tree`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct TreeEntry<'a> {
    name: &'a str,
    value: &'a TreeValue,
}
impl<'a> TreeEntry<'a> {
    /// Creates an entry borrowing a name/value pair (typically from a `Tree`).
    pub fn new(name: &'a str, value: &'a TreeValue) -> Self {
        TreeEntry { name, value }
    }

    /// The entry's name. Returns the stored `&'a str` directly; the original
    /// `&self.name` re-borrowed an already-borrowed field and relied on deref
    /// coercion (clippy: needless_borrow). Same for `value` below.
    pub fn name(&self) -> &'a str {
        self.name
    }

    /// The entry's value.
    pub fn value(&self) -> &'a TreeValue {
        self.value
    }
}
/// Iterator over a `Tree`'s entries, in map (name) order.
pub struct TreeEntriesIter<'a> {
    iter: std::collections::btree_map::Iter<'a, String, TreeValue>,
}
impl<'a> Iterator for TreeEntriesIter<'a> {
    type Item = TreeEntry<'a>;

    /// Wraps the next (name, value) pair from the underlying map iterator.
    fn next(&mut self) -> Option<Self::Item> {
        let (name, value) = self.iter.next()?;
        Some(TreeEntry { name, value })
    }
}
/// Raw tree data: a name-sorted mapping from entry name to value.
#[derive(Debug, Clone)]
pub struct Tree {
    entries: BTreeMap<String, TreeValue>,
}
impl Default for Tree {
fn default() -> Self {
Self {
entries: BTreeMap::new(),
}
}
}
impl Tree {
pub fn is_empty(&self) -> bool {
self.entries.is_empty()
}
pub fn entries(&self) -> TreeEntriesIter {
TreeEntriesIter {
iter: self.entries.iter(),
}
}
pub fn set(&mut self, name: String, value: TreeValue) {
self.entries.insert(name, value);
}
pub fn remove<N>(&mut self, name: &N)
where
N: Borrow<str> + ?Sized,
{
self.entries.remove(name.borrow());
}
pub fn entry<N>(&self, name: &N) -> Option<TreeEntry>
where
N: Borrow<str> + ?Sized,
{
self.entries
.get_key_value(name.borrow())
.map(|(name, value)| TreeEntry { name, value })
}
pub fn value<N>(&self, name: &N) -> Option<&TreeValue>
where
N: Borrow<str> + ?Sized,
{
self.entries.get(name.borrow())
}
}
/// Low-level, content-addressed storage backend for files, symlinks, trees,
/// commits, and conflicts.
pub trait Store: Send + Sync + Debug {
    /// Length, in bytes, of the ids this store produces.
    fn hash_length(&self) -> usize;
    /// Returns a reader for the file content with the given id.
    fn read_file(&self, path: &FileRepoPath, id: &FileId) -> StoreResult<Box<dyn Read>>;
    /// Writes file content and returns its new id.
    fn write_file(&self, path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult<FileId>;
    /// Reads a symlink's target string.
    fn read_symlink(&self, path: &FileRepoPath, id: &SymlinkId) -> StoreResult<String>;
    /// Writes a symlink target and returns its new id.
    fn write_symlink(&self, path: &FileRepoPath, target: &str) -> StoreResult<SymlinkId>;
    /// Id of the empty tree.
    fn empty_tree_id(&self) -> &TreeId;
    /// Reads the tree with the given id; `path` is the directory it belongs to.
    fn read_tree(&self, path: &DirRepoPath, id: &TreeId) -> StoreResult<Tree>;
    /// Writes a tree and returns its new id.
    fn write_tree(&self, path: &DirRepoPath, contents: &Tree) -> StoreResult<TreeId>;
    /// Reads the commit with the given id.
    fn read_commit(&self, id: &CommitId) -> StoreResult<Commit>;
    /// Writes a commit and returns its new id.
    fn write_commit(&self, contents: &Commit) -> StoreResult<CommitId>;
    // TODO: Pass in the paths here too even though they are unused, just like for
    // files and trees?
    /// Reads the conflict with the given id.
    fn read_conflict(&self, id: &ConflictId) -> StoreResult<Conflict>;
    /// Writes a conflict and returns its new id.
    fn write_conflict(&self, contents: &Conflict) -> StoreResult<ConflictId>;
}

199
lib/src/store_wrapper.rs Normal file
View file

@ -0,0 +1,199 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use crate::commit::Commit;
use crate::repo_path::{DirRepoPath, FileRepoPath};
use crate::store;
use crate::store::{
ChangeId, CommitId, Conflict, ConflictId, FileId, MillisSinceEpoch, Signature, Store,
StoreResult, SymlinkId, Timestamp, TreeId,
};
use crate::tree::Tree;
use crate::tree_builder::TreeBuilder;
use std::io::Read;
/// Wraps the low-level store and makes it return more convenient types. Also
/// adds the root commit and adds caching.
/// TODO: Come up with a better name, possibly by renaming the current Store
/// trait to something else.
#[derive(Debug)]
pub struct StoreWrapper {
    // Weak reference to this wrapper itself, installed by `new`, so objects
    // handed out can hold an `Arc` back to the wrapper.
    weak_self: Option<Weak<StoreWrapper>>,
    store: Box<dyn Store>,
    // Id of the synthetic root commit (all-zeros hash of the store's length).
    root_commit_id: CommitId,
    // Caches of raw commit/tree data; trees are keyed by (directory, id).
    commit_cache: RwLock<HashMap<CommitId, Arc<store::Commit>>>,
    tree_cache: RwLock<HashMap<(DirRepoPath, TreeId), Arc<store::Tree>>>,
}
impl StoreWrapper {
    /// Wraps `store` in an `Arc` and installs a weak self-reference so the
    /// higher-level objects handed out can point back at this wrapper.
    pub fn new(store: Box<dyn Store>) -> Arc<Self> {
        // The root commit id is all zeros, sized to the store's hash length.
        let root_commit_id = CommitId(vec![0; store.hash_length()]);
        let mut wrapper = Arc::new(StoreWrapper {
            weak_self: None,
            store,
            root_commit_id,
            commit_cache: Default::default(),
            tree_cache: Default::default(),
        });
        let weak_self = Arc::downgrade(&wrapper);
        // SAFETY: no other strong reference to `wrapper` exists yet (only the
        // Weak created above), so mutating through the Arc cannot race.
        // NOTE(review): `Arc::get_mut_unchecked` is an unstable API — confirm
        // which nightly feature gate this crate builds with.
        let mut ref_mut = unsafe { Arc::get_mut_unchecked(&mut wrapper) };
        ref_mut.weak_self = Some(weak_self);
        wrapper
    }
    /// Length, in bytes, of ids produced by the backing store.
    pub fn hash_length(&self) -> usize {
        self.store.hash_length()
    }
    /// Id of the backing store's empty tree.
    pub fn empty_tree_id(&self) -> &TreeId {
        self.store.empty_tree_id()
    }
    /// Id of the synthetic all-zeros root commit.
    pub fn root_commit_id(&self) -> &CommitId {
        &self.root_commit_id
    }
    /// The synthetic root commit (never read from the backing store; see
    /// `get_store_commit`).
    pub fn root_commit(&self) -> Commit {
        self.get_commit(&self.root_commit_id).unwrap()
    }
    /// Reads (possibly cached) commit data and wraps it in a `Commit` that
    /// holds an `Arc` back to this wrapper.
    pub fn get_commit(&self, id: &CommitId) -> StoreResult<Commit> {
        let data = self.get_store_commit(id)?;
        Ok(Commit::new(
            self.weak_self.as_ref().unwrap().upgrade().unwrap(),
            id.clone(),
            data,
        ))
    }
    // Synthesizes the root commit: no parents, empty root tree, zeroed
    // change id, empty signatures with epoch timestamps.
    fn make_root_commit(&self) -> store::Commit {
        let timestamp = Timestamp {
            timestamp: MillisSinceEpoch(0),
            tz_offset: 0,
        };
        let signature = Signature {
            name: String::new(),
            email: String::new(),
            timestamp,
        };
        let change_id = ChangeId(vec![0; 16]);
        store::Commit {
            parents: vec![],
            predecessors: vec![],
            root_tree: self.store.empty_tree_id().clone(),
            change_id,
            description: String::new(),
            author: signature.clone(),
            committer: signature,
            is_open: false,
            is_pruned: false,
        }
    }
    // Cache-through read of raw commit data. The read lock is dropped before
    // the (possibly slow) store read; concurrent misses may both read and
    // then race to insert the same value, which is harmless.
    fn get_store_commit(&self, id: &CommitId) -> StoreResult<Arc<store::Commit>> {
        {
            let read_locked_cached = self.commit_cache.read().unwrap();
            if let Some(data) = read_locked_cached.get(id).cloned() {
                return Ok(data);
            }
        }
        // The root commit is synthesized, never read from the store.
        let commit = if id == self.root_commit_id() {
            self.make_root_commit()
        } else {
            self.store.read_commit(id)?
        };
        let data = Arc::new(commit);
        let mut write_locked_cache = self.commit_cache.write().unwrap();
        write_locked_cache.insert(id.clone(), data.clone());
        Ok(data)
    }
    /// Writes `commit` to the backing store, caches the data, and returns
    /// the wrapped `Commit`. Panics if the store write fails.
    pub fn write_commit(&self, commit: store::Commit) -> Commit {
        let commit_id = self.store.write_commit(&commit).unwrap();
        let data = Arc::new(commit);
        {
            let mut write_locked_cache = self.commit_cache.write().unwrap();
            write_locked_cache.insert(commit_id.clone(), data.clone());
        }
        let commit = Commit::new(
            self.weak_self.as_ref().unwrap().upgrade().unwrap(),
            commit_id,
            data,
        );
        commit
    }
    /// Reads (possibly cached) tree data for `id` at directory `dir` and
    /// wraps it in a `Tree` holding an `Arc` back to this wrapper.
    pub fn get_tree(&self, dir: &DirRepoPath, id: &TreeId) -> StoreResult<Tree> {
        let data = self.get_store_tree(dir, id)?;
        Ok(Tree::new(
            self.weak_self.as_ref().unwrap().upgrade().unwrap(),
            dir.clone(),
            id.clone(),
            data,
        ))
    }
    // Cache-through read of raw tree data, keyed by (directory, id); same
    // locking pattern as get_store_commit.
    fn get_store_tree(&self, dir: &DirRepoPath, id: &TreeId) -> StoreResult<Arc<store::Tree>> {
        let key = (dir.clone(), id.clone());
        {
            let read_locked_cache = self.tree_cache.read().unwrap();
            if let Some(data) = read_locked_cache.get(&key).cloned() {
                return Ok(data);
            }
        }
        let data = Arc::new(self.store.read_tree(dir, id)?);
        let mut write_locked_cache = self.tree_cache.write().unwrap();
        write_locked_cache.insert(key, data.clone());
        Ok(data)
    }
    /// Writes a tree to the backing store.
    pub fn write_tree(&self, path: &DirRepoPath, contents: &store::Tree) -> StoreResult<TreeId> {
        // TODO: This should also do caching like write_commit does.
        self.store.write_tree(path, contents)
    }
    // The remaining methods delegate directly to the backing store.
    pub fn read_file(&self, path: &FileRepoPath, id: &FileId) -> StoreResult<Box<dyn Read>> {
        self.store.read_file(path, id)
    }
    pub fn write_file(&self, path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult<FileId> {
        self.store.write_file(path, contents)
    }
    pub fn read_symlink(&self, path: &FileRepoPath, id: &SymlinkId) -> StoreResult<String> {
        self.store.read_symlink(path, id)
    }
    pub fn write_symlink(&self, path: &FileRepoPath, contents: &str) -> StoreResult<SymlinkId> {
        self.store.write_symlink(path, contents)
    }
    pub fn read_conflict(&self, id: &ConflictId) -> StoreResult<Conflict> {
        self.store.read_conflict(id)
    }
    pub fn write_conflict(&self, contents: &Conflict) -> StoreResult<ConflictId> {
        self.store.write_conflict(contents)
    }
    /// Starts building a tree on top of `base_tree_id`.
    pub fn tree_builder(&self, base_tree_id: TreeId) -> TreeBuilder {
        TreeBuilder::new(
            self.weak_self.as_ref().unwrap().upgrade().unwrap(),
            base_tree_id,
        )
    }
}

123
lib/src/testutils.rs Normal file
View file

@ -0,0 +1,123 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
use std::sync::Arc;
use tempfile::TempDir;
use crate::commit_builder::CommitBuilder;
use crate::repo::ReadonlyRepo;
use crate::repo_path::{DirRepoPath, FileRepoPath};
use crate::settings::UserSettings;
use crate::store::{FileId, TreeId, TreeValue};
use crate::store_wrapper::StoreWrapper;
use crate::tree::Tree;
use crate::tree_builder::TreeBuilder;
/// Builds minimal test settings with just the identity fields (`user.name`,
/// `user.email`) that commits require.
pub fn user_settings() -> UserSettings {
    let mut config = config::Config::new();
    config.set("user.name", "Test User").unwrap();
    config.set("user.email", "test.user@example.com").unwrap();
    UserSettings::from_config(config)
}
/// Creates a fresh repo in a temp dir (Git-backed when `use_git`). Returns
/// the temp dir too so the caller keeps it alive for the test's duration.
pub fn init_repo(settings: &UserSettings, use_git: bool) -> (TempDir, Arc<ReadonlyRepo>) {
    let temp_dir = tempfile::tempdir().unwrap();
    let wc_path = temp_dir.path().join("repo");
    fs::create_dir(&wc_path).unwrap();
    let repo = if use_git {
        let git_path = temp_dir.path().join("git-repo");
        git2::Repository::init(&git_path).unwrap();
        // `settings` is already a reference; `&settings` was a needless
        // double borrow (clippy: needless_borrow).
        ReadonlyRepo::init_git(settings, wc_path, git_path)
    } else {
        ReadonlyRepo::init_local(settings, wc_path)
    };
    (temp_dir, repo)
}
/// Writes `contents` into the store as file content at `path`, returning the
/// new file id. Panics on store errors (test helper).
pub fn write_file(store: &StoreWrapper, path: &FileRepoPath, contents: &str) -> FileId {
    store.write_file(path, &mut contents.as_bytes()).unwrap()
}
/// Stores `contents` and records it in `tree_builder` at `path` as a
/// non-executable regular file.
pub fn write_normal_file(tree_builder: &mut TreeBuilder, path: &FileRepoPath, contents: &str) {
    let id = write_file(tree_builder.repo(), path, contents);
    let value = TreeValue::Normal {
        id,
        executable: false,
    };
    tree_builder.set(path.to_repo_path(), value);
}
/// Stores `contents` and records it in `tree_builder` at `path` as an
/// executable regular file.
pub fn write_executable_file(tree_builder: &mut TreeBuilder, path: &FileRepoPath, contents: &str) {
    let id = write_file(tree_builder.repo(), path, contents);
    let value = TreeValue::Normal {
        id,
        executable: true,
    };
    tree_builder.set(path.to_repo_path(), value);
}
/// Adds a symlink pointing at `target` to `tree_builder`.
pub fn write_symlink(tree_builder: &mut TreeBuilder, path: &FileRepoPath, target: &str) {
    let store = tree_builder.repo();
    let id = store.write_symlink(path, target).unwrap();
    tree_builder.set(path.to_repo_path(), TreeValue::Symlink(id));
}
/// Writes a tree containing the given (path, contents) pairs as
/// non-executable files and returns it.
pub fn create_tree(repo: &ReadonlyRepo, path_contents: &[(&FileRepoPath, &str)]) -> Tree {
    let store = repo.store();
    let mut tree_builder = store.tree_builder(store.empty_tree_id().clone());
    for &(path, contents) in path_contents {
        write_normal_file(&mut tree_builder, path, contents);
    }
    let tree_id = tree_builder.write_tree();
    store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
}
/// Writes a tree with a single randomly named file and returns the tree's id.
#[must_use]
pub fn create_random_tree(repo: &ReadonlyRepo) -> TreeId {
    let store = repo.store();
    let mut tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let path = FileRepoPath::from(format!("file{}", rand::random::<u32>()).as_str());
    write_normal_file(&mut tree_builder, &path, "contents");
    tree_builder.write_tree()
}
/// Returns a `CommitBuilder` for a new commit with a random tree and a random
/// description.
#[must_use]
pub fn create_random_commit(settings: &UserSettings, repo: &ReadonlyRepo) -> CommitBuilder {
    let tree_id = create_random_tree(repo);
    let description = format!("random commit {}", rand::random::<u32>());
    CommitBuilder::for_new_commit(settings, repo.store(), tree_id).set_description(description)
}
pub fn write_working_copy_file(repo: &ReadonlyRepo, path: &FileRepoPath, contents: &str) {
let mut file = OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(repo.working_copy_path().join(path.to_internal_string()))
.unwrap();
file.write_all(contents.as_bytes()).unwrap();
}

226
lib/src/transaction.rs Normal file
View file

@ -0,0 +1,226 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::commit::Commit;
use crate::commit_builder::CommitBuilder;
use crate::conflicts;
use crate::evolution::{Evolution, MutableEvolution, ReadonlyEvolution};
use crate::op_store;
use crate::operation::Operation;
use crate::repo::{ReadonlyRepo, Repo};
use crate::settings::UserSettings;
use crate::store;
use crate::store::{CommitId, Timestamp, TreeValue};
use crate::store_wrapper::StoreWrapper;
use crate::view::{MutableView, ReadonlyView, View};
use std::io::Cursor;
use std::ops::Deref;
use std::sync::Arc;
/// An in-progress change to the repo.
///
/// Must be explicitly finished by calling `commit()` or `discard()`; dropping
/// an unfinished transaction panics (unless the thread is already panicking).
pub struct Transaction<'r> {
    // Always Some() except after commit() has taken it. Wrapped in an Arc so
    // the evolution stored inside it can (unsafely) keep a reference to it.
    repo: Option<Arc<MutableRepo<'r>>>,
    // Description recorded in the operation written by commit().
    description: String,
    // When the transaction started; recorded in the operation by commit().
    start_time: Timestamp,
    // Set by commit()/discard(); checked by the Drop impl.
    closed: bool,
}
/// A mutable view of a repo, as seen from within a transaction.
pub struct MutableRepo<'r> {
    repo: &'r ReadonlyRepo,
    // Always Some() except after Transaction::commit() has taken it.
    view: Option<MutableView>,
    // NOTE(review): the 'static lifetimes are a lie — the evolution actually
    // borrows this MutableRepo itself (set up via transmute in
    // Transaction::new()), making the struct self-referential. None only
    // during construction and after commit() clears it.
    evolution: Option<MutableEvolution<'static, 'static>>,
}
impl<'r> Transaction<'r> {
    /// Starts a new transaction on top of `repo`'s `view` and `evolution`.
    pub fn new(
        repo: &'r ReadonlyRepo,
        view: &ReadonlyView,
        evolution: &ReadonlyEvolution<'r>,
        description: &str,
    ) -> Transaction<'r> {
        let mut_view = view.start_modification();
        let internal = Arc::new(MutableRepo {
            repo,
            view: Some(mut_view),
            evolution: None,
        });
        let repo_ref: &MutableRepo = internal.as_ref();
        // SAFETY(review): fakes a 'static lifetime for a reference into the
        // Arc's heap allocation so the MutableEvolution stored back into that
        // same struct can refer to it (a self-referential struct). The heap
        // allocation doesn't move when the Arc is moved. Sound only as long
        // as the evolution is cleared before the Arc is unwrapped/dropped
        // (commit() clears it first) — confirm no other code path drops it.
        let static_lifetime_repo: &'static MutableRepo = unsafe { std::mem::transmute(repo_ref) };
        let mut tx = Transaction {
            repo: Some(internal),
            description: description.to_owned(),
            start_time: Timestamp::now(),
            closed: false,
        };
        let mut_evolution: MutableEvolution<'_, '_> =
            evolution.start_modification(static_lifetime_repo);
        // SAFETY(review): same lifetime extension as above.
        let static_lifetime_mut_evolution: MutableEvolution<'static, 'static> =
            unsafe { std::mem::transmute(mut_evolution) };
        Arc::get_mut(tx.repo.as_mut().unwrap()).unwrap().evolution =
            Some(static_lifetime_mut_evolution);
        tx
    }
    /// The repo the transaction started from.
    pub fn base_repo(&self) -> &'r ReadonlyRepo {
        self.repo.as_ref().unwrap().repo
    }
    /// The commit store of the underlying repo.
    pub fn store(&self) -> &Arc<StoreWrapper> {
        self.repo.as_ref().unwrap().repo.store()
    }
    /// Returns the transaction's in-progress state as a `Repo`.
    pub fn as_repo<'a: 'r>(&'a self) -> &(impl Repo + 'a) {
        self.repo.as_ref().unwrap().deref()
    }
    /// Returns the transaction's in-progress state for mutation.
    pub fn as_repo_mut(&mut self) -> &mut MutableRepo<'r> {
        Arc::get_mut(self.repo.as_mut().unwrap()).unwrap()
    }
    /// Writes `commit` to the store and records it as a head in the view.
    pub fn write_commit(&mut self, commit: store::Commit) -> Commit {
        let commit = self
            .repo
            .as_ref()
            .unwrap()
            .repo
            .store()
            .write_commit(commit);
        self.add_head(&commit);
        commit
    }
    /// Makes `commit` the checked-out commit, returning the resulting open
    /// commit.
    ///
    /// Prunes the checkout being left if it's empty. If `commit` is closed, a
    /// new open commit is created on top of it; if it's open but has
    /// conflicts, a successor with the conflicts materialized is created.
    pub fn check_out(&mut self, settings: &UserSettings, commit: &Commit) -> Commit {
        let current_checkout_id = self.as_repo().view().checkout().clone();
        let current_checkout = self.store().get_commit(&current_checkout_id).unwrap();
        assert!(current_checkout.is_open(), "current checkout is closed");
        if current_checkout.is_empty()
            && !(current_checkout.is_pruned()
                || self.as_repo().evolution().is_obsolete(&current_checkout_id))
        {
            // Prune the checkout we're leaving if it's empty.
            // TODO: Also prune it if the only changes are conflicts that got materialized.
            CommitBuilder::for_rewrite_from(settings, self.store(), &current_checkout)
                .set_pruned(true)
                .write_to_transaction(self);
        }
        let store = self.store();
        // Create a new tree with any conflicts resolved.
        let mut tree_builder = store.tree_builder(commit.tree().id().clone());
        for (path, conflict_id) in commit.tree().conflicts() {
            // Replace each conflict entry by a regular file with the conflict
            // markers rendered into its contents.
            let conflict = store.read_conflict(&conflict_id).unwrap();
            let mut buf = vec![];
            conflicts::materialize_conflict(store, &path, &conflict, &mut buf);
            let file_id = store
                .write_file(&path.to_file_repo_path(), &mut Cursor::new(&buf))
                .unwrap();
            tree_builder.set(
                path,
                TreeValue::Normal {
                    id: file_id,
                    executable: false,
                },
            );
        }
        let tree_id = tree_builder.write_tree();
        let open_commit;
        if !commit.is_open() {
            // If the commit is closed, create a new open commit on top
            open_commit = CommitBuilder::for_open_commit(
                settings,
                self.store(),
                commit.id().clone(),
                tree_id,
            )
            .write_to_transaction(self);
        } else if &tree_id != commit.tree().id() {
            // If the commit is open but had conflicts, create a successor with the
            // conflicts materialized.
            open_commit = CommitBuilder::for_rewrite_from(settings, self.store(), commit)
                .set_tree(tree_id)
                .write_to_transaction(self);
        } else {
            // Otherwise the commit was open and didn't have any conflicts, so just use
            // that commit as is.
            open_commit = commit.clone();
        }
        let id = open_commit.id().clone();
        let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap();
        mut_repo.view.as_mut().unwrap().set_checkout(id);
        open_commit
    }
    /// Records `id` as the checked-out commit in the view.
    pub fn set_checkout(&mut self, id: CommitId) {
        let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap();
        mut_repo.view.as_mut().unwrap().set_checkout(id);
    }
    /// Adds `head` to the view's heads and invalidates the cached evolution
    /// state.
    pub fn add_head(&mut self, head: &Commit) {
        let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap();
        mut_repo.view.as_mut().unwrap().add_head(head);
        mut_repo.evolution.as_mut().unwrap().invalidate();
    }
    /// Removes `head` from the view's heads and invalidates the cached
    /// evolution state.
    pub fn remove_head(&mut self, head: &Commit) {
        let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap();
        mut_repo.view.as_mut().unwrap().remove_head(head);
        mut_repo.evolution.as_mut().unwrap().invalidate();
    }
    /// Replaces the entire view contents and invalidates the cached evolution
    /// state.
    pub fn set_view(&mut self, data: op_store::View) {
        let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap();
        mut_repo.view.as_mut().unwrap().set_view(data);
        mut_repo.evolution.as_mut().unwrap().invalidate();
    }
    /// Finishes the transaction, saving the new view as an operation.
    pub fn commit(mut self) -> Operation {
        let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap();
        // Drop the self-referential evolution before unwrapping the Arc (see
        // the transmutes in new()).
        mut_repo.evolution = None;
        let mut internal = Arc::try_unwrap(self.repo.take().unwrap()).ok().unwrap();
        let view = internal.view.take().unwrap();
        let operation = view.save(self.description.clone(), self.start_time.clone());
        self.closed = true;
        operation
    }
    /// Abandons the transaction without saving anything.
    pub fn discard(mut self) {
        self.closed = true;
    }
}
impl<'r> Drop for Transaction<'r> {
    /// Panics if the transaction was dropped without `commit()`/`discard()`,
    /// unless the thread is already unwinding from another panic.
    fn drop(&mut self) {
        if std::thread::panicking() {
            return;
        }
        assert!(self.closed);
    }
}
impl<'r> Repo for MutableRepo<'r> {
    fn store(&self) -> &Arc<StoreWrapper> {
        // Delegates to the underlying readonly repo's store.
        self.repo.store()
    }
    fn view(&self) -> &dyn View {
        // Unwrap is safe: `view` is only None after Transaction::commit()
        // has taken it.
        self.view.as_ref().unwrap()
    }
    fn evolution(&self) -> &dyn Evolution {
        // Unwrap is safe: `evolution` is set at the end of Transaction::new().
        self.evolution.as_ref().unwrap()
    }
}
impl MutableRepo<'_> {
    /// Returns a mutable reference to the in-progress evolution state.
    ///
    /// The method takes `&mut self` and calls `as_mut()`, so returning `&mut`
    /// (rather than the previous `&`, which silently coerced the mutable
    /// borrow back to shared) matches the `_mut` name and intent. Callers
    /// that only need shared access still work via reborrowing.
    pub fn evolution_mut(&mut self) -> &mut MutableEvolution<'static, 'static> {
        self.evolution.as_mut().unwrap()
    }
}

199
lib/src/tree.rs Normal file
View file

@ -0,0 +1,199 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Borrow;
use std::fmt::{Debug, Error, Formatter};
use std::sync::Arc;
use crate::matchers::AlwaysMatcher;
use crate::repo_path::{DirRepoPath, DirRepoPathComponent, FileRepoPath, RepoPath, RepoPathJoin};
use crate::store;
use crate::store::{ConflictId, TreeEntriesIter, TreeEntry, TreeId, TreeValue};
use crate::store_wrapper::StoreWrapper;
use crate::trees::{recursive_tree_diff, walk_entries, TreeValueDiff};
/// A tree read from a repo store, addressed by the directory it represents
/// plus its id.
///
/// Wraps the raw `store::Tree` data together with the store it was read from
/// so that subtrees can be looked up on demand.
#[derive(Clone)]
pub struct Tree {
    store: Arc<StoreWrapper>,
    // The directory this tree represents, relative to the repo root.
    dir: DirRepoPath,
    id: TreeId,
    // Shared so cloning a Tree doesn't copy the tree contents.
    data: Arc<store::Tree>,
}
impl Debug for Tree {
    /// Shows only the directory and id; the contents and store are omitted.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let mut output = f.debug_struct("Tree");
        output.field("dir", &self.dir);
        output.field("id", &self.id);
        output.finish()
    }
}
/// Lists of files modified, added, and removed between two trees.
///
/// Each list is sorted (see `Tree::diff_summary()`).
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct DiffSummary {
    pub modified: Vec<FileRepoPath>,
    pub added: Vec<FileRepoPath>,
    pub removed: Vec<FileRepoPath>,
}
impl Tree {
pub fn new(
store: Arc<StoreWrapper>,
dir: DirRepoPath,
id: TreeId,
data: Arc<store::Tree>,
) -> Self {
Tree {
store,
dir,
id,
data,
}
}
pub fn null(store: Arc<StoreWrapper>, dir: DirRepoPath) -> Self {
Tree {
store,
dir,
id: TreeId(vec![]),
data: Arc::new(store::Tree::default()),
}
}
pub fn store(&self) -> &Arc<StoreWrapper> {
&self.store
}
pub fn dir(&self) -> &DirRepoPath {
&self.dir
}
pub fn id(&self) -> &TreeId {
&self.id
}
pub fn data(&self) -> &store::Tree {
&self.data
}
pub fn entries(&self) -> TreeEntriesIter {
self.data.entries()
}
pub fn entry<N>(&self, basename: &N) -> Option<TreeEntry>
where
N: Borrow<str> + ?Sized,
{
self.data.entry(basename)
}
pub fn value<N>(&self, basename: &N) -> Option<&TreeValue>
where
N: Borrow<str> + ?Sized,
{
self.data.value(basename)
}
pub fn path_value(&self, path: &RepoPath) -> Option<TreeValue> {
assert_eq!(self.dir(), &DirRepoPath::root());
match path.split() {
Some((dir, basename)) => self
.sub_tree_recursive(dir.components())
.and_then(|tree| tree.data.value(basename.value()).cloned()),
None => Some(TreeValue::Tree(self.id.clone())),
}
}
pub fn sub_tree(&self, name: &DirRepoPathComponent) -> Option<Tree> {
self.data
.value(name.value())
.and_then(|sub_tree| match sub_tree {
TreeValue::Tree(sub_tree_id) => {
let subdir = self.dir.join(name);
Some(self.store.get_tree(&subdir, sub_tree_id).unwrap())
}
_ => None,
})
}
pub fn known_sub_tree(&self, name: &DirRepoPathComponent, id: &TreeId) -> Tree {
let subdir = self.dir.join(name);
self.store.get_tree(&subdir, id).unwrap()
}
fn sub_tree_recursive(&self, components: &[DirRepoPathComponent]) -> Option<Tree> {
if components.is_empty() {
// TODO: It would be nice to be able to return a reference here, but
// then we would have to figure out how to share Tree instances
// across threads.
Some(Tree {
store: self.store.clone(),
dir: self.dir.clone(),
id: self.id.clone(),
data: self.data.clone(),
})
} else {
match self.data.entry(components[0].value()) {
None => None,
Some(entry) => match entry.value() {
TreeValue::Tree(sub_tree_id) => {
let sub_tree = self
.known_sub_tree(&DirRepoPathComponent::from(entry.name()), sub_tree_id);
sub_tree.sub_tree_recursive(&components[1..])
}
_ => None,
},
}
}
}
pub fn diff(&self, other: &Tree, callback: &mut impl FnMut(&FileRepoPath, TreeValueDiff)) {
recursive_tree_diff(self.clone(), other.clone(), &AlwaysMatcher {}, callback);
}
pub fn diff_summary(&self, other: &Tree) -> DiffSummary {
let mut modified = vec![];
let mut added = vec![];
let mut removed = vec![];
self.diff(other, &mut |file, diff| match diff {
TreeValueDiff::Modified(_, _) => modified.push(file.clone()),
TreeValueDiff::Added(_) => added.push(file.clone()),
TreeValueDiff::Removed(_) => removed.push(file.clone()),
});
modified.sort();
added.sort();
removed.sort();
DiffSummary {
modified,
added,
removed,
}
}
pub fn has_conflict(&self) -> bool {
!self.conflicts().is_empty()
}
pub fn conflicts(&self) -> Vec<(RepoPath, ConflictId)> {
let mut conflicts = vec![];
walk_entries(&self, &mut |name, value| -> Result<(), ()> {
if let TreeValue::Conflict(id) = value {
conflicts.push((name.clone(), id.clone()));
}
Ok(())
})
.unwrap();
conflicts
}
}

148
lib/src/tree_builder.rs Normal file
View file

@ -0,0 +1,148 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap, HashSet};
use crate::repo_path::{DirRepoPath, RepoPath, RepoPathJoin};
use crate::store;
use crate::store::{TreeId, TreeValue};
use crate::store_wrapper::StoreWrapper;
use crate::tree::Tree;
use std::sync::Arc;
/// A pending change to a single path in the tree being built.
#[derive(Debug)]
enum Override {
    // The path should be removed from the tree.
    Tombstone,
    // The path should be set to this value.
    Replace(TreeValue),
}
/// Builds a new tree by applying a set of per-path overrides on top of an
/// existing base tree.
#[derive(Debug)]
pub struct TreeBuilder {
    store: Arc<StoreWrapper>,
    // The tree the overrides are applied to.
    base_tree_id: TreeId,
    // Pending changes, keyed by path; later set()/remove() calls for the same
    // path replace earlier ones.
    overrides: BTreeMap<RepoPath, Override>,
}
impl TreeBuilder {
    pub fn new(store: Arc<StoreWrapper>, base_tree_id: TreeId) -> TreeBuilder {
        let overrides = BTreeMap::new();
        TreeBuilder {
            store,
            base_tree_id,
            overrides,
        }
    }
    /// The store the built tree will be written to.
    pub fn repo(&self) -> &StoreWrapper {
        self.store.as_ref()
    }
    /// Schedules `path` to be set to `value` in the new tree.
    pub fn set(&mut self, path: RepoPath, value: TreeValue) {
        self.overrides.insert(path, Override::Replace(value));
    }
    /// Schedules `path` to be removed from the new tree.
    pub fn remove(&mut self, path: RepoPath) {
        self.overrides.insert(path, Override::Tombstone);
    }
    /// Applies the overrides to the base tree, writes all modified trees to
    /// the store, and returns the id of the new root tree.
    pub fn write_tree(mut self) -> TreeId {
        let mut trees_to_write = self.get_base_trees();
        if trees_to_write.is_empty() {
            // No overrides, so the base tree is unchanged.
            return self.base_tree_id;
        }
        // Update entries in parent trees for file overrides
        for (path, file_override) in self.overrides {
            if let Some((dir, basename)) = path.split() {
                let tree = trees_to_write.get_mut(dir).unwrap();
                match file_override {
                    Override::Replace(value) => {
                        tree.set(basename.value().to_string(), value);
                    }
                    Override::Tombstone => {
                        tree.remove(basename.value());
                    }
                }
            }
        }
        // Write trees level by level, starting with trees without children.
        let store = self.store.as_ref();
        loop {
            // Writable now: directories that are not the parent of any other
            // pending directory. (`keys().cloned()` is already an iterator;
            // the old `.into_iter()` after it was a no-op — clippy:
            // useless_conversion.)
            let mut dirs_to_write: HashSet<DirRepoPath> =
                trees_to_write.keys().cloned().collect();
            for dir in trees_to_write.keys() {
                if let Some(parent) = dir.parent() {
                    dirs_to_write.remove(&parent);
                }
            }
            for dir in dirs_to_write {
                let tree = trees_to_write.remove(&dir).unwrap();
                if let Some((parent, basename)) = dir.split() {
                    let parent_tree = trees_to_write.get_mut(&parent).unwrap();
                    if tree.is_empty() {
                        // Don't write empty trees; remove the entry from the
                        // parent instead.
                        parent_tree.remove(basename.value());
                    } else {
                        let tree_id = store.write_tree(&dir, &tree).unwrap();
                        parent_tree.set(basename.value().to_string(), TreeValue::Tree(tree_id));
                    }
                } else {
                    // We're writing the root tree. Write it even if empty. Return its id.
                    return store.write_tree(&dir, &tree).unwrap();
                }
            }
        }
    }
    /// Reads from the store the raw data of every base tree the overrides
    /// touch: the root tree plus each ancestor tree of an overridden path,
    /// keyed by directory. Subtrees missing from the base are substituted by
    /// empty trees.
    fn get_base_trees(&mut self) -> BTreeMap<DirRepoPath, store::Tree> {
        let mut tree_cache = BTreeMap::new();
        let mut base_trees = BTreeMap::new();
        let store = self.store.clone();
        let mut populate_trees = |dir: &DirRepoPath| {
            let mut current_dir = DirRepoPath::root();
            if !tree_cache.contains_key(&current_dir) {
                let tree = store.get_tree(&current_dir, &self.base_tree_id).unwrap();
                let store_tree = tree.data().clone();
                tree_cache.insert(current_dir.clone(), tree);
                base_trees.insert(current_dir.clone(), store_tree);
            }
            for component in dir.components() {
                let next_dir = current_dir.join(component);
                let current_tree = tree_cache.get(&current_dir).unwrap();
                if !tree_cache.contains_key(&next_dir) {
                    // A subtree missing from the base is treated as empty (it
                    // may get created by the overrides).
                    let tree = current_tree
                        .sub_tree(component)
                        .unwrap_or_else(|| Tree::null(self.store.clone(), next_dir.clone()));
                    let store_tree = tree.data().clone();
                    tree_cache.insert(next_dir.clone(), tree);
                    base_trees.insert(next_dir.clone(), store_tree);
                }
                current_dir = next_dir;
            }
        };
        for path in self.overrides.keys() {
            if let Some(parent) = path.dir() {
                populate_trees(&parent);
            }
        }
        base_trees
    }
}

496
lib/src/trees.rs Normal file
View file

@ -0,0 +1,496 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::files;
use crate::files::MergeResult;
use crate::matchers::Matcher;
use crate::repo_path::{
DirRepoPath, DirRepoPathComponent, FileRepoPath, FileRepoPathComponent, RepoPath,
RepoPathComponent, RepoPathJoin,
};
use crate::store::{Conflict, ConflictPart, StoreError, TreeId, TreeValue};
use crate::store_wrapper::StoreWrapper;
use crate::tree::Tree;
use std::cmp::Ordering;
/// Recursively walks `tree`, calling `callback` for every non-tree value
/// (files, symlinks, conflicts, ...) with its full repo path.
///
/// Stops early and propagates the error if the callback returns one.
pub fn walk_entries<E>(
    tree: &Tree,
    callback: &mut impl FnMut(&RepoPath, &TreeValue) -> Result<(), E>,
) -> Result<(), E> {
    for entry in tree.entries() {
        let value = entry.value();
        if let TreeValue::Tree(id) = value {
            // Recurse into subtrees instead of reporting them.
            let subtree = tree.known_sub_tree(&DirRepoPathComponent::from(entry.name()), id);
            walk_entries(&subtree, callback)?;
        } else {
            let path = RepoPath::new(tree.dir().clone(), RepoPathComponent::from(entry.name()));
            callback(&path, value)?;
        }
    }
    Ok(())
}
/// A difference in a value between two sides.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Diff<T> {
    // Present on both sides with different values: (before, after).
    Modified(T, T),
    // Present only on the "after" side.
    Added(T),
    // Present only on the "before" side.
    Removed(T),
}
/// A diff of two `TreeValue`s, as produced by the tree-diffing functions in
/// this module.
pub type TreeValueDiff<'a> = Diff<&'a TreeValue>;
/// Calls `callback` for each difference between the direct (non-recursive)
/// entries of `tree1` and `tree2`, in entry-name order.
///
/// The two entry iterators are merged like in merge sort; this relies on
/// `Tree::entries()` yielding entries sorted by name (TODO confirm — the
/// store is expected to keep tree entries sorted). Stops early and propagates
/// the error if the callback returns one.
fn diff_entries<'a, E>(
    tree1: &'a Tree,
    tree2: &'a Tree,
    callback: &mut impl FnMut(&'a str, TreeValueDiff<'a>) -> Result<(), E>,
) -> Result<(), E> {
    let mut it1 = tree1.entries();
    let mut it2 = tree2.entries();
    let mut entry1 = it1.next();
    let mut entry2 = it2.next();
    loop {
        // First decide, from the front entries of the two iterators, which
        // name is next and what values it has on each side...
        let name: &'a str;
        let mut value_before: Option<&'a TreeValue> = None;
        let mut value_after: Option<&'a TreeValue> = None;
        match (&entry1, &entry2) {
            (Some(before), Some(after)) => {
                match before.name().cmp(after.name()) {
                    Ordering::Less => {
                        // entry removed
                        name = before.name();
                        value_before = Some(before.value());
                    }
                    Ordering::Greater => {
                        // entry added
                        name = after.name();
                        value_after = Some(after.value());
                    }
                    Ordering::Equal => {
                        // entry modified
                        name = before.name();
                        value_before = Some(before.value());
                        value_after = Some(after.value());
                    }
                }
            }
            (Some(before), None) => {
                // second iterator exhausted
                name = before.name();
                value_before = Some(before.value());
            }
            (None, Some(after)) => {
                // first iterator exhausted
                name = after.name();
                value_after = Some(after.value());
            }
            (None, None) => {
                // both iterators exhausted
                break;
            }
        }
        // ...then report the difference (if any) and advance whichever
        // iterator(s) the entry was consumed from.
        match (value_before, value_after) {
            (Some(before), Some(after)) => {
                if before != after {
                    callback(name, TreeValueDiff::Modified(before, after))?;
                }
                entry1 = it1.next();
                entry2 = it2.next();
            }
            (Some(before), None) => {
                callback(name, TreeValueDiff::Removed(before))?;
                entry1 = it1.next();
            }
            (None, Some(after)) => {
                callback(name, TreeValueDiff::Added(after))?;
                entry2 = it2.next();
            }
            (None, None) => {
                // Unreachable: the first match always sets at least one value
                // or breaks out of the loop.
                panic!("should have been handled above");
            }
        }
    }
    Ok(())
}
/// Recursively diffs `root1` against `root2`, invoking `callback` for each
/// differing file.
pub fn recursive_tree_diff<M>(
    root1: Tree,
    root2: Tree,
    matcher: &M,
    callback: &mut impl FnMut(&FileRepoPath, TreeValueDiff),
) where
    M: Matcher,
{
    let initial_work = vec![(DirRepoPath::root(), root1, root2)];
    internal_recursive_tree_diff(initial_work, matcher, callback)
}
/// Diffs each `(dir, tree1, tree2)` item in `work` one level deep, collecting
/// subtree pairs to recurse into, then recurses breadth-first.
///
/// NOTE(review): `_matcher` is accepted but not consulted yet — all paths are
/// diffed regardless; presumably filtering is a planned feature.
fn internal_recursive_tree_diff<M>(
    work: Vec<(DirRepoPath, Tree, Tree)>,
    _matcher: &M,
    callback: &mut impl FnMut(&FileRepoPath, TreeValueDiff),
) where
    M: Matcher,
{
    let mut new_work = Vec::new();
    // Diffs for which to invoke the callback after having visited subtrees. This is
    // used for making sure that when a directory gets replaced by a file, we
    // call the callback for the addition of the file after we call the callback
    // for removing files in the directory.
    let mut late_file_diffs = Vec::new();
    for (dir, tree1, tree2) in &work {
        diff_entries(tree1, tree2, &mut |name,
                                         diff: TreeValueDiff|
         -> Result<(), ()> {
            // An entry name can denote either a file or a subdirectory,
            // depending on the diff case below.
            let file_path = dir.join(&FileRepoPathComponent::from(name));
            let subdir = DirRepoPathComponent::from(name);
            let subdir_path = dir.join(&subdir);
            // TODO: simplify this mess
            match diff {
                TreeValueDiff::Modified(TreeValue::Tree(id_before), TreeValue::Tree(id_after)) => {
                    // Tree on both sides: just recurse.
                    new_work.push((
                        subdir_path,
                        tree1.known_sub_tree(&subdir, &id_before),
                        tree2.known_sub_tree(&subdir, &id_after),
                    ));
                }
                TreeValueDiff::Modified(TreeValue::Tree(id_before), file_after) => {
                    // Directory replaced by a file: report removals of the
                    // directory's contents first, the file addition last.
                    new_work.push((
                        subdir_path.clone(),
                        tree1.known_sub_tree(&subdir, &id_before),
                        Tree::null(tree2.store().clone(), subdir_path),
                    ));
                    late_file_diffs.push((file_path, TreeValueDiff::Added(file_after)));
                }
                TreeValueDiff::Modified(file_before, TreeValue::Tree(id_after)) => {
                    // File replaced by a directory: report the file removal
                    // now, then recurse into the new directory (diffed
                    // against an empty tree).
                    new_work.push((
                        subdir_path.clone(),
                        Tree::null(tree1.store().clone(), subdir_path),
                        tree2.known_sub_tree(&subdir, &id_after),
                    ));
                    callback(&file_path, TreeValueDiff::Removed(file_before));
                }
                TreeValueDiff::Modified(_, _) => {
                    callback(&file_path, diff);
                }
                TreeValueDiff::Added(TreeValue::Tree(id_after)) => {
                    // New directory: diff it against an empty tree so its
                    // contents get reported as additions.
                    new_work.push((
                        subdir_path.clone(),
                        Tree::null(tree1.store().clone(), subdir_path),
                        tree2.known_sub_tree(&subdir, &id_after),
                    ));
                }
                TreeValueDiff::Added(_) => {
                    callback(&file_path, diff);
                }
                TreeValueDiff::Removed(TreeValue::Tree(id_before)) => {
                    // Removed directory: diff it against an empty tree so its
                    // contents get reported as removals.
                    new_work.push((
                        subdir_path.clone(),
                        tree1.known_sub_tree(&subdir, &id_before),
                        Tree::null(tree2.store().clone(), subdir_path),
                    ));
                }
                TreeValueDiff::Removed(_) => {
                    callback(&file_path, diff);
                }
            };
            Ok(())
        })
        .unwrap(); // safe because the callback always returns Ok
    }
    // Recurse into subtrees before flushing the late diffs (see comment on
    // late_file_diffs above).
    if !new_work.is_empty() {
        internal_recursive_tree_diff(new_work, _matcher, callback)
    }
    for (file_path, diff) in late_file_diffs {
        callback(&file_path, diff);
    }
}
/// Three-way-merges `side1_tree` and `side2_tree` given their common base
/// `base_tree`, writing the merged tree to the store and returning its id.
///
/// All three trees must represent the same directory. Entries that cannot be
/// merged trivially are resolved by `merge_tree_value()` (which may record a
/// `TreeValue::Conflict`).
pub fn merge_trees(
    side1_tree: &Tree,
    base_tree: &Tree,
    side2_tree: &Tree,
) -> Result<TreeId, StoreError> {
    let store = base_tree.store().as_ref();
    let dir = base_tree.dir();
    assert_eq!(side1_tree.dir(), dir);
    assert_eq!(side2_tree.dir(), dir);
    // Trivial cases: if one side didn't change, or both sides changed
    // identically, the result is just (one of) the changed side(s).
    if base_tree.id() == side1_tree.id() {
        return Ok(side2_tree.id().clone());
    }
    if base_tree.id() == side2_tree.id() || side1_tree.id() == side2_tree.id() {
        return Ok(side1_tree.id().clone());
    }
    // Start with a tree identical to side 1 and modify based on changes from base
    // to side 2.
    let mut new_tree = side1_tree.data().clone();
    diff_entries(base_tree, side2_tree, &mut |basename,
                                              diff|
     -> Result<(), StoreError> {
        let maybe_side1 = side1_tree.value(basename);
        let (maybe_base, maybe_side2) = match diff {
            TreeValueDiff::Modified(base, side2) => (Some(base), Some(side2)),
            TreeValueDiff::Added(side2) => (None, Some(side2)),
            TreeValueDiff::Removed(base) => (Some(base), None),
        };
        if maybe_side1 == maybe_base {
            // side 1 is unchanged: use the value from side 2
            match maybe_side2 {
                None => new_tree.remove(basename),
                Some(side2) => new_tree.set(basename.to_owned(), side2.clone()),
            };
        } else if maybe_side1 == maybe_side2 {
            // Both sides changed in the same way: new_tree already has the
            // value
        } else {
            // The two sides changed in different ways
            let new_value =
                merge_tree_value(store, dir, basename, maybe_base, maybe_side1, maybe_side2)?;
            match new_value {
                None => new_tree.remove(basename),
                Some(value) => new_tree.set(basename.to_owned(), value),
            }
        }
        Ok(())
    })?;
    store.write_tree(dir, &new_tree)
}
/// Merges a single tree entry (`dir` + `basename`) given its values on the
/// base and the two sides (`None` = absent on that side).
///
/// Returns `Ok(None)` when the merged result is that the entry doesn't exist.
/// Unresolvable differences produce a `TreeValue::Conflict` (possibly
/// simplified by `simplify_conflict()`).
fn merge_tree_value(
    store: &StoreWrapper,
    dir: &DirRepoPath,
    basename: &str,
    maybe_base: Option<&TreeValue>,
    maybe_side1: Option<&TreeValue>,
    maybe_side2: Option<&TreeValue>,
) -> Result<Option<TreeValue>, StoreError> {
    // Resolve non-trivial conflicts:
    //   * resolve tree conflicts by recursing
    //   * try to resolve file conflicts by merging the file contents
    //   * leave other conflicts (e.g. file/dir conflicts, remove/modify conflicts)
    //     unresolved
    Ok(match (maybe_base, maybe_side1, maybe_side2) {
        (
            Some(TreeValue::Tree(base)),
            Some(TreeValue::Tree(side1)),
            Some(TreeValue::Tree(side2)),
        ) => {
            // Trees on all three sides: recurse into the subtrees.
            let subdir = dir.join(&DirRepoPathComponent::from(basename));
            let merged_tree_id = merge_trees(
                &store.get_tree(&subdir, &side1).unwrap(),
                &store.get_tree(&subdir, &base).unwrap(),
                &store.get_tree(&subdir, &side2).unwrap(),
            )?;
            // An empty merged subtree means the entry should not exist.
            if &merged_tree_id == store.empty_tree_id() {
                None
            } else {
                Some(TreeValue::Tree(merged_tree_id))
            }
        }
        _ => {
            let maybe_merged = match (maybe_base, maybe_side1, maybe_side2) {
                (
                    Some(TreeValue::Normal {
                        id: base_id,
                        executable: base_executable,
                    }),
                    Some(TreeValue::Normal {
                        id: side1_id,
                        executable: side1_executable,
                    }),
                    Some(TreeValue::Normal {
                        id: side2_id,
                        executable: side2_executable,
                    }),
                ) => {
                    // Regular files on all three sides: merge the executable
                    // bit three-way (the side that changed it wins)...
                    let executable = if base_executable == side1_executable {
                        *side2_executable
                    } else if base_executable == side2_executable {
                        *side1_executable
                    } else {
                        assert_eq!(side1_executable, side2_executable);
                        *side1_executable
                    };
                    // ...and try to merge the contents.
                    let filename = dir.join(&FileRepoPathComponent::from(basename));
                    let mut base_content = vec![];
                    store
                        .read_file(&filename, &base_id)?
                        .read_to_end(&mut base_content)?;
                    let mut side1_content = vec![];
                    store
                        .read_file(&filename, &side1_id)?
                        .read_to_end(&mut side1_content)?;
                    let mut side2_content = vec![];
                    store
                        .read_file(&filename, &side2_id)?
                        .read_to_end(&mut side2_content)?;
                    let merge_result = files::merge(&base_content, &side1_content, &side2_content);
                    match merge_result {
                        MergeResult::Resolved(merged_content) => {
                            let id = store.write_file(&filename, &mut merged_content.as_slice())?;
                            Some(TreeValue::Normal { id, executable })
                        }
                        MergeResult::Conflict(_) => None,
                    }
                }
                _ => None,
            };
            match maybe_merged {
                Some(merged) => Some(merged),
                None => {
                    // Couldn't auto-merge: record the three sides as a
                    // conflict (base as a "remove", the two sides as "adds").
                    let mut conflict = Conflict::default();
                    if let Some(base) = maybe_base {
                        conflict.removes.push(ConflictPart {
                            value: base.clone(),
                        });
                    }
                    if let Some(side1) = maybe_side1 {
                        conflict.adds.push(ConflictPart {
                            value: side1.clone(),
                        });
                    }
                    if let Some(side2) = maybe_side2 {
                        conflict.adds.push(ConflictPart {
                            value: side2.clone(),
                        });
                    }
                    simplify_conflict(store, &conflict)?
                }
            }
        }
    })
}
/// Expands one conflict part into a full `Conflict`.
///
/// A part that is itself a stored conflict is read from the store; any other
/// value becomes a conflict with that value as its single "add".
fn conflict_part_to_conflict(
    store: &StoreWrapper,
    part: &ConflictPart,
) -> Result<Conflict, StoreError> {
    if let TreeValue::Conflict(id) = &part.value {
        store.read_conflict(id)
    } else {
        let single_add = ConflictPart {
            value: part.value.clone(),
        };
        Ok(Conflict {
            removes: vec![],
            adds: vec![single_add],
        })
    }
}
/// Simplifies `conflict` by flattening nested conflicts and cancelling
/// matching add/remove pairs.
///
/// Returns `Ok(None)` if nothing remains to add (the path doesn't exist),
/// `Ok(Some(value))` if the conflict resolves to a single value, and
/// otherwise writes the simplified conflict to the store and returns a
/// `TreeValue::Conflict` referencing it.
fn simplify_conflict(
    store: &StoreWrapper,
    conflict: &Conflict,
) -> Result<Option<TreeValue>, StoreError> {
    // Important cases to simplify:
    //
    // D
    // |
    // B C
    // |/
    // A
    //
    // 1. rebase C to B, then back to A => there should be no conflict
    // 2. rebase C to B, then to D => the conflict should not mention B
    // 3. rebase B to C and D to B', then resolve the conflict in B' and rebase D'
    //    on top => the conflict should be between B'', B, and D; it should not
    //    mention the conflict in B'
    // Case 1 above:
    // After first rebase, the conflict is {+B-A+C}. After rebasing back,
    // the unsimplified conflict is {+A-B+{+B-A+C}}. Since the
    // inner conflict is positive, we can simply move it into the outer conflict. We
    // thus get {+A-B+B-A+C}, which we can then simplify to just C (because {+C} ==
    // C).
    //
    // Case 2 above:
    // After first rebase, the conflict is {+B-A+C}. After rebasing to D,
    // the unsimplified conflict is {+D-C+{+B-A+C}}. As in the
    // previous case, the inner conflict can be moved into the outer one. We then
    // get {+D-C+B-A+C}. That can be simplified to
    // {+D+B-A}, which is the desired conflict.
    //
    // Case 3 above:
    // TODO: describe this case
    // First expand any diffs with nested conflicts.
    let mut new_removes = vec![];
    let mut new_adds = vec![];
    for part in &conflict.adds {
        match part.value {
            TreeValue::Conflict(_) => {
                // `store` is already a reference; the extra `&` was redundant
                // (clippy: needless_borrow).
                let conflict = conflict_part_to_conflict(store, part)?;
                new_removes.extend_from_slice(&conflict.removes);
                new_adds.extend_from_slice(&conflict.adds);
            }
            _ => {
                new_adds.push(part.clone());
            }
        }
    }
    for part in &conflict.removes {
        match part.value {
            TreeValue::Conflict(_) => {
                // A conflict on the remove side is negated, so its adds and
                // removes swap roles.
                let conflict = conflict_part_to_conflict(store, part)?;
                new_removes.extend_from_slice(&conflict.adds);
                new_adds.extend_from_slice(&conflict.removes);
            }
            _ => {
                new_removes.push(part.clone());
            }
        }
    }
    // Remove pairs of entries that match in the removes and adds.
    let mut add_index = 0;
    while add_index < new_adds.len() {
        let add = &new_adds[add_index];
        add_index += 1;
        for (remove_index, remove) in new_removes.iter().enumerate() {
            if remove.value == add.value {
                // Cancel the pair; step `add_index` back so the element that
                // shifted into this slot gets examined next.
                new_removes.remove(remove_index);
                add_index -= 1;
                new_adds.remove(add_index);
                break;
            }
        }
    }
    // TODO: We should probably remove duplicate entries here too. So if we have
    // {+A+A}, that would become just {+A}. Similarly {+B-A+B} would be just
    // {+B-A}.
    if new_adds.is_empty() {
        // If there are no values to add, then the path doesn't exist (so return None to
        // indicate that).
        return Ok(None);
    }
    if new_removes.is_empty() && new_adds.len() == 1 {
        // A single add means that the current state is that state.
        return Ok(Some(new_adds[0].value.clone()));
    }
    let conflict_id = store.write_conflict(&Conflict {
        adds: new_adds,
        removes: new_removes,
    })?;
    Ok(Some(TreeValue::Conflict(conflict_id)))
}

416
lib/src/view.rs Normal file
View file

@ -0,0 +1,416 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::min;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use thiserror::Error;
use crate::commit::Commit;
use crate::dag_walk;
use crate::lock::FileLock;
use crate::op_store;
use crate::op_store::{OpStore, OpStoreResult, OperationId, OperationMetadata};
use crate::operation::Operation;
use crate::simple_op_store::SimpleOpStore;
use crate::store::{CommitId, Timestamp};
use crate::store_wrapper::StoreWrapper;
/// A view of the repo: the current checkout and the set of heads, plus access
/// to the operation log this view is based on.
pub trait View {
    /// The id of the currently checked-out commit.
    fn checkout(&self) -> &CommitId;
    /// The current set of head commit ids.
    fn heads<'a>(&'a self) -> Box<dyn Iterator<Item = &'a CommitId> + 'a>;
    /// The operation log store backing this view.
    fn op_store(&self) -> Arc<dyn OpStore>;
    /// The id of the operation this view is based on.
    fn base_op_head_id(&self) -> &OperationId;
    /// Reads the operation with the given id from the op store.
    fn get_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
        let data = self.op_store().read_operation(id)?;
        Ok(Operation::new(self.op_store().clone(), id.clone(), data))
    }
    /// The operation this view is based on.
    fn base_op_head(&self) -> Operation {
        self.get_operation(self.base_op_head_id()).unwrap()
    }
}
/// An immutable view loaded at a particular operation.
pub struct ReadonlyView {
    store: Arc<StoreWrapper>,
    // NOTE(review): presumably the on-disk directory holding the view state
    // (e.g. the op-heads dir) — confirm against the loading code.
    path: PathBuf,
    op_store: Arc<SimpleOpStore>,
    // The operation this view was loaded at.
    op_id: OperationId,
    // The raw view contents (checkout, heads, ...).
    data: op_store::View,
}
/// A view that can be modified and then saved as a new operation (see
/// `save()` usage in Transaction::commit()).
pub struct MutableView {
    store: Arc<StoreWrapper>,
    // NOTE(review): presumably the on-disk directory holding the view state
    // (e.g. the op-heads dir) — confirm against the loading code.
    path: PathBuf,
    op_store: Arc<SimpleOpStore>,
    // The operation this mutable view started from; the saved operation's
    // parent.
    base_op_head_id: OperationId,
    // The in-progress view contents (checkout, heads, ...).
    data: op_store::View,
}
/// Returns the subset of `commit_ids` that are not ancestors of other commits
/// in the set.
///
/// Walks parents from all the input commits, dropping any input commit that
/// is reached as an ancestor. To bound the walk without generation numbers,
/// it stops at commits whose committer timestamp is more than a month older
/// than the oldest input commit (i.e. it assumes clock skew under a month).
fn heads_of_set(
    store: &StoreWrapper,
    commit_ids: impl Iterator<Item = CommitId>,
) -> HashSet<CommitId> {
    let mut visited = HashSet::new();
    let mut work = vec![];
    let mut oldest = std::u64::MAX;
    let mut heads: HashSet<CommitId> = commit_ids.collect();
    for commit_id in &heads {
        let commit = store.get_commit(commit_id).unwrap();
        oldest = min(oldest, commit.committer().timestamp.timestamp.0);
        work.push(commit);
    }
    // Assume clock skew less than a month:
    // TODO: use generation numbers here
    let threshold = oldest.saturating_sub(1000 * 3600 * 24 * 30);
    // `while let` replaces the manual `!is_empty()` + `pop().unwrap()` dance.
    while let Some(commit) = work.pop() {
        if visited.contains(commit.id()) {
            continue;
        }
        visited.insert(commit.id().clone());
        for parent in commit.parents() {
            if parent.committer().timestamp.timestamp.0 < threshold {
                continue;
            }
            heads.remove(parent.id());
            work.push(parent);
        }
    }
    heads
}
/// Error when resolving the head(s) of the operation log.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum OpHeadResolutionError {
    // The op-heads directory contains no recorded operation heads.
    #[error("Operation log has no heads")]
    NoHeads,
}
fn add_op_head(op_heads_dir: &PathBuf, id: &OperationId) {
std::fs::write(op_heads_dir.join(id.hex()), "").unwrap();
}
fn remove_op_head(op_heads_dir: &PathBuf, id: &OperationId) {
// It's fine if the old head was not found. It probably means
// that we're on a distributed file system where the locking
// doesn't work. We'll probably end up with two current
// heads. We'll detect that next time we load the view.
std::fs::remove_file(op_heads_dir.join(id.hex())).ok();
}
fn get_op_heads(op_heads_dir: &PathBuf) -> Vec<OperationId> {
let mut op_heads = vec![];
for op_head_entry in std::fs::read_dir(op_heads_dir).unwrap() {
let op_head_file_name = op_head_entry.unwrap().file_name();
let op_head_file_name = op_head_file_name.to_str().unwrap();
if let Ok(op_head) = hex::decode(op_head_file_name) {
op_heads.push(OperationId(op_head));
}
}
op_heads
}
/// Three-way merges the `left` and `right` views, using `base` as the common
/// ancestor.
///
/// The result starts from `left`; the right side's checkout change (if it
/// changed relative to `base`) and its head additions/removals relative to
/// `base` are applied on top, and the resulting set is reduced to actual
/// heads. Conflicting checkout changes currently keep the left side (see
/// TODO below).
pub fn merge_views(
    store: &StoreWrapper,
    left: &op_store::View,
    base: &op_store::View,
    right: &op_store::View,
) -> op_store::View {
    let mut result = left.clone();
    if right.checkout == base.checkout || right.checkout == left.checkout {
        // Keep the left side
    } else if left.checkout == base.checkout {
        result.checkout = right.checkout.clone();
    } else {
        // TODO: Return an error here. Or should we just pick one of the sides
        // and emit a warning?
    }
    // Apply the right side's head removals...
    for removed_head in base.head_ids.difference(&right.head_ids) {
        result.head_ids.remove(removed_head);
    }
    // ...and its head additions.
    for added_head in right.head_ids.difference(&base.head_ids) {
        result.head_ids.insert(added_head.clone());
    }
    result.head_ids = heads_of_set(store, result.head_ids.into_iter());
    // TODO: Should it be considered a conflict if a commit-head is removed on one
    // side while a child or successor is created on another side? Maybe a
    // warning?
    result
}
// TODO: Introduce context objects (like commit::Commit) so we won't have to
// pass around OperationId and Operation separately like we do here.
/// Resolves the operation log to a single head operation, merging divergent
/// heads into a new merge operation first if necessary. Returns the head's
/// id, operation data, and view.
fn get_single_op_head(
    store: &StoreWrapper,
    op_store: &dyn OpStore,
    op_heads_dir: &PathBuf,
) -> Result<(OperationId, op_store::Operation, op_store::View), OpHeadResolutionError> {
    let mut op_heads = get_op_heads(&op_heads_dir);
    if op_heads.is_empty() {
        return Err(OpHeadResolutionError::NoHeads);
    }
    // Fast path: a single head needs no locking or merging.
    if op_heads.len() == 1 {
        let operation_id = op_heads.pop().unwrap();
        let operation = op_store.read_operation(&operation_id).unwrap();
        let view = op_store.read_view(&operation.view_id).unwrap();
        return Ok((operation_id, operation, view));
    }
    // There are multiple heads. We take a lock, then check if there are still
    // multiple heads (it's likely that another process was in the process of
    // deleting on of them). If there are still multiple heads, we attempt to
    // merge all the views into one. We then write that view and a corresponding
    // operation to the op-store.
    // Note that the locking isn't necessary for correctness; we take the lock
    // only to avoid other concurrent processes from doing the same work (and
    // producing another set of divergent heads).
    let _lock = FileLock::lock(op_heads_dir.join("lock"));
    // Re-read the heads now that we hold the lock.
    let op_heads = get_op_heads(&op_heads_dir);
    if op_heads.is_empty() {
        return Err(OpHeadResolutionError::NoHeads);
    }
    if op_heads.len() == 1 {
        let op_head_id = op_heads[0].clone();
        let op_head = op_store.read_operation(&op_head_id).unwrap();
        // Return early so we don't write a merge operation with a single parent
        let view = op_store.read_view(&op_head.view_id).unwrap();
        return Ok((op_head_id, op_head, view));
    }
    let (merge_operation_id, merge_operation, merged_view) =
        merge_op_heads(store, op_store, &op_heads)?;
    // Record the merge as the new head before retiring the old heads, so
    // there is always at least one head recorded on disk.
    add_op_head(&op_heads_dir, &merge_operation_id);
    for old_op_head_id in op_heads {
        // The merged one will be in the input to the merge if it's a "fast-forward"
        // merge.
        if old_op_head_id != merge_operation_id {
            remove_op_head(&op_heads_dir, &old_op_head_id);
        }
    }
    Ok((merge_operation_id, merge_operation, merged_view))
}
/// Merges the given operation-log heads pairwise (left to right) into a
/// single operation, writing the merged view and a merge operation to the op
/// store. If only one head remains after removing ancestors, it is returned
/// unchanged without creating a merge operation.
fn merge_op_heads(
    store: &StoreWrapper,
    op_store: &dyn OpStore,
    op_heads: &[OperationId],
) -> Result<(OperationId, op_store::Operation, op_store::View), OpHeadResolutionError> {
    let neighbors_fn = |op_id: &OperationId| op_store.read_operation(op_id).unwrap().parents;
    // Remove ancestors so we don't create merge operation with an operation and its
    // ancestor
    let op_heads = dag_walk::unreachable(
        op_heads.iter().cloned(),
        &neighbors_fn,
        &|op_id: &OperationId| op_id.clone(),
    );
    // Sort for a deterministic merge order (and thus deterministic result).
    let mut op_heads: Vec<_> = op_heads.into_iter().collect();
    op_heads.sort_by_key(|op_id| op_id.0.clone());
    let first_op_head = op_store.read_operation(&op_heads[0]).unwrap();
    let mut merged_view = op_store.read_view(&first_op_head.view_id).unwrap();
    // Return without creating a merge operation
    if op_heads.len() == 1 {
        return Ok((op_heads[0].clone(), first_op_head, merged_view));
    }
    // Fold each remaining head into the accumulated view, using the closest
    // common ancestor of the already-merged heads and the new head as base.
    for (i, other_op_head_id) in op_heads.iter().enumerate().skip(1) {
        let ancestor_op_id = dag_walk::closest_common_node(
            op_heads[0..i].to_vec(),
            vec![other_op_head_id.clone()],
            &neighbors_fn,
            &|op_id: &OperationId| op_id.clone(),
        )
        .unwrap();
        let ancestor_op = op_store.read_operation(&ancestor_op_id).unwrap();
        let ancestor_view = op_store.read_view(&ancestor_op.view_id).unwrap();
        let other_op = op_store.read_operation(other_op_head_id).unwrap();
        let other_view = op_store.read_view(&other_op.view_id).unwrap();
        merged_view = merge_views(store, &merged_view, &ancestor_view, &other_view);
    }
    let merged_view_id = op_store.write_view(&merged_view).unwrap();
    let operation_metadata = OperationMetadata::new("resolve concurrent operations".to_string());
    let merge_operation = op_store::Operation {
        view_id: merged_view_id,
        parents: op_heads,
        metadata: operation_metadata,
    };
    let merge_operation_id = op_store.write_operation(&merge_operation).unwrap();
    Ok((merge_operation_id, merge_operation, merged_view))
}
/// `View` accessors for a loaded, immutable view.
impl View for ReadonlyView {
    /// The commit recorded as checked out in this view.
    fn checkout(&self) -> &CommitId {
        let data = &self.data;
        &data.checkout
    }
    /// Iterates over the recorded head commits.
    fn heads<'a>(&'a self) -> Box<dyn Iterator<Item = &'a CommitId> + 'a> {
        let head_ids = &self.data.head_ids;
        Box::new(head_ids.iter())
    }
    /// Hands out another handle to the backing operation store.
    fn op_store(&self) -> Arc<dyn OpStore> {
        Arc::clone(&self.op_store)
    }
    /// The id of the operation this view was loaded at.
    fn base_op_head_id(&self) -> &OperationId {
        &self.op_id
    }
}
impl ReadonlyView {
    /// Initializes view storage for a new repo under `path`: creates the
    /// operation store, writes a root view with `checkout` as both the
    /// checkout and the only head, and records the resulting
    /// "initialize repo" operation as the single operation-log head.
    pub fn init(store: Arc<StoreWrapper>, path: PathBuf, checkout: CommitId) -> Self {
        std::fs::create_dir(path.join("op_store")).unwrap();
        let op_store = Arc::new(SimpleOpStore::init(path.join("op_store")));
        let mut root_view = op_store::View::new(checkout.clone());
        root_view.head_ids.insert(checkout);
        let root_view_id = op_store.write_view(&root_view).unwrap();
        let operation_metadata = OperationMetadata::new("initialize repo".to_string());
        let init_operation = op_store::Operation {
            view_id: root_view_id,
            parents: vec![],
            metadata: operation_metadata,
        };
        let init_operation_id = op_store.write_operation(&init_operation).unwrap();
        let op_heads_dir = path.join("op_heads");
        std::fs::create_dir(&op_heads_dir).unwrap();
        add_op_head(&op_heads_dir, &init_operation_id);
        ReadonlyView {
            store,
            path,
            op_store,
            op_id: init_operation_id,
            data: root_view,
        }
    }
    /// Loads the view at the current head of the operation log, merging
    /// divergent operation heads first if necessary.
    pub fn load(store: Arc<StoreWrapper>, path: PathBuf) -> Self {
        let op_store = Arc::new(SimpleOpStore::load(path.join("op_store")));
        let op_heads_dir = path.join("op_heads");
        let (op_id, _operation, view) =
            get_single_op_head(&store, op_store.as_ref(), &op_heads_dir).unwrap();
        ReadonlyView {
            store,
            path,
            op_store,
            op_id,
            data: view,
        }
    }
    /// Re-resolves the operation-log head and reloads the view from it.
    /// Returns the id of the operation the view is now at.
    pub fn reload(&mut self) -> OperationId {
        let op_heads_dir = self.path.join("op_heads");
        let (op_id, _operation, view) =
            get_single_op_head(&self.store, self.op_store.as_ref(), &op_heads_dir).unwrap();
        self.op_id = op_id;
        self.data = view;
        self.op_id.clone()
    }
    /// Resets this view to the state as of the given operation, without
    /// touching the operation-log heads on disk.
    pub fn reload_at(&mut self, operation: &Operation) {
        self.op_id = operation.id().clone();
        self.data = operation.view().take_store_view();
    }
    /// Starts building a mutated version of this view, based at the same
    /// operation.
    pub fn start_modification(&self) -> MutableView {
        // TODO: Avoid the cloning of the sets here.
        MutableView {
            store: self.store.clone(),
            path: self.path.clone(),
            op_store: self.op_store.clone(),
            base_op_head_id: self.op_id.clone(),
            data: self.data.clone(),
        }
    }
}
/// `View` accessors for an in-progress, mutable view.
impl View for MutableView {
    /// The commit currently recorded as checked out.
    fn checkout(&self) -> &CommitId {
        let data = &self.data;
        &data.checkout
    }
    /// Iterates over the current head commits.
    fn heads<'a>(&'a self) -> Box<dyn Iterator<Item = &'a CommitId> + 'a> {
        let head_ids = &self.data.head_ids;
        Box::new(head_ids.iter())
    }
    /// Hands out another handle to the backing operation store.
    fn op_store(&self) -> Arc<dyn OpStore> {
        Arc::clone(&self.op_store)
    }
    /// The id of the operation this mutable view was based on.
    fn base_op_head_id(&self) -> &OperationId {
        &self.base_op_head_id
    }
}
impl MutableView {
    /// Sets the commit recorded as checked out.
    pub fn set_checkout(&mut self, id: CommitId) {
        self.data.checkout = id;
    }
    /// Adds `head` to the set of heads and removes its parents, which now
    /// have a descendant in the set.
    pub fn add_head(&mut self, head: &Commit) {
        self.data.head_ids.insert(head.id().clone());
        for parent in head.parents() {
            self.data.head_ids.remove(parent.id());
        }
    }
    /// Removes `head` from the set of heads and re-adds its parents, which
    /// may now be heads again.
    pub fn remove_head(&mut self, head: &Commit) {
        self.data.head_ids.remove(head.id());
        for parent in head.parents() {
            self.data.head_ids.insert(parent.id().clone());
        }
    }
    /// Replaces the entire view contents.
    pub fn set_view(&mut self, data: op_store::View) {
        self.data = data;
    }
    /// Persists this view and records a new operation pointing at it,
    /// replacing the base operation as head of the operation log. Returns
    /// the new operation.
    pub fn save(mut self, description: String, operation_start_time: Timestamp) -> Operation {
        let op_heads_dir = self.path.join("op_heads");
        // First write the current view whether or not there have been any concurrent
        // operations. We'll later create a merge operation if necessary.
        self.data.head_ids = heads_of_set(&self.store, self.heads().cloned());
        let view_id = self.op_store.write_view(&self.data).unwrap();
        let mut operation_metadata = OperationMetadata::new(description);
        operation_metadata.start_time = operation_start_time;
        let operation = op_store::Operation {
            view_id,
            parents: vec![self.base_op_head_id.clone()],
            metadata: operation_metadata,
        };
        let old_op_head_id = self.base_op_head_id.clone();
        let new_op_head_id = self.op_store.write_operation(&operation).unwrap();
        // Update .jj/view/op_heads/.
        {
            // Under the lock, add the new head before removing the old one so
            // there is never a moment with no head recorded.
            let _op_heads_lock = FileLock::lock(op_heads_dir.join("lock"));
            add_op_head(&op_heads_dir, &new_op_head_id);
            remove_op_head(&op_heads_dir, &old_op_head_id);
        }
        Operation::new(self.op_store, new_op_head_id, operation)
    }
}

667
lib/src/working_copy.rs Normal file
View file

@ -0,0 +1,667 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cell::{RefCell, RefMut};
use std::collections::{BTreeMap, HashSet};
use std::convert::TryInto;
use std::fs;
use std::fs::{File, OpenOptions};
#[cfg(not(windows))]
use std::os::unix::fs::symlink;
#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;
#[cfg(windows)]
use std::os::windows::fs::symlink_file;
use std::path::{Path, PathBuf};
use std::time::UNIX_EPOCH;
use protobuf::Message;
use tempfile::NamedTempFile;
use thiserror::Error;
use crate::commit::Commit;
use crate::commit_builder::CommitBuilder;
use crate::lock::FileLock;
use crate::repo::ReadonlyRepo;
use crate::repo_path::{
DirRepoPath, DirRepoPathComponent, FileRepoPath, FileRepoPathComponent, RepoPathJoin,
};
use crate::settings::UserSettings;
use crate::store::{CommitId, FileId, MillisSinceEpoch, StoreError, SymlinkId, TreeId, TreeValue};
use crate::store_wrapper::StoreWrapper;
use crate::trees::TreeValueDiff;
use std::sync::Arc;
/// The kind of a file tracked in the working copy.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum FileType {
    // Regular file without the executable bit.
    Normal,
    // Regular file with an executable bit set.
    Executable,
    // Symbolic link.
    Symlink,
}
/// Stat information recorded for a tracked file, used to cheaply detect
/// modifications without reading file contents.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct FileState {
    pub file_type: FileType,
    // Modification time in milliseconds since the epoch.
    pub mtime: MillisSinceEpoch,
    // File size in bytes.
    pub size: u64,
    /* TODO: What else do we need here? Git stores a lot of fields.
     * TODO: Could possibly handle case-insensitive file systems keeping an
     * Option<PathBuf> with the actual path here. */
}
impl FileState {
    /// A placeholder state (normal file, zero mtime, zero size), used when no
    /// real metadata has been read.
    fn null() -> FileState {
        let mtime = MillisSinceEpoch(0);
        FileState {
            file_type: FileType::Normal,
            mtime,
            size: 0,
        }
    }
}
/// Tracks which tree the on-disk working copy corresponds to, plus per-file
/// stat information used to detect changes.
struct TreeState {
    store: Arc<StoreWrapper>,
    // Directory holding the persisted "tree_state" file.
    path: PathBuf,
    // The tree the working copy was last synced to.
    tree_id: TreeId,
    // Last known stat information for each tracked file.
    file_states: BTreeMap<FileRepoPath, FileState>,
    // Mtime of the "tree_state" file when state was last read/written; files
    // modified at or after this time are conservatively treated as dirty.
    read_time: MillisSinceEpoch,
}
/// Converts a `FileState` protobuf message to the in-memory representation.
fn file_state_from_proto(proto: &protos::working_copy::FileState) -> FileState {
    use protos::working_copy::FileType as ProtoFileType;
    let file_type = match proto.file_type {
        ProtoFileType::Normal => FileType::Normal,
        ProtoFileType::Executable => FileType::Executable,
        ProtoFileType::Symlink => FileType::Symlink,
    };
    let mtime = MillisSinceEpoch(proto.mtime_millis_since_epoch);
    FileState {
        file_type,
        mtime,
        size: proto.size,
    }
}
/// Converts an in-memory `FileState` to its protobuf representation.
fn file_state_to_proto(file_state: &FileState) -> protos::working_copy::FileState {
    use protos::working_copy::FileType as ProtoFileType;
    let mut proto = protos::working_copy::FileState::new();
    proto.file_type = match &file_state.file_type {
        FileType::Normal => ProtoFileType::Normal,
        FileType::Executable => ProtoFileType::Executable,
        FileType::Symlink => ProtoFileType::Symlink,
    };
    proto.size = file_state.size;
    proto.mtime_millis_since_epoch = file_state.mtime.0;
    proto
}
/// Builds the tracked-file state map from a `TreeState` protobuf message.
fn file_states_from_proto(
    proto: &protos::working_copy::TreeState,
) -> BTreeMap<FileRepoPath, FileState> {
    proto
        .file_states
        .iter()
        .map(|(path_str, proto_file_state)| {
            let path = FileRepoPath::from(path_str.as_str());
            (path, file_state_from_proto(proto_file_state))
        })
        .collect()
}
/// Creates all missing ancestor directories of `disk_path` so the file
/// itself can then be created.
///
/// Panics if `disk_path` has no parent or if directory creation fails.
/// Takes `&Path` rather than `&PathBuf` (clippy `ptr_arg`); callers passing
/// `&PathBuf` still work via deref coercion.
fn create_parent_dirs(disk_path: &Path) {
    fs::create_dir_all(disk_path.parent().unwrap())
        .unwrap_or_else(|_| panic!("failed to create parent directories for {:?}", &disk_path));
}
/// Counts of files touched while updating the working copy to another tree.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct CheckoutStats {
    pub updated_files: u32,
    pub added_files: u32,
    pub removed_files: u32,
}
/// Errors from checking out a commit into the working copy.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum CheckoutError {
    // The tree to check out could not be found in the store.
    #[error("Update target not found")]
    TargetNotFound,
    // The current checkout was deleted, maybe by an overly aggressive GC that happened while
    // the current process was running.
    #[error("Current checkout not found")]
    SourceNotFound,
    // Another process checked out a commit while the current process was running (after the
    // working copy was read by the current process).
    #[error("Concurrent checkout")]
    ConcurrentCheckout,
    // Any other error from the store.
    #[error("Internal error: {0:?}")]
    InternalStoreError(StoreError),
}
impl TreeState {
    /// The tree id the on-disk working copy is believed to match.
    pub fn current_tree_id(&self) -> &TreeId {
        &self.tree_id
    }
    /// The last recorded stat information for each tracked file.
    pub fn file_states(&self) -> &BTreeMap<FileRepoPath, FileState> {
        &self.file_states
    }
    /// Creates a new, empty tree state at `path` and persists it to disk.
    pub fn init(store: Arc<StoreWrapper>, path: PathBuf) -> TreeState {
        let mut wc = TreeState::empty(store, path);
        wc.save();
        wc
    }
    /// An in-memory tree state pointing at the store's empty tree, with no
    /// tracked files; not yet written to disk.
    fn empty(store: Arc<StoreWrapper>, path: PathBuf) -> TreeState {
        let tree_id = store.empty_tree_id().clone();
        TreeState {
            store,
            path,
            tree_id,
            file_states: BTreeMap::new(),
            read_time: MillisSinceEpoch(0),
        }
    }
    /// Loads the tree state from `path`, initializing a fresh one if the
    /// "tree_state" file does not exist yet.
    pub fn load(store: Arc<StoreWrapper>, path: PathBuf) -> TreeState {
        let maybe_file = File::open(path.join("tree_state"));
        let file = match maybe_file {
            Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
                return TreeState::init(store, path);
            }
            result => result.unwrap(),
        };
        let mut wc = TreeState::empty(store, path);
        wc.read(file);
        wc
    }
    /// Records the mtime of the "tree_state" file as the time the state was
    /// read; `write_tree()` uses it to decide which files might be dirty.
    fn update_read_time(&mut self) {
        let own_file_state = self
            .file_state(&self.path.join("tree_state"))
            .unwrap_or_else(FileState::null);
        self.read_time = own_file_state.mtime;
    }
    /// Populates this state from an open "tree_state" file.
    fn read(&mut self, mut file: File) {
        self.update_read_time();
        let proto: protos::working_copy::TreeState =
            protobuf::parse_from_reader(&mut file).unwrap();
        self.tree_id = TreeId(proto.tree_id.clone());
        self.file_states = file_states_from_proto(&proto);
    }
    /// Atomically writes this state to the "tree_state" file via a temp file.
    fn save(&mut self) {
        let mut proto = protos::working_copy::TreeState::new();
        proto.tree_id = self.tree_id.0.clone();
        for (file, file_state) in &self.file_states {
            proto
                .file_states
                .insert(file.to_internal_string(), file_state_to_proto(file_state));
        }
        let mut temp_file = NamedTempFile::new_in(&self.path).unwrap();
        // update read time while we still have the file open for writes, so we know
        // there is no unknown data in it
        self.update_read_time();
        proto.write_to_writer(temp_file.as_file_mut()).unwrap();
        temp_file.persist(self.path.join("tree_state")).unwrap();
    }
    /// Stats the file at `path` (without following symlinks) and returns its
    /// type/mtime/size, or `None` if it can't be stat'ed (e.g. it doesn't
    /// exist). Panics if the path is a directory.
    fn file_state(&self, path: &PathBuf) -> Option<FileState> {
        let metadata = path.symlink_metadata().ok()?;
        let time = metadata.modified().unwrap();
        let since_epoch = time.duration_since(UNIX_EPOCH).unwrap();
        let mtime = MillisSinceEpoch(since_epoch.as_millis().try_into().unwrap());
        let size = metadata.len();
        let metadata_file_type = metadata.file_type();
        let file_type = if metadata_file_type.is_dir() {
            panic!("expected file, not directory: {:?}", path);
        } else if metadata_file_type.is_symlink() {
            FileType::Symlink
        } else {
            // Any executable bit (user/group/other) marks the file executable.
            let mode = metadata.permissions().mode();
            if mode & 0o111 != 0 {
                FileType::Executable
            } else {
                FileType::Normal
            }
        };
        Some(FileState {
            file_type,
            mtime,
            size,
        })
    }
    /// Writes the file at `disk_path` into the store under repo path `path`.
    fn write_file_to_store(&self, path: &FileRepoPath, disk_path: &PathBuf) -> FileId {
        let file = File::open(disk_path).unwrap();
        self.store.write_file(path, &mut Box::new(file)).unwrap()
    }
    /// Writes the symlink at `disk_path` into the store under repo path
    /// `path`. The link target must be valid UTF-8.
    fn write_symlink_to_store(&self, path: &FileRepoPath, disk_path: &PathBuf) -> SymlinkId {
        let target = disk_path.read_link().unwrap();
        let str_target = target.to_str().unwrap();
        self.store.write_symlink(path, str_target).unwrap()
    }
    // Look for changes to the working copy. If there are any changes, create
    // a new tree from it and return it, and also update the dirstate on disk.
    // TODO: respect ignores
    pub fn write_tree(&mut self, working_copy_path: PathBuf) -> &TreeId {
        // Depth-first walk over the working copy, starting at its root.
        let mut work = vec![(DirRepoPath::root(), working_copy_path)];
        let mut tree_builder = self.store.tree_builder(self.tree_id.clone());
        // Tracked files we don't encounter on disk are considered deleted.
        let mut deleted_files: HashSet<&FileRepoPath> = self.file_states.keys().collect();
        let mut modified_files = BTreeMap::new();
        while !work.is_empty() {
            let (dir, disk_dir) = work.pop().unwrap();
            for maybe_entry in disk_dir.read_dir().unwrap() {
                let entry = maybe_entry.unwrap();
                let file_type = entry.file_type().unwrap();
                let file_name = entry.file_name();
                let name = file_name.to_str().unwrap();
                // Never track the repo's own metadata directory.
                if name == ".jj" {
                    continue;
                }
                if file_type.is_dir() {
                    let subdir = dir.join(&DirRepoPathComponent::from(name));
                    let disk_subdir = disk_dir.join(file_name);
                    work.push((subdir, disk_subdir));
                } else {
                    let file = dir.join(&FileRepoPathComponent::from(name));
                    deleted_files.remove(&file);
                    let new_file_state = self.file_state(&entry.path()).unwrap();
                    let clean = match self.file_states.get(&file) {
                        None => false, // untracked
                        Some(current_entry) => {
                            // A file modified at/after the last state read may
                            // have changed again without its stat information
                            // changing, so it's conservatively treated as dirty.
                            current_entry == &new_file_state && current_entry.mtime < self.read_time
                        }
                    };
                    if !clean {
                        let disk_file = disk_dir.join(file_name);
                        let file_value = match new_file_state.file_type {
                            FileType::Normal | FileType::Executable => {
                                let id = self.write_file_to_store(&file, &disk_file);
                                TreeValue::Normal {
                                    id,
                                    executable: new_file_state.file_type == FileType::Executable,
                                }
                            }
                            FileType::Symlink => {
                                let id = self.write_symlink_to_store(&file, &disk_file);
                                TreeValue::Symlink(id)
                            }
                        };
                        tree_builder.set(file.to_repo_path(), file_value);
                        modified_files.insert(file, new_file_state);
                    }
                }
            }
        }
        // Apply deletions and modifications to both the tree being built and
        // the recorded file states, then persist.
        let deleted_files: Vec<FileRepoPath> = deleted_files.iter().cloned().cloned().collect();
        for file in &deleted_files {
            self.file_states.remove(file);
            tree_builder.remove(file.to_repo_path());
        }
        for (file, file_state) in modified_files {
            self.file_states.insert(file, file_state);
        }
        self.tree_id = tree_builder.write_tree();
        self.save();
        &self.tree_id
    }
    /// Writes the store file `id` to `disk_path` (which must not already
    /// exist), sets its executable bit, and returns the resulting file state.
    fn write_file(
        &self,
        disk_path: &PathBuf,
        path: &FileRepoPath,
        id: &FileId,
        executable: bool,
    ) -> FileState {
        create_parent_dirs(disk_path);
        // NOTE(review): `truncate(true)` is redundant here since
        // `create_new(true)` fails if the file already exists.
        let mut file = OpenOptions::new()
            .write(true)
            .create_new(true)
            .truncate(true)
            .open(disk_path)
            .unwrap_or_else(|_| panic!("failed to open {:?} for write", &disk_path));
        let mut contents = self.store.read_file(path, id).unwrap();
        std::io::copy(&mut contents, &mut file).unwrap();
        self.set_executable(disk_path, executable);
        // Read the file state while we still have the write lock. That way there is no
        // race with other processes modifying it. We know that the file exists,
        // and we know that the stat information is accurate. (The mtime is set
        // at write time and won't change when we close the file.)
        self.file_state(&disk_path).unwrap()
    }
    /// Materializes the store symlink `id` at `disk_path` and returns the
    /// resulting file state. Not implemented on Windows.
    fn write_symlink(&self, disk_path: &PathBuf, path: &FileRepoPath, id: &SymlinkId) -> FileState {
        create_parent_dirs(disk_path);
        #[cfg(windows)]
        {
            unimplemented!();
        }
        #[cfg(not(windows))]
        {
            let target = self.store.read_symlink(path, id).unwrap();
            let target = PathBuf::from(&target);
            symlink(target, disk_path).unwrap();
        }
        self.file_state(&disk_path).unwrap()
    }
    /// Sets the permissions of `disk_path` to 0o755 (executable) or 0o644.
    fn set_executable(&self, disk_path: &PathBuf, executable: bool) {
        let mode = if executable { 0o755 } else { 0o644 };
        fs::set_permissions(disk_path, fs::Permissions::from_mode(mode)).unwrap();
    }
    /// Updates the working copy on disk from the current tree to `tree_id` by
    /// applying the diff between the two trees, updating the recorded file
    /// states as it goes. Returns counts of updated/added/removed files.
    pub fn check_out(
        &mut self,
        tree_id: TreeId,
        working_copy_path: &Path,
    ) -> Result<CheckoutStats, CheckoutError> {
        let old_tree = self
            .store
            .get_tree(&DirRepoPath::root(), &self.tree_id)
            .map_err(|err| match err {
                StoreError::NotFound => CheckoutError::SourceNotFound,
                other => CheckoutError::InternalStoreError(other),
            })?;
        let new_tree = self
            .store
            .get_tree(&DirRepoPath::root(), &tree_id)
            .map_err(|err| match err {
                StoreError::NotFound => CheckoutError::TargetNotFound,
                other => CheckoutError::InternalStoreError(other),
            })?;
        let mut stats = CheckoutStats {
            updated_files: 0,
            added_files: 0,
            removed_files: 0,
        };
        old_tree.diff(&new_tree, &mut |path, diff| {
            let disk_path = working_copy_path.join(PathBuf::from(path.to_internal_string()));
            // TODO: Check that the file has not changed before overwriting/removing it.
            match diff {
                TreeValueDiff::Removed(_before) => {
                    fs::remove_file(&disk_path).ok();
                    // Also remove ancestor directories that became empty
                    // (remove_dir fails on non-empty directories, which ends
                    // the loop).
                    let mut parent_dir = disk_path.parent().unwrap();
                    loop {
                        if fs::remove_dir(&parent_dir).is_err() {
                            break;
                        }
                        parent_dir = parent_dir.parent().unwrap();
                    }
                    self.file_states.remove(&path);
                    stats.removed_files += 1;
                }
                TreeValueDiff::Added(after) => {
                    let file_state = match after {
                        TreeValue::Normal { id, executable } => {
                            self.write_file(&disk_path, path, id, *executable)
                        }
                        TreeValue::Symlink(id) => self.write_symlink(&disk_path, path, id),
                        TreeValue::GitSubmodule(_id) => {
                            println!("ignoring git submodule at {:?}", path);
                            return;
                        }
                        TreeValue::Tree(_id) => {
                            panic!("unexpected tree entry in diff at {:?}", path);
                        }
                        TreeValue::Conflict(_id) => {
                            panic!(
                                "conflicts cannot be represented in the working copy: {:?}",
                                path
                            );
                        }
                    };
                    self.file_states.insert(path.clone(), file_state);
                    stats.added_files += 1;
                }
                TreeValueDiff::Modified(before, after) => {
                    // The old version is removed first; the match below then
                    // writes the new version.
                    fs::remove_file(&disk_path).ok();
                    let file_state = match (before, after) {
                        (
                            TreeValue::Normal {
                                id: old_id,
                                executable: old_executable,
                            },
                            TreeValue::Normal { id, executable },
                        ) if id == old_id => {
                            // Optimization for when only the executable bit changed
                            assert_ne!(executable, old_executable);
                            // NOTE(review): the file was just removed by the
                            // remove_file() above, so this set_executable()
                            // appears to operate on a missing path — verify.
                            self.set_executable(&disk_path, *executable);
                            let mut file_state = self.file_states.get(&path).unwrap().clone();
                            file_state.file_type = if *executable {
                                FileType::Executable
                            } else {
                                FileType::Normal
                            };
                            file_state
                        }
                        (_, TreeValue::Normal { id, executable }) => {
                            self.write_file(&disk_path, path, id, *executable)
                        }
                        (_, TreeValue::Symlink(id)) => self.write_symlink(&disk_path, path, id),
                        (_, TreeValue::GitSubmodule(_id)) => {
                            println!("ignoring git submodule at {:?}", path);
                            self.file_states.remove(path);
                            return;
                        }
                        (_, TreeValue::Tree(_id)) => {
                            panic!("unexpected tree entry in diff at {:?}", path);
                        }
                        (_, TreeValue::Conflict(_id)) => {
                            panic!(
                                "conflicts cannot be represented in the working copy: {:?}",
                                path
                            );
                        }
                    };
                    self.file_states.insert(path.clone(), file_state);
                    stats.updated_files += 1;
                }
            }
        });
        self.tree_id = tree_id;
        self.save();
        Ok(stats)
    }
}
/// The working copy directory plus lazily-loaded, cached state about it.
/// `RefCell`s let the caches be filled in through `&self`.
pub struct WorkingCopy {
    store: Arc<StoreWrapper>,
    // Directory holding the "checkout" and "tree_state" files.
    path: PathBuf,
    // Cached id of the checked-out commit; loaded lazily from the "checkout"
    // file.
    commit_id: RefCell<Option<CommitId>>,
    // Cached tree state; loaded lazily from the "tree_state" file.
    tree_state: RefCell<Option<TreeState>>,
    // cached commit
    commit: RefCell<Option<Commit>>,
}
impl WorkingCopy {
    /// Creates the on-disk state for a new working copy at `path`, with an
    /// empty "checkout" file.
    pub fn init(store: Arc<StoreWrapper>, path: PathBuf) -> WorkingCopy {
        // Leave the commit_id empty so a subsequent call to check out the root revision
        // will have an effect.
        let proto = protos::working_copy::Checkout::new();
        let mut file = OpenOptions::new()
            .create_new(true)
            .write(true)
            .open(path.join("checkout"))
            .unwrap();
        proto.write_to_writer(&mut file).unwrap();
        WorkingCopy {
            store,
            path,
            commit_id: RefCell::new(None),
            tree_state: RefCell::new(None),
            commit: RefCell::new(None),
        }
    }
    /// Creates a handle to an existing working copy at `path`. All state is
    /// read lazily on first use.
    pub fn load(store: Arc<StoreWrapper>, path: PathBuf) -> WorkingCopy {
        WorkingCopy {
            store,
            path,
            commit_id: RefCell::new(None),
            tree_state: RefCell::new(None),
            commit: RefCell::new(None),
        }
    }
    /// Atomically writes the "checkout" file via a temp file.
    fn write_proto(&self, proto: protos::working_copy::Checkout) {
        let mut temp_file = NamedTempFile::new_in(&self.path).unwrap();
        proto.write_to_writer(temp_file.as_file_mut()).unwrap();
        temp_file.persist(self.path.join("checkout")).unwrap();
    }
    /// Reads the "checkout" file from disk.
    fn read_proto(&self) -> protos::working_copy::Checkout {
        let mut file = File::open(self.path.join("checkout")).unwrap();
        protobuf::parse_from_reader(&mut file).unwrap()
    }
    /// The id of the commit that's currently checked out in the working copy.
    /// Note that the View is the source of truth for which commit *should*
    /// be checked out. That should be kept up to date within a Transaction.
    /// The WorkingCopy is only updated at the end.
    pub fn current_commit_id(&self) -> CommitId {
        if self.commit_id.borrow().is_none() {
            let proto = self.read_proto();
            let commit_id = CommitId(proto.commit_id);
            self.commit_id.replace(Some(commit_id));
        }
        self.commit_id.borrow().as_ref().unwrap().clone()
    }
    /// The commit that's currently checked out in the working copy. Note that
    /// the View is the source of truth for which commit *should* be checked
    /// out. That should be kept up to date within a Transaction. The
    /// WorkingCopy is only updated at the end.
    pub fn current_commit(&self) -> Commit {
        let commit_id = self.current_commit_id();
        // Re-fetch the cached commit if its id no longer matches.
        let stale = match self.commit.borrow().as_ref() {
            None => true,
            Some(value) => value.id() != &commit_id,
        };
        if stale {
            self.commit
                .replace(Some(self.store.get_commit(&commit_id).unwrap()));
        }
        self.commit.borrow().as_ref().unwrap().clone()
    }
    /// Lazily loads the tree state on first access and returns a mutable
    /// borrow of the cache slot (always `Some` on return).
    fn tree_state(&self) -> RefMut<Option<TreeState>> {
        if self.tree_state.borrow().is_none() {
            self.tree_state
                .replace(Some(TreeState::load(self.store.clone(), self.path.clone())));
        }
        self.tree_state.borrow_mut()
    }
    /// The tree id the working copy was last known to match.
    pub fn current_tree_id(&self) -> TreeId {
        self.tree_state()
            .as_ref()
            .unwrap()
            .current_tree_id()
            .clone()
    }
    /// A copy of the recorded per-file stat information.
    pub fn file_states(&self) -> BTreeMap<FileRepoPath, FileState> {
        self.tree_state().as_ref().unwrap().file_states().clone()
    }
    /// Persists the current commit id to the "checkout" file.
    fn save(&self) {
        let mut proto = protos::working_copy::Checkout::new();
        proto.commit_id = self.current_commit_id().0;
        self.write_proto(proto);
    }
    /// Checks out the given (open) commit into the working copy, failing with
    /// `ConcurrentCheckout` if another process changed the checked-out commit
    /// on disk after this process read it.
    pub fn check_out(
        &self,
        repo: &ReadonlyRepo,
        commit: Commit,
    ) -> Result<CheckoutStats, CheckoutError> {
        assert!(commit.is_open());
        let lock_path = self.path.join("working_copy.lock");
        let _lock = FileLock::lock(lock_path);
        // TODO: Write a "pending_checkout" file with the old and new TreeIds so we can
        // continue an interrupted checkout if we find such a file. Write
        // access to that file can also serve as lock so only one process
        // at once can do a checkout.
        // Check if the current checkout has changed on disk after we read it. It's safe
        // to check out another commit regardless, but it's probably not what
        // the caller wanted, so we let them know.
        //
        // We could safely add a version of this function without the check if we see a
        // need for it.
        let current_proto = self.read_proto();
        if let Some(commit_id_at_read_time) = self.commit_id.borrow().as_ref() {
            if current_proto.commit_id != commit_id_at_read_time.0 {
                return Err(CheckoutError::ConcurrentCheckout);
            }
        }
        let stats = self
            .tree_state()
            .as_mut()
            .unwrap()
            .check_out(commit.tree().id().clone(), repo.working_copy_path())?;
        self.commit_id.replace(Some(commit.id().clone()));
        self.commit.replace(Some(commit));
        self.save();
        // TODO: Clear the "pending_checkout" file here.
        Ok(stats)
    }
    /// Snapshots the working copy: writes a tree from the files on disk and,
    /// if it differs from the current commit's tree, rewrites the current
    /// commit with the new tree, updates the repo's view/checkout, and
    /// updates the caches. Returns the resulting commit (new or unchanged).
    pub fn commit(&self, settings: &UserSettings, repo: &mut ReadonlyRepo) -> Commit {
        let lock_path = self.path.join("working_copy.lock");
        let _lock = FileLock::lock(lock_path);
        // Check if the current checkout has changed on disk after we read it. It's fine
        // if it has, but we'll want our new commit to be a successor of the one
        // just created in that case, so we need to reset our state to have the new
        // commit id.
        let current_proto = self.read_proto();
        self.commit_id
            .replace(Some(CommitId(current_proto.commit_id)));
        let current_commit = self.current_commit();
        let new_tree_id = self
            .tree_state()
            .as_mut()
            .unwrap()
            .write_tree(repo.working_copy_path().clone())
            .clone();
        if &new_tree_id != current_commit.tree().id() {
            let mut tx = repo.start_transaction("commit working copy");
            let commit = CommitBuilder::for_rewrite_from(settings, repo.store(), &current_commit)
                .set_tree(new_tree_id)
                .write_to_transaction(&mut tx);
            tx.set_checkout(commit.id().clone());
            let operation = tx.commit();
            repo.reload_at(&operation);
            self.commit_id.replace(Some(commit.id().clone()));
            self.commit.replace(Some(commit));
            self.save();
        }
        self.commit.borrow().as_ref().unwrap().clone()
    }
}

View file

@ -0,0 +1,179 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use jj_lib::repo::{ReadonlyRepo, Repo};
use jj_lib::testutils;
use tempfile::TempDir;
use test_case::test_case;
/// Recursively copies the directory tree at `src` into `dst`, creating `dst`
/// if it doesn't exist (test helper).
///
/// Takes `&Path` rather than `&PathBuf` (clippy `ptr_arg`); callers passing
/// `&PathBuf` still work via deref coercion.
fn copy_directory(src: &Path, dst: &Path) {
    std::fs::create_dir(dst).ok();
    for entry in std::fs::read_dir(src).unwrap() {
        let child_src = entry.unwrap().path();
        let base_name = child_src.file_name().unwrap();
        let child_dst = dst.join(base_name);
        if child_src.is_dir() {
            copy_directory(&child_src, &child_dst)
        } else {
            std::fs::copy(&child_src, &child_dst).unwrap();
        }
    }
}
/// Three-way merges the directory trees at `left`, `base`, and `right` into
/// `output`, recursing into subdirectories (test helper).
///
/// Files are copied from `left`, files deleted between `base` and `right`
/// are removed, and files added in `right` are copied over; when both sides
/// added the same file, either side may win. Subdirectory names are now
/// collected into a `HashSet` so a directory present on several sides is
/// merged exactly once instead of redundantly re-merged per occurrence (the
/// re-merge was idempotent, so behavior is unchanged). Parameters take
/// `&Path` rather than `&PathBuf` (clippy `ptr_arg`).
fn merge_directories(left: &Path, base: &Path, right: &Path, output: &Path) {
    std::fs::create_dir(output).ok();
    // Deduplicated set of subdirectory names seen on any of the three sides.
    let mut sub_dirs = HashSet::new();
    // Walk the left side and copy to the output
    for entry in std::fs::read_dir(left).unwrap() {
        let path = entry.unwrap().path();
        let base_name = path.file_name().unwrap();
        let child_left = left.join(base_name);
        let child_output = output.join(base_name);
        if child_left.is_dir() {
            sub_dirs.insert(base_name.to_os_string());
        } else {
            std::fs::copy(&child_left, &child_output).unwrap();
        }
    }
    // Walk the base and find files removed in the right side, then remove them in
    // the output
    for entry in std::fs::read_dir(base).unwrap() {
        let path = entry.unwrap().path();
        let base_name = path.file_name().unwrap();
        let child_base = base.join(base_name);
        let child_right = right.join(base_name);
        let child_output = output.join(base_name);
        if child_base.is_dir() {
            sub_dirs.insert(base_name.to_os_string());
        } else if !child_right.exists() {
            std::fs::remove_file(child_output).ok();
        }
    }
    // Walk the right side and find files added in the right side, then add them in
    // the output
    for entry in std::fs::read_dir(right).unwrap() {
        let path = entry.unwrap().path();
        let base_name = path.file_name().unwrap();
        let child_base = base.join(base_name);
        let child_right = right.join(base_name);
        let child_output = output.join(base_name);
        if child_right.is_dir() {
            sub_dirs.insert(base_name.to_os_string());
        } else if !child_base.exists() {
            // This overwrites the left side if that's been written. That's fine, since the
            // point of the test is that it should be okay for either side to win.
            std::fs::copy(&child_right, &child_output).unwrap();
        }
    }
    // Do the merge in subdirectories
    for base_name in sub_dirs {
        let child_base = base.join(&base_name);
        let child_right = right.join(&base_name);
        let child_left = left.join(&base_name);
        let child_output = output.join(&base_name);
        merge_directories(&child_left, &child_base, &child_right, &child_output);
    }
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_bad_locking_children(use_git: bool) {
    // Test that two new commits created on separate machines are both visible (not
    // lost due to lack of locking)
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    // Create an initial commit on top of the root commit; both "machines" will
    // create their own child of it.
    let initial = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write_to_new_transaction(&repo, "test");
    // Simulate a write of a commit that happens on one machine
    let machine1_path = TempDir::new().unwrap().into_path();
    copy_directory(repo.working_copy_path(), &machine1_path);
    let machine1_repo = ReadonlyRepo::load(&settings, machine1_path);
    let child1 = testutils::create_random_commit(&settings, &machine1_repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_new_transaction(&machine1_repo, "test");
    // Simulate a write of a commit that happens on another machine
    let machine2_path = TempDir::new().unwrap().into_path();
    copy_directory(repo.working_copy_path(), &machine2_path);
    let machine2_repo = ReadonlyRepo::load(&settings, machine2_path);
    let child2 = testutils::create_random_commit(&settings, &machine2_repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_new_transaction(&machine2_repo, "test");
    // Simulate that the distributed file system now has received the changes from
    // both machines
    let merged_path = TempDir::new().unwrap().into_path();
    merge_directories(
        machine1_repo.working_copy_path(),
        repo.working_copy_path(),
        machine2_repo.working_copy_path(),
        &merged_path,
    );
    // Load the merged state: both machines' commits should be visible as heads.
    let merged_repo = ReadonlyRepo::load(&settings, merged_path);
    let heads: HashSet<_> = merged_repo.view().heads().cloned().collect();
    assert!(heads.contains(child1.id()));
    assert!(heads.contains(child2.id()));
    // The resulting op-head should be a merge of the two machines' operations
    // (i.e. it has two parents).
    let op_head_id = merged_repo.view().base_op_head_id().clone();
    let op_head = merged_repo
        .view()
        .op_store()
        .read_operation(&op_head_id)
        .unwrap();
    assert_eq!(op_head.parents.len(), 2);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_bad_locking_interrupted(use_git: bool) {
    // Test that an interrupted update of the op-heads resulting in one op-head
    // that's a descendant of the other is resolved without creating a new
    // operation.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let initial = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write_to_new_transaction(&repo, "test");
    Arc::get_mut(&mut repo).unwrap().reload();
    // Simulate a crash that resulted in the old op-head left in place. We simulate
    // it somewhat hackily by copying the view/op_heads/ directory before the
    // operation and then copying that back afterwards, leaving the existing
    // op-head(s) in place.
    let op_heads_dir = repo.repo_path().join("view").join("op_heads");
    let backup_path = TempDir::new().unwrap().into_path();
    copy_directory(&op_heads_dir, &backup_path);
    let mut tx = repo.start_transaction("test");
    testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_transaction(&mut tx);
    let op_head_id = tx.commit().id().clone();
    // Restore the old op-head(s) next to the new one, as if the post-commit
    // cleanup never ran.
    copy_directory(&backup_path, &op_heads_dir);
    // Reload the repo and check that only the new head is present.
    let reloaded_repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
    assert_eq!(reloaded_repo.view().base_op_head_id(), &op_head_id);
    // Reload once more to make sure that the view/op_heads/ directory was updated
    // correctly.
    let reloaded_repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
    assert_eq!(reloaded_repo.view().base_op_head_id(), &op_head_id);
}

View file

@ -0,0 +1,135 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::repo_path::FileRepoPath;
use jj_lib::settings::UserSettings;
use jj_lib::testutils;
use jj_lib::tree::DiffSummary;
use test_case::test_case;
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_initial(use_git: bool) {
    // Writing a brand-new commit should produce a closed commit with no
    // predecessors, authored and committed by the configured user, whose diff
    // against the root commit consists only of added files.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();

    let root_file_path = FileRepoPath::from("file");
    let dir_file_path = FileRepoPath::from("dir/file");
    let tree = testutils::create_tree(
        &repo,
        &[
            (&root_file_path, "file contents"),
            (&dir_file_path, "dir/file contents"),
        ],
    );

    let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone())
        .set_parents(vec![store.root_commit_id().clone()])
        .write_to_new_transaction(&repo, "test");

    assert_eq!(commit.parents(), vec![store.root_commit()]);
    assert_eq!(commit.predecessors(), vec![]);
    assert!(!commit.is_open());
    assert_eq!(commit.description(), "");
    assert_eq!(commit.author().name, settings.user_name());
    assert_eq!(commit.author().email, settings.user_email());
    assert_eq!(commit.committer().name, settings.user_name());
    assert_eq!(commit.committer().email, settings.user_email());
    let expected_diff = DiffSummary {
        modified: vec![],
        added: vec![root_file_path, dir_file_path],
        removed: vec![],
    };
    assert_eq!(
        store.root_commit().tree().diff_summary(&commit.tree()),
        expected_diff
    );
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_rewrite(use_git: bool) {
    // Test that rewriting a commit records the original as a predecessor,
    // keeps the original author, and uses the rewriting user as committer.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();
    let root_file_path = FileRepoPath::from("file");
    let dir_file_path = FileRepoPath::from("dir/file");
    let initial_tree = testutils::create_tree(
        &repo,
        &[
            (&root_file_path, "file contents"),
            (&dir_file_path, "dir/file contents"),
        ],
    );
    let initial_commit = CommitBuilder::for_new_commit(&settings, store, initial_tree.id().clone())
        .set_parents(vec![store.root_commit_id().clone()])
        .write_to_new_transaction(&repo, "test");
    // The rewritten version updates one of the two files.
    let rewritten_tree = testutils::create_tree(
        &repo,
        &[
            (&root_file_path, "file contents"),
            (&dir_file_path, "updated dir/file contents"),
        ],
    );
    // Perform the rewrite under a different user so we can tell the author
    // and the committer apart below.
    let mut config = config::Config::new();
    config.set("user.name", "Rewrite User").unwrap();
    config
        .set("user.email", "rewrite.user@example.com")
        .unwrap();
    let rewrite_settings = UserSettings::from_config(config);
    let rewritten_commit =
        CommitBuilder::for_rewrite_from(&rewrite_settings, store, &initial_commit)
            .set_tree(rewritten_tree.id().clone())
            .write_to_new_transaction(&repo, "test");
    // The rewrite keeps the parents and records the original as predecessor.
    assert_eq!(rewritten_commit.parents(), vec![store.root_commit()]);
    assert_eq!(
        rewritten_commit.predecessors(),
        vec![initial_commit.clone()]
    );
    assert_eq!(rewritten_commit.is_open(), false);
    // Author is the original user; committer is the rewriting user.
    assert_eq!(rewritten_commit.author().name, settings.user_name());
    assert_eq!(rewritten_commit.author().email, settings.user_email());
    assert_eq!(
        rewritten_commit.committer().name,
        rewrite_settings.user_name()
    );
    assert_eq!(
        rewritten_commit.committer().email,
        rewrite_settings.user_email()
    );
    // Against the root commit, both files show up as added.
    assert_eq!(
        store
            .root_commit()
            .tree()
            .diff_summary(&rewritten_commit.tree()),
        DiffSummary {
            modified: vec![],
            added: vec![root_file_path, dir_file_path.clone()],
            removed: vec![]
        }
    );
    // Against the original commit, only the updated file shows up as modified.
    assert_eq!(
        initial_commit.tree().diff_summary(&rewritten_commit.tree()),
        DiffSummary {
            modified: vec![dir_file_path],
            added: vec![],
            removed: vec![]
        }
    );
}

View file

@ -0,0 +1,101 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::thread;
use jj_lib::dag_walk;
use jj_lib::repo::{ReadonlyRepo, Repo};
use jj_lib::testutils;
use std::sync::Arc;
use test_case::test_case;
/// Checks the operation log of `repo` after the parallel-commit tests above:
/// walking from the current op-head, there should be exactly 101 non-merge
/// operations — one for initializing the repo plus one per committed
/// transaction (the callers commit 100 of them).
fn verify_view(repo: &ReadonlyRepo) {
    let view = repo.view();
    let op_store = view.op_store();
    let op_head_id = view.base_op_head_id().clone();
    let mut num_ops = 0;
    // Count non-merge operations. Merge operations (more than one parent) are
    // created when concurrent op-heads get merged, so we don't count them.
    for op_id in dag_walk::bfs(
        vec![op_head_id],
        Box::new(|op_id| op_id.clone()),
        Box::new(|op_id| op_store.read_operation(&op_id).unwrap().parents),
    ) {
        if op_store.read_operation(&op_id).unwrap().parents.len() <= 1 {
            num_ops += 1;
        }
    }
    // One operation for initializing the repo (containing the root id and the
    // initial working copy commit), plus 100 commit operations.
    assert_eq!(num_ops, 101);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_commit_parallel(use_git: bool) {
    // This loads a Repo instance and creates and commits many concurrent
    // transactions from it. It then reloads the repo. That should merge all the
    // operations and all commits should be visible.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let mut threads = vec![];
    for _ in 0..100 {
        // Each thread gets its own clone of the settings and the repo Arc.
        let settings = settings.clone();
        let repo = repo.clone();
        let handle = thread::spawn(move || {
            testutils::create_random_commit(&settings, &repo)
                .write_to_new_transaction(&repo, "test");
        });
        threads.push(handle);
    }
    for thread in threads {
        thread.join().ok().unwrap();
    }
    // All thread-local clones of the Arc are gone now, so get_mut succeeds.
    Arc::get_mut(&mut repo).unwrap().reload();
    // One commit per thread plus the commit from the initial checkout on top of the
    // root commit
    assert_eq!(repo.view().heads().count(), 101);
    verify_view(&repo);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_commit_parallel_instances(use_git: bool) {
    // Like the test above but creates a new repo instance for every thread, which
    // makes it behave very similar to separate processes.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let mut threads = vec![];
    for _ in 0..100 {
        let settings = settings.clone();
        // Load a fresh ReadonlyRepo for this thread instead of sharing the Arc.
        let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
        let handle = thread::spawn(move || {
            testutils::create_random_commit(&settings, &repo)
                .write_to_new_transaction(&repo, "test");
        });
        threads.push(handle);
    }
    for thread in threads {
        thread.join().ok().unwrap();
    }
    // One commit per thread plus the commit from the initial checkout on top of the
    // root commit
    let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
    assert_eq!(repo.view().heads().count(), 101);
    verify_view(&repo);
}

View file

@ -0,0 +1,152 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::repo_path::FileRepoPath;
use jj_lib::testutils;
use jj_lib::tree::DiffSummary;
use test_case::test_case;
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_types(use_git: bool) {
    // The diff summary should classify paths as modified, added, or removed.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);

    let clean_path = FileRepoPath::from("clean");
    let modified_path = FileRepoPath::from("modified");
    let added_path = FileRepoPath::from("added");
    let removed_path = FileRepoPath::from("removed");

    // "clean" is unchanged, "modified" changes contents, "removed" exists only
    // in the first tree, and "added" exists only in the second.
    let before = testutils::create_tree(
        &repo,
        &[
            (&clean_path, "clean"),
            (&modified_path, "contents before"),
            (&removed_path, "removed contents"),
        ],
    );
    let after = testutils::create_tree(
        &repo,
        &[
            (&clean_path, "clean"),
            (&modified_path, "contents after"),
            (&added_path, "added contents"),
        ],
    );

    let expected = DiffSummary {
        modified: vec![modified_path],
        added: vec![added_path],
        removed: vec![removed_path],
    };
    assert_eq!(before.diff_summary(&after), expected);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_tree_file_transition(use_git: bool) {
    // When a path switches between being a directory and being a file, the
    // diff summary reports a removal plus an addition, not a modification.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let dir_file_path = FileRepoPath::from("dir/file");
    let dir_path = FileRepoPath::from("dir");
    let with_file_in_dir = testutils::create_tree(&repo, &[(&dir_file_path, "contents")]);
    let with_file_at_dir = testutils::create_tree(&repo, &[(&dir_path, "contents")]);

    // dir/file -> dir: the file inside the directory is removed and the file
    // at the directory's own path is added.
    let forward = DiffSummary {
        modified: vec![],
        added: vec![dir_path.clone()],
        removed: vec![dir_file_path.clone()],
    };
    assert_eq!(with_file_in_dir.diff_summary(&with_file_at_dir), forward);

    // dir -> dir/file: the reverse transition swaps added and removed.
    let backward = DiffSummary {
        modified: vec![],
        added: vec![dir_file_path],
        removed: vec![dir_path],
    };
    assert_eq!(with_file_at_dir.diff_summary(&with_file_in_dir), backward);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_sorting(use_git: bool) {
    // Test the order of paths in the diff summary. The expected values below
    // encode the order: within each category, files directly in a directory
    // come before files in its subdirectories.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let a_path = FileRepoPath::from("a");
    let b_path = FileRepoPath::from("b");
    let f_a_path = FileRepoPath::from("f/a");
    let f_b_path = FileRepoPath::from("f/b");
    let f_f_a_path = FileRepoPath::from("f/f/a");
    let f_f_b_path = FileRepoPath::from("f/f/b");
    let n_path = FileRepoPath::from("n");
    let s_b_path = FileRepoPath::from("s/b");
    let z_path = FileRepoPath::from("z");
    // tree1 contains a subset of tree2's paths, so the extra paths show up as
    // added in one direction and removed in the other.
    let tree1 = testutils::create_tree(
        &repo,
        &[
            (&a_path, "before"),
            (&f_a_path, "before"),
            (&f_f_a_path, "before"),
        ],
    );
    let tree2 = testutils::create_tree(
        &repo,
        &[
            (&a_path, "after"),
            (&b_path, "after"),
            (&f_a_path, "after"),
            (&f_b_path, "after"),
            (&f_f_a_path, "after"),
            (&f_f_b_path, "after"),
            (&n_path, "after"),
            (&s_b_path, "after"),
            (&z_path, "after"),
        ],
    );
    assert_eq!(
        tree1.diff_summary(&tree2),
        DiffSummary {
            modified: vec![a_path.clone(), f_a_path.clone(), f_f_a_path.clone()],
            added: vec![
                b_path.clone(),
                n_path.clone(),
                z_path.clone(),
                f_b_path.clone(),
                f_f_b_path.clone(),
                s_b_path.clone(),
            ],
            removed: vec![]
        }
    );
    assert_eq!(
        tree2.diff_summary(&tree1),
        DiffSummary {
            modified: vec![a_path, f_a_path, f_f_a_path],
            added: vec![],
            removed: vec![b_path, n_path, z_path, f_b_path, f_f_b_path, s_b_path,]
        }
    );
}

630
lib/tests/test_evolution.rs Normal file
View file

@ -0,0 +1,630 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::commit::Commit;
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::evolution::evolve;
use jj_lib::evolution::EvolveListener;
use jj_lib::repo::{ReadonlyRepo, Repo};
use jj_lib::repo_path::FileRepoPath;
use jj_lib::settings::UserSettings;
use jj_lib::testutils;
use test_case::test_case;
/// Returns a `CommitBuilder` for a new random commit whose only parent is
/// `commit`. Callers further configure the builder (predecessors, change id,
/// pruning) before writing it to a transaction.
#[must_use]
fn child_commit(settings: &UserSettings, repo: &ReadonlyRepo, commit: &Commit) -> CommitBuilder {
    // Pass `settings` directly; the original wrote `&settings`, which created
    // a redundant `&&UserSettings` that only compiled via deref coercion.
    testutils::create_random_commit(settings, repo).set_parents(vec![commit.id().clone()])
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_obsolete_and_orphan(use_git: bool) {
    // A commit becomes obsolete when it has a successor with the same change
    // id, and the obsolete commit's descendants become orphans.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // A commit without successors should not be obsolete and not an orphan.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    assert!(!tx.as_repo().evolution().is_obsolete(original.id()));
    assert!(!tx.as_repo().evolution().is_orphan(original.id()));
    // A commit with a successor with a different change_id should not be obsolete.
    let child = child_commit(&settings, &repo, &original).write_to_transaction(&mut tx);
    let grandchild = child_commit(&settings, &repo, &child).write_to_transaction(&mut tx);
    let cherry_picked = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    assert!(!tx.as_repo().evolution().is_obsolete(original.id()));
    assert!(!tx.as_repo().evolution().is_orphan(original.id()));
    assert!(!tx.as_repo().evolution().is_obsolete(child.id()));
    assert!(!tx.as_repo().evolution().is_orphan(child.id()));
    // A commit with a successor with the same change_id should be obsolete.
    let rewritten = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    assert!(tx.as_repo().evolution().is_obsolete(original.id()));
    assert!(!tx.as_repo().evolution().is_obsolete(child.id()));
    // Descendants of the obsolete commit are orphans; the unrelated
    // cherry-pick and the successor itself are not.
    assert!(tx.as_repo().evolution().is_orphan(child.id()));
    assert!(tx.as_repo().evolution().is_orphan(grandchild.id()));
    assert!(!tx.as_repo().evolution().is_obsolete(cherry_picked.id()));
    assert!(!tx.as_repo().evolution().is_orphan(cherry_picked.id()));
    assert!(!tx.as_repo().evolution().is_obsolete(rewritten.id()));
    assert!(!tx.as_repo().evolution().is_orphan(rewritten.id()));
    // It should no longer be obsolete if we remove the successor.
    tx.remove_head(&rewritten);
    assert!(!tx.as_repo().evolution().is_obsolete(original.id()));
    assert!(!tx.as_repo().evolution().is_orphan(child.id()));
    assert!(!tx.as_repo().evolution().is_orphan(grandchild.id()));
    // Nothing here needs to be persisted.
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_divergent(use_git: bool) {
    // A change is divergent exactly when multiple commits share its change id.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // A single commit should not be divergent
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    assert!(!tx.as_repo().evolution().is_obsolete(original.id()));
    // Successors with different change id are not divergent
    let cherry_picked1 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let cherry_picked2 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    assert!(!tx.as_repo().evolution().is_divergent(original.change_id()));
    assert!(!tx
        .as_repo()
        .evolution()
        .is_divergent(cherry_picked1.change_id()));
    assert!(!tx
        .as_repo()
        .evolution()
        .is_divergent(cherry_picked2.change_id()));
    // Commits with the same change id are divergent, including the original commit
    // (it's the change that is divergent)
    let rewritten1 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let rewritten2 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    assert!(tx.as_repo().evolution().is_divergent(original.change_id()));
    assert!(tx
        .as_repo()
        .evolution()
        .is_divergent(rewritten1.change_id()));
    assert!(tx
        .as_repo()
        .evolution()
        .is_divergent(rewritten2.change_id()));
    tx.discard();
}
// TODO: Create a #[repo_test] proc macro that injects the `settings` and `repo`
// variables into the test function
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_rewritten(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // After a simple rewrite, the new parent is the successor.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let rewritten = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    // new_parent() returns a set of commit ids; here it's just the rewrite.
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![rewritten.id().clone()].into_iter().collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_cherry_picked(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // A successor with a different change id has no effect.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let _cherry_picked = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    // The commit itself is still the answer, since it was not made obsolete.
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![original.id().clone()].into_iter().collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_is_pruned(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit's successor is pruned, the new parent is the parent of the
    // pruned commit.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // The successor sits on top of new_parent and is itself pruned.
    let _rewritten = child_commit(&settings, &repo, &new_parent)
        .set_pruned(true)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![new_parent.id().clone()].into_iter().collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_divergent(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit has multiple successors, then they will all be returned.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // Three divergent rewrites, all sharing the original's change id.
    let rewritten1 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let rewritten2 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let rewritten3 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![
            rewritten1.id().clone(),
            rewritten2.id().clone(),
            rewritten3.id().clone()
        ]
        .into_iter()
        .collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_divergent_one_not_pruned(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit has multiple successors, then they will all be returned, even if
    // all but one are pruned (the parents of the pruned commits, not the pruned
    // commits themselves).
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // rewritten1 is a live successor; rewritten2/rewritten3 are pruned, so
    // their parents (parent2/parent3) stand in for them.
    let rewritten1 = child_commit(&settings, &repo, &root_commit)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let parent2 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let _rewritten2 = child_commit(&settings, &repo, &parent2)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    let parent3 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let _rewritten3 = child_commit(&settings, &repo, &parent3)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![
            rewritten1.id().clone(),
            parent2.id().clone(),
            parent3.id().clone()
        ]
        .into_iter()
        .collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_divergent_all_pruned(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit has multiple successors, then they will all be returned, even if
    // they are all pruned (the parents of the pruned commits, not the pruned
    // commits themselves).
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // All three successors are pruned, so their respective parents are the
    // expected answer.
    let parent1 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let _rewritten1 = child_commit(&settings, &repo, &parent1)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    let parent2 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let _rewritten2 = child_commit(&settings, &repo, &parent2)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    let parent3 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let _rewritten3 = child_commit(&settings, &repo, &parent3)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![
            parent1.id().clone(),
            parent2.id().clone(),
            parent3.id().clone()
        ]
        .into_iter()
        .collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_split(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit was split, the new parent is the tip-most rewritten
    // commit. Here we let the middle commit inherit the change id, but it shouldn't
    // matter which one inherits it.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // The split produces a stack of three commits on top of new_parent; only
    // rewritten2 inherits the original's change id.
    let rewritten1 = child_commit(&settings, &repo, &new_parent)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let rewritten2 = child_commit(&settings, &repo, &rewritten1)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let rewritten3 = child_commit(&settings, &repo, &rewritten2)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![rewritten3.id().clone()].into_iter().collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_split_pruned_descendant(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit was split and the tip-most successor became pruned,
    // we use that descendant's parent.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // rewritten1..rewritten2 are live; rewritten3 and rewritten4 at the tip
    // are pruned, so rewritten2 is the expected new parent.
    let rewritten1 = child_commit(&settings, &repo, &new_parent)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let rewritten2 = child_commit(&settings, &repo, &rewritten1)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let rewritten3 = child_commit(&settings, &repo, &rewritten2)
        .set_pruned(true)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let _rewritten4 = child_commit(&settings, &repo, &rewritten3)
        .set_pruned(true)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![rewritten2.id().clone()].into_iter().collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_new_parent_split_forked(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit was split and the successors were split up across topological
    // branches, we return only the descendants from the branch with the same
    // change id (we can't tell a split from two unrelated rewrites and cherry-picks
    // anyway).
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // rewritten1 carries the change id; rewritten2 and rewritten3 fork from
    // it, while rewritten4 is on an unrelated branch (a child of original).
    let rewritten1 = child_commit(&settings, &repo, &new_parent)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    let rewritten2 = child_commit(&settings, &repo, &rewritten1)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let rewritten3 = child_commit(&settings, &repo, &rewritten1)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let _rewritten4 = child_commit(&settings, &repo, &original)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![rewritten2.id().clone(), rewritten3.id().clone()]
            .into_iter()
            .collect()
    );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Tests `Evolution::new_parent()` when a split commit's successors fork and
// one fork tip is pruned.
fn test_new_parent_split_forked_pruned(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // If a commit was split and the successors were split up across topological
    // branches and some commits were pruned, we won't return a parent of the pruned
    // commit if the parent is an ancestor of another commit we'd return.
    let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // rewritten1 keeps the original's change id (the split's successor branch).
    let rewritten1 = child_commit(&settings, &repo, &new_parent)
        .set_predecessors(vec![original.id().clone()])
        .set_change_id(original.change_id().clone())
        .write_to_transaction(&mut tx);
    // Linear chain rewritten1 -> rewritten2 -> rewritten3.
    let rewritten2 = child_commit(&settings, &repo, &rewritten1)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    let rewritten3 = child_commit(&settings, &repo, &rewritten2)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    // A pruned fork off of rewritten1: its parent (rewritten1) is an ancestor
    // of rewritten3, so only rewritten3 should be returned.
    let _rewritten4 = child_commit(&settings, &repo, &rewritten1)
        .set_pruned(true)
        .set_predecessors(vec![original.id().clone()])
        .write_to_transaction(&mut tx);
    assert_eq!(
        tx.as_repo().evolution().new_parent(original.id()),
        vec![rewritten3.id().clone()].into_iter().collect()
    );
    tx.discard();
}
/// Test helper that records the callbacks it receives from `evolve()` so
/// tests can assert on exactly which commits were evolved.
struct RecordingEvolveListener {
    // (orphan, new_commit) pairs reported via `orphan_evolved`
    evolved_orphans: Vec<(Commit, Commit)>,
    // (sources, resolved) pairs reported via `divergent_resolved`
    evolved_divergents: Vec<(Vec<Commit>, Commit)>,
}
impl Default for RecordingEvolveListener {
fn default() -> Self {
RecordingEvolveListener {
evolved_orphans: Default::default(),
evolved_divergents: Default::default(),
}
}
}
// Records the callbacks the tests currently care about; callbacks the tests
// don't expect panic instead of recording (see the TODOs below).
impl EvolveListener for RecordingEvolveListener {
    // Record the (orphan, replacement) pair for later assertions.
    fn orphan_evolved(&mut self, orphan: &Commit, new_commit: &Commit) {
        self.evolved_orphans
            .push((orphan.clone(), new_commit.clone()));
    }

    fn orphan_target_ambiguous(&mut self, _orphan: &Commit) {
        // TODO: Record this too and add tests
        panic!("unexpected call to orphan_target_ambiguous");
    }

    // Record the divergent source commits together with their resolution.
    fn divergent_resolved(&mut self, sources: &[Commit], resolved: &Commit) {
        self.evolved_divergents
            .push((sources.iter().cloned().collect(), resolved.clone()));
    }

    fn divergent_no_common_predecessor(&mut self, _commit1: &Commit, _commit2: &Commit) {
        // TODO: Record this too and add tests
        panic!("unexpected call to divergent_no_common_predecessor");
    }
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
// Tests that `evolve()` rebases orphans (descendants of a rewritten commit)
// onto the rewritten version.
fn test_evolve_orphan(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    // Chain: initial -> child -> grandchild, then rewrite `initial`, which
    // makes `child` and `grandchild` orphans.
    let initial = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let child = child_commit(&settings, &repo, &initial).write_to_transaction(&mut tx);
    let grandchild = child_commit(&settings, &repo, &child).write_to_transaction(&mut tx);
    let rewritten = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial)
        .set_description("rewritten".to_string())
        .write_to_transaction(&mut tx);

    let mut listener = RecordingEvolveListener::default();
    evolve(&settings, &mut tx, &mut listener);
    assert_eq!(listener.evolved_divergents.len(), 0);
    // Both orphans get evolved; the child is rebased onto the rewritten commit.
    assert_eq!(listener.evolved_orphans.len(), 2);
    assert_eq!(&listener.evolved_orphans[0].0, &child);
    assert_eq!(&listener.evolved_orphans[0].1.parents(), &vec![rewritten]);
    assert_eq!(&listener.evolved_orphans[1].0, &grandchild);
    // TODO: the grandchild currently doesn't get rebased onto the rewritten child
    // assert_eq!(
    //     &listener.evolved_orphans[1].1.parents(),
    //     &vec![listener.evolved_orphans[0].1.clone()]
    // );
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Tests that `evolve()` skips orphans that are pruned.
fn test_evolve_pruned_orphan(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    let initial = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    // Create a pruned child and a non-pruned child to show that the pruned one does
    // not get evolved (the non-pruned one is there to show that the setup is not
    // broken).
    let child = child_commit(&settings, &repo, &initial).write_to_transaction(&mut tx);
    let _pruned_child = child_commit(&settings, &repo, &initial)
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    // Rewriting `initial` orphans both children.
    let _rewritten = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial)
        .set_description("rewritten".to_string())
        .write_to_transaction(&mut tx);

    let mut listener = RecordingEvolveListener::default();
    evolve(&settings, &mut tx, &mut listener);
    assert_eq!(listener.evolved_divergents.len(), 0);
    // Only the non-pruned child was evolved.
    assert_eq!(listener.evolved_orphans.len(), 1);
    assert_eq!(listener.evolved_orphans[0].0.id(), child.id());
    tx.discard();
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
// Tests that `evolve()` resolves a divergence (two successors of the same
// commit) by merging the two sides' changes.
fn test_evolve_divergent(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();
    let root_commit = store.root_commit();
    let mut tx = repo.start_transaction("test");

    // Set up a repo like this:
    //
    // x 6 add files X and Z (divergent commit 2)
    // o 5 add file A, contents C
    // | x 4 add files X and Y (divergent commit 1)
    // | o 3 add file A, contents B
    // |/
    // | x 2 add file X (source of divergence)
    // | o 1 add file A, contents A
    // |/
    // o root
    //
    // Resolving the divergence should result in a new commit on top of 5 (because
    // commit 6 has a later commit time than commit 4). It should have files C,
    // X, Y, Z.
    let path_a = FileRepoPath::from("A");
    let path_x = FileRepoPath::from("X");
    let path_y = FileRepoPath::from("Y");
    let path_z = FileRepoPath::from("Z");
    let tree1 = testutils::create_tree(&repo, &[(&path_a, "A")]);
    let tree2 = testutils::create_tree(&repo, &[(&path_a, "A"), (&path_x, "X")]);
    let tree3 = testutils::create_tree(&repo, &[(&path_a, "B")]);
    let tree4 = testutils::create_tree(&repo, &[(&path_a, "B"), (&path_x, "X"), (&path_y, "Y")]);
    let tree5 = testutils::create_tree(&repo, &[(&path_a, "C")]);
    let tree6 = testutils::create_tree(&repo, &[(&path_a, "C"), (&path_x, "X"), (&path_z, "Z")]);
    let commit1 = CommitBuilder::for_new_commit(&settings, repo.store(), tree1.id().clone())
        .set_parents(vec![root_commit.id().clone()])
        .set_description("add file A, contents A".to_string())
        .write_to_transaction(&mut tx);
    let commit3 = CommitBuilder::for_new_commit(&settings, repo.store(), tree3.id().clone())
        .set_parents(vec![root_commit.id().clone()])
        .set_description("add file A, contents B".to_string())
        .write_to_transaction(&mut tx);
    let commit5 = CommitBuilder::for_new_commit(&settings, repo.store(), tree5.id().clone())
        .set_parents(vec![root_commit.id().clone()])
        .set_description("add file A, contents C".to_string())
        .write_to_transaction(&mut tx);
    let commit2 = CommitBuilder::for_new_commit(&settings, repo.store(), tree2.id().clone())
        .set_parents(vec![commit1.id().clone()])
        .set_description("add file X".to_string())
        .write_to_transaction(&mut tx);
    // Commits 4 and 6 are both rewrites of commit 2, which makes them divergent.
    let commit4 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &commit2)
        .set_parents(vec![commit3.id().clone()])
        .set_tree(tree4.id().clone())
        .set_description("add files X and Y".to_string())
        .write_to_transaction(&mut tx);
    // Make commit 6 strictly later than commit 4 so the resolution order is
    // deterministic (the resolved commit goes on top of commit 6's parent).
    let mut later_time = commit4.committer().clone();
    later_time.timestamp.timestamp.0 += 1;
    let commit6 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &commit2)
        .set_parents(vec![commit5.id().clone()])
        .set_tree(tree6.id().clone())
        .set_description("add files X and Z".to_string())
        .set_committer(later_time)
        .write_to_transaction(&mut tx);

    let mut listener = RecordingEvolveListener::default();
    evolve(&settings, &mut tx, &mut listener);
    assert_eq!(listener.evolved_orphans.len(), 0);
    assert_eq!(listener.evolved_divergents.len(), 1);
    assert_eq!(
        listener.evolved_divergents[0].0,
        &[commit6.clone(), commit4.clone()]
    );
    let resolved = listener.evolved_divergents[0].1.clone();
    assert_eq!(resolved.predecessors(), &[commit6.clone(), commit4.clone()]);

    // The resolved tree should combine both sides' changes on top of commit 5.
    let tree = resolved.tree();
    let entries: Vec<_> = tree.entries().collect();
    assert_eq!(entries.len(), 4);
    assert_eq!(tree.value("A").unwrap(), tree5.value("A").unwrap());
    assert_eq!(tree.value("X").unwrap(), tree2.value("X").unwrap());
    assert_eq!(tree.value("Y").unwrap(), tree4.value("Y").unwrap());
    assert_eq!(tree.value("Z").unwrap(), tree6.value("Z").unwrap());
    tx.discard();
}

390
lib/tests/test_index.rs Normal file
View file

@ -0,0 +1,390 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::commit::Commit;
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::index::CompositeIndex;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::settings::UserSettings;
use jj_lib::store::CommitId;
use jj_lib::testutils;
use std::sync::Arc;
use test_case::test_case;
/// Returns a builder for a random commit whose sole parent is `commit`.
#[must_use]
fn child_commit(settings: &UserSettings, repo: &ReadonlyRepo, commit: &Commit) -> CommitBuilder {
    let builder = testutils::create_random_commit(settings, repo);
    builder.set_parents(vec![commit.id().clone()])
}
// Helper just to reduce line wrapping
fn generation_number(index: &CompositeIndex, commit_id: &CommitId) -> u32 {
    let entry = index.entry_by_id(commit_id).unwrap();
    entry.generation_number()
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Tests the index of a freshly initialized repo.
fn test_index_commits_empty_repo(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);

    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit
    assert_eq!(index.num_commits(), 2);

    // Check the generation numbers of the root and the working copy
    assert_eq!(generation_number(&index, repo.store().root_commit_id()), 0);
    assert_eq!(
        generation_number(&index, &repo.working_copy_locked().current_commit_id()),
        1
    );
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Tests generation numbers and ancestry queries on a small graph with a
// branch point and a merge.
fn test_index_commits_standard_cases(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);

    // o H
    // o | G
    // o | F
    // |\|
    // | o E
    // | o D
    // | o C
    // o | B
    // |/
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let wc_commit = repo.working_copy_locked().current_commit();
    let mut tx = repo.start_transaction("test");
    let commit_a = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let commit_b = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx);
    let commit_c = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx);
    let commit_d = child_commit(&settings, &repo, &commit_c).write_to_transaction(&mut tx);
    let commit_e = child_commit(&settings, &repo, &commit_d).write_to_transaction(&mut tx);
    // F is a merge of B and E.
    let commit_f = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![commit_b.id().clone(), commit_e.id().clone()])
        .write_to_transaction(&mut tx);
    let commit_g = child_commit(&settings, &repo, &commit_f).write_to_transaction(&mut tx);
    let commit_h = child_commit(&settings, &repo, &commit_e).write_to_transaction(&mut tx);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();

    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus
    // 8 more
    assert_eq!(index.num_commits(), 2 + 8);

    let stats = index.stats();
    assert_eq!(stats.num_commits, 2 + 8);
    assert_eq!(stats.num_merges, 1);
    assert_eq!(stats.max_generation_number, 6);

    // Generation number = length of the longest path from the root.
    assert_eq!(generation_number(&index, root_commit.id()), 0);
    assert_eq!(generation_number(&index, wc_commit.id()), 1);
    assert_eq!(generation_number(&index, commit_a.id()), 1);
    assert_eq!(generation_number(&index, commit_b.id()), 2);
    assert_eq!(generation_number(&index, commit_c.id()), 2);
    assert_eq!(generation_number(&index, commit_d.id()), 3);
    assert_eq!(generation_number(&index, commit_e.id()), 4);
    assert_eq!(generation_number(&index, commit_f.id()), 5);
    assert_eq!(generation_number(&index, commit_g.id()), 6);
    assert_eq!(generation_number(&index, commit_h.id()), 5);

    // `is_ancestor` is reflexive over the graph edges but not symmetric.
    assert!(index.is_ancestor(root_commit.id(), commit_a.id()));
    assert!(!index.is_ancestor(commit_a.id(), root_commit.id()));

    assert!(index.is_ancestor(root_commit.id(), commit_b.id()));
    assert!(!index.is_ancestor(commit_b.id(), root_commit.id()));

    // B and C are siblings, so neither is an ancestor of the other.
    assert!(!index.is_ancestor(commit_b.id(), commit_c.id()));

    assert!(index.is_ancestor(commit_a.id(), commit_b.id()));
    assert!(index.is_ancestor(commit_a.id(), commit_e.id()));
    assert!(index.is_ancestor(commit_a.id(), commit_f.id()));
    assert!(index.is_ancestor(commit_a.id(), commit_g.id()));
    assert!(index.is_ancestor(commit_a.id(), commit_h.id()));
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Stress-tests ancestry queries on a criss-cross merge chain to verify that
// traversal doesn't revisit nodes (which would be exponential here).
fn test_index_commits_criss_cross(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let num_generations = 50;
    let root_commit = repo.store().root_commit();

    // Create a long chain of criss-crossed merges. If they were traversed without
    // keeping track of visited nodes, it would be 2^50 visits, so if this test
    // finishes in reasonable time, we know that we don't do a naive traversal.
    let mut tx = repo.start_transaction("test");
    let mut left_commits =
        vec![child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx)];
    let mut right_commits =
        vec![child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx)];
    for gen in 1..num_generations {
        // Each new left/right commit is a merge of both commits in the
        // previous generation.
        let new_left = testutils::create_random_commit(&settings, &repo)
            .set_parents(vec![
                left_commits[gen - 1].id().clone(),
                right_commits[gen - 1].id().clone(),
            ])
            .write_to_transaction(&mut tx);
        let new_right = testutils::create_random_commit(&settings, &repo)
            .set_parents(vec![
                left_commits[gen - 1].id().clone(),
                right_commits[gen - 1].id().clone(),
            ])
            .write_to_transaction(&mut tx);
        left_commits.push(new_left);
        right_commits.push(new_right);
    }
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();

    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus 2 for each
    // generation
    assert_eq!(index.num_commits(), 2 + 2 * (num_generations as u32));

    let stats = index.stats();
    assert_eq!(stats.num_commits, 2 + 2 * (num_generations as u32));
    // The first generations are not merges
    assert_eq!(stats.num_merges, 2 * (num_generations as u32 - 1));
    assert_eq!(stats.max_generation_number, num_generations as u32);

    // Check generation numbers
    for gen in 0..num_generations {
        assert_eq!(
            generation_number(&index, left_commits[gen].id()),
            (gen as u32) + 1
        );
        assert_eq!(
            generation_number(&index, right_commits[gen].id()),
            (gen as u32) + 1
        );
    }

    // The left and right commits of the same generation should not be ancestors of
    // each other
    for gen in 0..num_generations {
        assert!(!index.is_ancestor(left_commits[gen].id(), right_commits[gen].id()));
        assert!(!index.is_ancestor(right_commits[gen].id(), left_commits[gen].id()));
    }

    // Both sides of earlier generations should be ancestors. Check a few different
    // earlier generations.
    for gen in 1..num_generations {
        for ancestor_side in &[&left_commits, &right_commits] {
            for descendant_side in &[&left_commits, &right_commits] {
                assert!(index.is_ancestor(ancestor_side[0].id(), descendant_side[gen].id()));
                assert!(index.is_ancestor(ancestor_side[gen - 1].id(), descendant_side[gen].id()));
                assert!(index.is_ancestor(ancestor_side[gen / 2].id(), descendant_side[gen].id()));
            }
        }
    }

    // Walking from either tip should visit every left and right commit exactly
    // once.
    assert_eq!(
        index
            .walk_revs(&[left_commits[num_generations - 1].id().clone()], &[])
            .count(),
        2 * num_generations
    );
    assert_eq!(
        index
            .walk_revs(&[right_commits[num_generations - 1].id().clone()], &[])
            .count(),
        2 * num_generations
    );
    // Walking with the previous generation's same-side tip excluded should
    // yield just the two newest commits.
    assert_eq!(
        index
            .walk_revs(
                &[left_commits[num_generations - 1].id().clone()],
                &[left_commits[num_generations - 2].id().clone()]
            )
            .count(),
        2
    );
    assert_eq!(
        index
            .walk_revs(
                &[right_commits[num_generations - 1].id().clone()],
                &[right_commits[num_generations - 2].id().clone()]
            )
            .count(),
        2
    );
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_index_commits_previous_operations(use_git: bool) {
    // Test that commits visible only in previous operations are indexed.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);

    // Remove commit B and C in one operation and make sure they're still
    // visible in the index after that operation.
    // o C
    // o B
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction("test");
    let commit_a = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx);
    let commit_b = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx);
    let commit_c = child_commit(&settings, &repo, &commit_b).write_to_transaction(&mut tx);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();

    // A second operation that removes B and C as heads.
    let mut tx = repo.start_transaction("test");
    tx.remove_head(&commit_c);
    tx.remove_head(&commit_b);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();

    // Delete index from disk
    let index_operations_dir = repo
        .working_copy_path()
        .join(".jj")
        .join("index")
        .join("operations");
    assert!(index_operations_dir.is_dir());
    std::fs::remove_dir_all(&index_operations_dir).unwrap();
    std::fs::create_dir(&index_operations_dir).unwrap();

    // Reload the repo, forcing the index to be rebuilt from the operation log.
    let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus
    // 3 more
    assert_eq!(index.num_commits(), 2 + 3);

    let stats = index.stats();
    assert_eq!(stats.num_commits, 2 + 3);
    assert_eq!(stats.num_merges, 0);
    assert_eq!(stats.max_generation_number, 3);

    // B and C are still indexed even though they were removed as heads.
    assert_eq!(generation_number(&index, commit_a.id()), 1);
    assert_eq!(generation_number(&index, commit_b.id()), 2);
    assert_eq!(generation_number(&index, commit_c.id()), 3);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Tests that a second operation produces an incremental index level on top
// of the first.
fn test_index_commits_incremental(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);

    // Create A in one operation, then B and C in another. Check that the index is
    // valid after.
    // o C
    // o B
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let commit_a =
        child_commit(&settings, &repo, &root_commit).write_to_new_transaction(&repo, "test");
    Arc::get_mut(&mut repo).unwrap().reload();

    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus
    // 1 more
    assert_eq!(index.num_commits(), 2 + 1);

    let mut tx = repo.start_transaction("test");
    let commit_b = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx);
    let commit_c = child_commit(&settings, &repo, &commit_b).write_to_transaction(&mut tx);
    tx.commit();

    let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus
    // 3 more
    assert_eq!(index.num_commits(), 2 + 3);

    let stats = index.stats();
    assert_eq!(stats.num_commits, 2 + 3);
    assert_eq!(stats.num_merges, 0);
    assert_eq!(stats.max_generation_number, 3);
    // Two index levels: the first operation's commits, then the second's.
    assert_eq!(stats.levels.len(), 2);
    assert_eq!(stats.levels[0].num_commits, 2);
    assert_eq!(stats.levels[1].num_commits, 3);

    assert_eq!(generation_number(&index, root_commit.id()), 0);
    assert_eq!(generation_number(&index, commit_a.id()), 1);
    assert_eq!(generation_number(&index, commit_b.id()), 2);
    assert_eq!(generation_number(&index, commit_c.id()), 3);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
// Tests that committing an empty transaction still leaves the index valid.
fn test_index_commits_incremental_empty_transaction(use_git: bool) {
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);

    // Create A in one operation, then just an empty transaction. Check that the
    // index is valid after.
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let commit_a =
        child_commit(&settings, &repo, &root_commit).write_to_new_transaction(&repo, "test");
    Arc::get_mut(&mut repo).unwrap().reload();

    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus
    // 1 more
    assert_eq!(index.num_commits(), 2 + 1);

    // An operation that adds no commits.
    repo.start_transaction("test").commit();

    let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone());
    let index = repo.index().index_file();
    let index = index.as_composite();
    // There should be the root commit and the working copy commit, plus
    // 1 more
    assert_eq!(index.num_commits(), 2 + 1);

    let stats = index.stats();
    assert_eq!(stats.num_commits, 2 + 1);
    assert_eq!(stats.num_merges, 0);
    assert_eq!(stats.max_generation_number, 1);
    // Two levels, with the empty operation contributing an empty level.
    assert_eq!(stats.levels.len(), 2);
    assert_eq!(stats.levels[0].num_commits, 0);
    assert_eq!(stats.levels[1].num_commits, 3);

    assert_eq!(generation_number(&index, root_commit.id()), 0);
    assert_eq!(generation_number(&index, commit_a.id()), 1);
}

View file

@ -0,0 +1,473 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::repo_path::{DirRepoPath, FileRepoPath, RepoPath};
use jj_lib::store::{ConflictPart, TreeValue};
use jj_lib::testutils;
use jj_lib::tree::Tree;
use jj_lib::trees;
use test_case::test_case;
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_same_type(use_git: bool) {
    // Tests all possible cases where the entry type is unchanged, specifically
    // using only normal files in all trees (no symlinks, no trees, etc.).
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();

    // The file name encodes the state in the base and in each side ("_" means
    // missing)
    let files = vec![
        "__a", // side 2 added
        "_a_", // side 1 added
        "_aa", // both sides added, same content
        "_ab", // both sides added, different content
        "a__", // both sides removed
        "a_a", // side 1 removed
        "a_b", // side 1 removed, side 2 modified
        "aa_", // side 2 removed
        "aaa", // no changes
        "aab", // side 2 modified
        "ab_", // side 1 modified, side 2 removed
        "aba", // side 1 modified
        "abb", // both sides modified, same content
        "abc", // both sides modified, different content
    ];

    // Writes the tree for one column of the encoding: index 0 = base,
    // 1 = side 1, 2 = side 2. A "_" at that position means the file is absent.
    let write_tree = |index: usize| -> Tree {
        let mut tree_builder = store.tree_builder(store.empty_tree_id().clone());
        for path in &files {
            let contents = &path[index..index + 1];
            if contents != "_" {
                testutils::write_normal_file(
                    &mut tree_builder,
                    &FileRepoPath::from(*path),
                    contents,
                );
            }
        }
        let tree_id = tree_builder.write_tree();
        store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
    };

    let base_tree = write_tree(0);
    let side1_tree = write_tree(1);
    let side2_tree = write_tree(2);

    // Create the merged tree
    let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap();
    let merged_tree = store
        .get_tree(&DirRepoPath::root(), &merged_tree_id)
        .unwrap();

    // Check that we have exactly the paths we expect in the merged tree
    let names: Vec<&str> = merged_tree
        .entries()
        .map(|entry| entry.name().as_ref())
        .collect();
    assert_eq!(
        names,
        vec!["__a", "_a_", "_aa", "_ab", "a_b", "aaa", "aab", "ab_", "aba", "abb", "abc",]
    );

    // Check that the simple, non-conflicting cases were resolved correctly
    assert_eq!(merged_tree.value("__a"), side2_tree.value("__a"));
    assert_eq!(merged_tree.value("_a_"), side1_tree.value("_a_"));
    assert_eq!(merged_tree.value("_aa"), side1_tree.value("_aa"));
    assert_eq!(merged_tree.value("aaa"), side1_tree.value("aaa"));
    assert_eq!(merged_tree.value("aab"), side2_tree.value("aab"));
    assert_eq!(merged_tree.value("aba"), side1_tree.value("aba"));
    assert_eq!(merged_tree.value("abb"), side1_tree.value("abb"));

    // Check the conflicting cases
    // "_ab": both sides added different content -> add/add conflict with no
    // removed (base) side.
    match merged_tree.value("_ab").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            assert_eq!(
                conflict.adds,
                vec![
                    ConflictPart {
                        value: side1_tree.value("_ab").cloned().unwrap()
                    },
                    ConflictPart {
                        value: side2_tree.value("_ab").cloned().unwrap()
                    }
                ]
            );
            assert!(conflict.removes.is_empty());
        }
        _ => panic!("unexpected value"),
    };
    // "a_b": removed on side 1, modified on side 2 -> modify/delete conflict.
    match merged_tree.value("a_b").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree.value("a_b").cloned().unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![ConflictPart {
                    value: side2_tree.value("a_b").cloned().unwrap()
                }]
            );
        }
        _ => panic!("unexpected value"),
    };
    // "ab_": modified on side 1, removed on side 2 -> modify/delete conflict.
    match merged_tree.value("ab_").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree.value("ab_").cloned().unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![ConflictPart {
                    value: side1_tree.value("ab_").cloned().unwrap()
                }]
            );
        }
        _ => panic!("unexpected value"),
    };
    // "abc": both sides modified to different content -> three-way conflict.
    match merged_tree.value("abc").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree.value("abc").cloned().unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![
                    ConflictPart {
                        value: side1_tree.value("abc").cloned().unwrap()
                    },
                    ConflictPart {
                        value: side2_tree.value("abc").cloned().unwrap()
                    }
                ]
            );
        }
        _ => panic!("unexpected value"),
    };
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_subtrees(use_git: bool) {
    // Tests that subtrees are merged.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();

    // Writes a tree containing the given paths, each with contents derived
    // from its path.
    let write_tree = |paths: Vec<&str>| -> Tree {
        let mut tree_builder = store.tree_builder(store.empty_tree_id().clone());
        for path in paths {
            testutils::write_normal_file(
                &mut tree_builder,
                &FileRepoPath::from(path),
                &format!("contents of {:?}", path),
            );
        }
        let tree_id = tree_builder.write_tree();
        store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
    };

    // Side 1 adds files at the top level and in d1/; side 2 adds a file deep
    // in d1/d1/d1/.
    let base_tree = write_tree(vec!["f1", "d1/f1", "d1/d1/f1", "d1/d1/d1/f1"]);
    let side1_tree = write_tree(vec![
        "f1",
        "f2",
        "d1/f1",
        "d1/f2",
        "d1/d1/f1",
        "d1/d1/d1/f1",
    ]);
    let side2_tree = write_tree(vec![
        "f1",
        "d1/f1",
        "d1/d1/f1",
        "d1/d1/d1/f1",
        "d1/d1/d1/f2",
    ]);

    let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap();
    let merged_tree = store
        .get_tree(&DirRepoPath::root(), &merged_tree_id)
        .unwrap();
    let entries: Vec<_> = merged_tree.entries().collect();

    // The merge should contain the union of both sides' additions.
    let expected_tree = write_tree(vec![
        "f1",
        "f2",
        "d1/f1",
        "d1/f2",
        "d1/d1/f1",
        "d1/d1/d1/f1",
        "d1/d1/d1/f2",
    ]);
    let expected_entries: Vec<_> = expected_tree.entries().collect();
    assert_eq!(entries, expected_entries);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_subtree_becomes_empty(use_git: bool) {
    // Tests that subtrees that become empty are removed from the parent tree.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();

    // Writes a tree containing the given paths, each with contents derived
    // from its path.
    let write_tree = |paths: Vec<&str>| -> Tree {
        let mut tree_builder = store.tree_builder(store.empty_tree_id().clone());
        for path in paths {
            testutils::write_normal_file(
                &mut tree_builder,
                &FileRepoPath::from(path),
                &format!("contents of {:?}", path),
            );
        }
        let tree_id = tree_builder.write_tree();
        store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
    };

    // Between the two sides, every file in the base gets deleted, so the
    // merged tree should end up completely empty.
    let base_tree = write_tree(vec!["f1", "d1/f1", "d1/d1/d1/f1", "d1/d1/d1/f2"]);
    let side1_tree = write_tree(vec!["f1", "d1/f1", "d1/d1/d1/f1"]);
    let side2_tree = write_tree(vec!["d1/d1/d1/f2"]);

    let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap();
    let merged_tree = store
        .get_tree(&DirRepoPath::root(), &merged_tree_id)
        .unwrap();
    assert_eq!(merged_tree.id(), store.empty_tree_id());
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_types(use_git: bool) {
    // Tests conflicts between different types. This is mostly to test that the
    // conflicts survive the roundtrip to the store.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();

    let mut base_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let mut side1_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let mut side2_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    // Case 1: normal file in base, executable on side 1, symlink on side 2.
    testutils::write_normal_file(
        &mut base_tree_builder,
        &FileRepoPath::from("normal_executable_symlink"),
        "contents",
    );
    testutils::write_executable_file(
        &mut side1_tree_builder,
        &FileRepoPath::from("normal_executable_symlink"),
        "contents",
    );
    testutils::write_symlink(
        &mut side2_tree_builder,
        &FileRepoPath::from("normal_executable_symlink"),
        "contents",
    );
    // Case 2: (empty) tree in base, normal file on side 1, symlink on side 2.
    let tree_id = store.empty_tree_id().clone();
    base_tree_builder.set(
        RepoPath::from("tree_normal_symlink"),
        TreeValue::Tree(tree_id),
    );
    testutils::write_normal_file(
        &mut side1_tree_builder,
        &FileRepoPath::from("tree_normal_symlink"),
        "contents",
    );
    testutils::write_symlink(
        &mut side2_tree_builder,
        &FileRepoPath::from("tree_normal_symlink"),
        "contents",
    );
    let base_tree_id = base_tree_builder.write_tree();
    let base_tree = store.get_tree(&DirRepoPath::root(), &base_tree_id).unwrap();
    let side1_tree_id = side1_tree_builder.write_tree();
    let side1_tree = store
        .get_tree(&DirRepoPath::root(), &side1_tree_id)
        .unwrap();
    let side2_tree_id = side2_tree_builder.write_tree();
    let side2_tree = store
        .get_tree(&DirRepoPath::root(), &side2_tree_id)
        .unwrap();

    // Create the merged tree
    let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap();
    let merged_tree = store
        .get_tree(&DirRepoPath::root(), &merged_tree_id)
        .unwrap();

    // Check the conflicting cases
    match merged_tree.value("normal_executable_symlink").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(&id).unwrap();
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree
                        .value("normal_executable_symlink")
                        .cloned()
                        .unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![
                    ConflictPart {
                        value: side1_tree
                            .value("normal_executable_symlink")
                            .cloned()
                            .unwrap()
                    },
                    ConflictPart {
                        value: side2_tree
                            .value("normal_executable_symlink")
                            .cloned()
                            .unwrap()
                    },
                ]
            );
        }
        _ => panic!("unexpected value"),
    };
    match merged_tree.value("tree_normal_symlink").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree.value("tree_normal_symlink").cloned().unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![
                    ConflictPart {
                        value: side1_tree.value("tree_normal_symlink").cloned().unwrap()
                    },
                    ConflictPart {
                        value: side2_tree.value("tree_normal_symlink").cloned().unwrap()
                    },
                ]
            );
        }
        _ => panic!("unexpected value"),
    };
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_simplify_conflict(use_git: bool) {
    // Tests that conflicts get simplified when rebasing reintroduces one of
    // the sides: rebasing a conflicted tree back onto its base resolves the
    // conflict, and rebasing further upstream drops the intermediate upstream
    // content from the conflict.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();
    // Writes a tree containing a single file "file" with the given contents.
    let write_tree = |contents: &str| -> Tree {
        testutils::create_tree(&repo, &[(&FileRepoPath::from("file"), contents)])
    };
    let base_tree = write_tree("base contents");
    let branch_tree = write_tree("branch contents");
    let upstream1_tree = write_tree("upstream1 contents");
    let upstream2_tree = write_tree("upstream2 contents");
    // Three-way merge helper: merges `side1` and `side2` using `base` as the
    // common ancestor and reads back the resulting tree.
    let merge_trees = |base: &Tree, side1: &Tree, side2: &Tree| -> Tree {
        // The closure parameters are already references; pass them directly
        // instead of taking another borrow (clippy::needless_borrow).
        let tree_id = trees::merge_trees(side1, base, side2).unwrap();
        store.get_tree(&DirRepoPath::root(), &tree_id).unwrap()
    };
    // Rebase the branch tree to the first upstream tree
    let rebased1_tree = merge_trees(&base_tree, &branch_tree, &upstream1_tree);
    // Make sure we have a conflict (testing the test setup)
    match rebased1_tree.value("file").unwrap() {
        TreeValue::Conflict(_) => {
            // expected
        }
        _ => panic!("unexpected value"),
    };
    // Rebase the rebased tree back to the base. The conflict should be gone. Try
    // both directions.
    let rebased_back_tree = merge_trees(&upstream1_tree, &rebased1_tree, &base_tree);
    assert_eq!(rebased_back_tree.value("file"), branch_tree.value("file"));
    let rebased_back_tree = merge_trees(&upstream1_tree, &base_tree, &rebased1_tree);
    assert_eq!(rebased_back_tree.value("file"), branch_tree.value("file"));
    // Rebase the rebased tree further upstream. The conflict should be simplified
    // to not mention the contents from the first rebase.
    let further_rebased_tree = merge_trees(&upstream1_tree, &rebased1_tree, &upstream2_tree);
    match further_rebased_tree.value("file").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            // The simplified conflict is base vs (branch, upstream2); the
            // upstream1 contents must not appear.
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree.value("file").cloned().unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![
                    ConflictPart {
                        value: branch_tree.value("file").cloned().unwrap()
                    },
                    ConflictPart {
                        value: upstream2_tree.value("file").cloned().unwrap()
                    },
                ]
            );
        }
        _ => panic!("unexpected value"),
    };
    // Same rebase with the sides swapped; the adds come in the swapped order.
    let further_rebased_tree = merge_trees(&upstream1_tree, &upstream2_tree, &rebased1_tree);
    match further_rebased_tree.value("file").unwrap() {
        TreeValue::Conflict(id) => {
            let conflict = store.read_conflict(id).unwrap();
            assert_eq!(
                conflict.removes,
                vec![ConflictPart {
                    value: base_tree.value("file").cloned().unwrap()
                }]
            );
            assert_eq!(
                conflict.adds,
                vec![
                    ConflictPart {
                        value: upstream2_tree.value("file").cloned().unwrap()
                    },
                    ConflictPart {
                        value: branch_tree.value("file").cloned().unwrap()
                    },
                ]
            );
        }
        _ => panic!("unexpected value"),
    };
}

View file

@ -0,0 +1,171 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::repo::Repo;
use jj_lib::store::CommitId;
use jj_lib::testutils;
use std::collections::HashSet;
use std::path::Path;
use std::sync::Arc;
use test_case::test_case;
/// Returns the names of the entries directly inside `dir` (non-recursive,
/// unsorted).
///
/// Panics if the directory cannot be read or if an entry's name is not valid
/// UTF-8; either indicates a broken test environment.
fn list_dir(dir: &Path) -> Vec<String> {
    std::fs::read_dir(dir)
        .unwrap()
        .map(|entry| {
            // `into_string` consumes the `OsString` directly instead of
            // borrowing with `to_str` and then copying with `to_owned`.
            entry
                .unwrap()
                .file_name()
                .into_string()
                .expect("entry name should be valid UTF-8")
        })
        .collect()
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_consecutive_operations(use_git: bool) {
    // Test that consecutive operations result in a single op-head on disk after
    // each operation
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let op_heads_dir = repo.repo_path().join("view").join("op_heads");
    let op_head_id0 = repo.view().base_op_head_id().clone();
    // The freshly initialized repo has exactly one op-head: the initial
    // operation.
    assert_eq!(
        list_dir(&op_heads_dir),
        vec![repo.view().base_op_head_id().hex()]
    );
    let mut tx1 = repo.start_transaction("transaction 1");
    testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx1);
    let op_head_id1 = tx1.commit().id().clone();
    assert_ne!(op_head_id1, op_head_id0);
    // Committing the transaction replaced the previous op-head with the new
    // one, leaving a single file on disk.
    assert_eq!(list_dir(&op_heads_dir), vec![op_head_id1.hex()]);
    // Reload so the next transaction starts from the first one's operation,
    // making the operations consecutive rather than concurrent.
    Arc::get_mut(&mut repo).unwrap().reload();
    let mut tx2 = repo.start_transaction("transaction 2");
    testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx2);
    let op_head_id2 = tx2.commit().id().clone();
    assert_ne!(op_head_id2, op_head_id0);
    assert_ne!(op_head_id2, op_head_id1);
    assert_eq!(list_dir(&op_heads_dir), vec![op_head_id2.hex()]);
    // Reloading the repo makes no difference (there are no conflicting operations
    // to resolve).
    Arc::get_mut(&mut repo).unwrap().reload();
    assert_eq!(list_dir(&op_heads_dir), vec![op_head_id2.hex()]);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_concurrent_operations(use_git: bool) {
    // Test that concurrent operations result in multiple op-heads on disk until
    // the repo has been reloaded (which currently happens right away).
    // NOTE(review): the original comment said "consecutive", but this test
    // deliberately starts the second transaction without reloading, making
    // the two operations concurrent.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let op_heads_dir = repo.repo_path().join("view").join("op_heads");
    let op_head_id0 = repo.view().base_op_head_id().clone();
    assert_eq!(
        list_dir(&op_heads_dir),
        vec![repo.view().base_op_head_id().hex()]
    );
    let mut tx1 = repo.start_transaction("transaction 1");
    testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx1);
    let op_head_id1 = tx1.commit().id().clone();
    assert_ne!(op_head_id1, op_head_id0);
    assert_eq!(list_dir(&op_heads_dir), vec![op_head_id1.hex()]);
    // Start the second transaction WITHOUT reloading, so it is based on the
    // same operation as the first — i.e. the two operations run in parallel.
    let mut tx2 = repo.start_transaction("transaction 2");
    testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx2);
    let op_head_id2 = tx2.commit().id().clone();
    assert_ne!(op_head_id2, op_head_id0);
    assert_ne!(op_head_id2, op_head_id1);
    // After both transactions have committed, we should have two op-heads on disk,
    // since they were run in parallel.
    let mut actual_heads_on_disk = list_dir(&op_heads_dir);
    actual_heads_on_disk.sort();
    let mut expected_heads_on_disk = vec![op_head_id1.hex(), op_head_id2.hex()];
    expected_heads_on_disk.sort();
    assert_eq!(actual_heads_on_disk, expected_heads_on_disk);
    // Reloading the repo causes the operations to be merged
    Arc::get_mut(&mut repo).unwrap().reload();
    let merged_op_head_id = repo.view().base_op_head_id().clone();
    // The merged operation is distinct from all three earlier ones, and is now
    // the single op-head on disk.
    assert_ne!(merged_op_head_id, op_head_id0);
    assert_ne!(merged_op_head_id, op_head_id1);
    assert_ne!(merged_op_head_id, op_head_id2);
    assert_eq!(list_dir(&op_heads_dir), vec![merged_op_head_id.hex()]);
}
/// Asserts that the set of heads in `repo`'s view is exactly `expected`
/// (order-insensitive).
fn assert_heads(repo: &impl Repo, expected: Vec<&CommitId>) {
    let expected: HashSet<CommitId> = expected.into_iter().cloned().collect();
    let actual: HashSet<CommitId> = repo.view().heads().cloned().collect();
    assert_eq!(actual, expected);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_isolation(use_git: bool) {
    // Test that two concurrent transactions don't see each other's changes.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let wc_id = repo.working_copy_locked().current_commit_id();
    let initial = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write_to_new_transaction(&repo, "test");
    Arc::get_mut(&mut repo).unwrap().reload();
    // Start two transactions off the same base operation.
    let mut tx1 = repo.start_transaction("transaction 1");
    let mut tx2 = repo.start_transaction("transaction 2");
    // Before anything is written, all three views agree.
    assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]);
    assert_heads(tx1.as_repo(), vec![&wc_id, initial.id()]);
    assert_heads(tx2.as_repo(), vec![&wc_id, initial.id()]);
    assert!(!repo.evolution().is_obsolete(initial.id()));
    assert!(!tx1.as_repo().evolution().is_obsolete(initial.id()));
    assert!(!tx2.as_repo().evolution().is_obsolete(initial.id()));
    // Rewrite `initial` independently in each transaction.
    let rewrite1 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial)
        .set_description("rewrite1".to_string())
        .write_to_transaction(&mut tx1);
    let rewrite2 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial)
        .set_description("rewrite2".to_string())
        .write_to_transaction(&mut tx2);
    // Neither transaction has committed yet, so each transaction sees its own
    // commit.
    assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]);
    assert_heads(tx1.as_repo(), vec![&wc_id, initial.id(), rewrite1.id()]);
    assert_heads(tx2.as_repo(), vec![&wc_id, initial.id(), rewrite2.id()]);
    // Each transaction sees `initial` as obsolete (it rewrote it); the base
    // repo does not.
    assert!(!repo.evolution().is_obsolete(initial.id()));
    assert!(tx1.as_repo().evolution().is_obsolete(initial.id()));
    assert!(tx2.as_repo().evolution().is_obsolete(initial.id()));
    // The base repo and tx2 don't see the commits from tx1.
    tx1.commit();
    assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]);
    assert_heads(tx2.as_repo(), vec![&wc_id, initial.id(), rewrite2.id()]);
    // The base repo still doesn't see the commits after both transactions commit.
    tx2.commit();
    assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]);
    // After reload, the base repo sees both rewrites.
    Arc::get_mut(&mut repo).unwrap().reload();
    assert_heads(
        repo.as_ref(),
        vec![&wc_id, initial.id(), rewrite1.id(), rewrite2.id()],
    );
}

View file

@ -0,0 +1,303 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::repo::Repo;
use jj_lib::repo_path::FileRepoPath;
use jj_lib::store::{Conflict, ConflictId, ConflictPart, TreeValue};
use jj_lib::store_wrapper::StoreWrapper;
use jj_lib::testutils;
use std::sync::Arc;
use test_case::test_case;
// TODO Many of the tests here are not run with Git because they end up creating
// two commits with the same contents.
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_open(use_git: bool) {
    // Checking out an open commit should use that exact commit (no new commit
    // gets created).
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    // Create an open commit to request as the checkout.
    let mut setup_tx = repo.start_transaction("test");
    let wanted = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .write_to_transaction(&mut setup_tx);
    setup_tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    // Check it out; the returned commit should be the requested one itself.
    let mut checkout_tx = repo.start_transaction("test");
    let checked_out = checkout_tx.check_out(&settings, &wanted);
    assert_eq!(checked_out.id(), wanted.id());
    checkout_tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    // The repo view should now record it as the current checkout.
    assert_eq!(repo.view().checkout(), checked_out.id());
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_closed(use_git: bool) {
    // Checking out a closed commit should create an open child on top of it.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    // Create a closed commit to request as the checkout.
    let mut setup_tx = repo.start_transaction("test");
    let wanted = testutils::create_random_commit(&settings, &repo)
        .set_open(false)
        .write_to_transaction(&mut setup_tx);
    setup_tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    // Check it out; a new commit with the same tree should be created as the
    // requested commit's single child.
    let mut checkout_tx = repo.start_transaction("test");
    let checked_out = checkout_tx.check_out(&settings, &wanted);
    assert_eq!(checked_out.tree().id(), wanted.tree().id());
    assert_eq!(checked_out.parents().len(), 1);
    assert_eq!(checked_out.parents()[0].id(), wanted.id());
    checkout_tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    // The repo view should now record the child as the current checkout.
    assert_eq!(repo.view().checkout(), checked_out.id());
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_open_with_conflict(use_git: bool) {
    // Test that Transaction::check_out() creates a successor if the requested
    // commit is open and has conflicts
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();
    let file_path = FileRepoPath::from("file");
    // Build a tree whose only entry ("file") is a conflict.
    let conflict_id = write_conflict(store, &file_path);
    let mut tree_builder = repo
        .store()
        .tree_builder(repo.store().empty_tree_id().clone());
    tree_builder.set(file_path.to_repo_path(), TreeValue::Conflict(conflict_id));
    let tree_id = tree_builder.write_tree();
    let mut tx = repo.start_transaction("test");
    let requested_checkout = CommitBuilder::for_new_commit(&settings, store, tree_id)
        .set_open(true)
        .write_to_transaction(&mut tx);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    let mut tx = repo.start_transaction("test");
    let actual_checkout = tx.check_out(&settings, &requested_checkout);
    // The conflict should have been materialized as a regular, non-executable
    // file in the checked-out commit's tree.
    let file_value = actual_checkout.tree().path_value(&file_path.to_repo_path());
    match file_value {
        Some(TreeValue::Normal {
            id: _,
            executable: false,
        }) => {}
        _ => panic!("unexpected tree value: {:?}", file_value),
    }
    // Because the requested commit was open, the checkout is its successor
    // (same change, rewritten) rather than a child.
    assert_eq!(actual_checkout.predecessors().len(), 1);
    assert_eq!(
        actual_checkout.predecessors()[0].id(),
        requested_checkout.id()
    );
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    assert_eq!(repo.view().checkout(), actual_checkout.id());
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_closed_with_conflict(use_git: bool) {
    // Test that Transaction::check_out() creates a child if the requested commit is
    // closed and has conflicts
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store();
    let file_path = FileRepoPath::from("file");
    // Build a tree whose only entry ("file") is a conflict.
    let conflict_id = write_conflict(store, &file_path);
    let mut tree_builder = repo
        .store()
        .tree_builder(repo.store().empty_tree_id().clone());
    tree_builder.set(file_path.to_repo_path(), TreeValue::Conflict(conflict_id));
    let tree_id = tree_builder.write_tree();
    let mut tx = repo.start_transaction("test");
    let requested_checkout = CommitBuilder::for_new_commit(&settings, store, tree_id)
        .set_open(false)
        .write_to_transaction(&mut tx);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    let mut tx = repo.start_transaction("test");
    let actual_checkout = tx.check_out(&settings, &requested_checkout);
    // The conflict should have been materialized as a regular, non-executable
    // file in the checked-out commit's tree.
    let file_value = actual_checkout.tree().path_value(&file_path.to_repo_path());
    match file_value {
        Some(TreeValue::Normal {
            id: _,
            executable: false,
        }) => {}
        _ => panic!("unexpected tree value: {:?}", file_value),
    }
    // Because the requested commit was closed, the checkout is its child.
    assert_eq!(actual_checkout.parents().len(), 1);
    assert_eq!(actual_checkout.parents()[0].id(), requested_checkout.id());
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    assert_eq!(repo.view().checkout(), actual_checkout.id());
}
/// Writes a three-part conflict for `file_path` to the store and returns its
/// id. The conflict removes file contents "a\n" and adds "b\n" and "c\n" (one
/// base, two sides).
fn write_conflict(store: &Arc<StoreWrapper>, file_path: &FileRepoPath) -> ConflictId {
    // `file_path` is already a reference; borrowing it again (`&file_path`)
    // only worked through auto-deref (clippy::needless_borrow).
    let file_id1 = testutils::write_file(store, file_path, "a\n");
    let file_id2 = testutils::write_file(store, file_path, "b\n");
    let file_id3 = testutils::write_file(store, file_path, "c\n");
    let conflict = Conflict {
        removes: vec![ConflictPart {
            value: TreeValue::Normal {
                id: file_id1,
                executable: false,
            },
        }],
        adds: vec![
            ConflictPart {
                value: TreeValue::Normal {
                    id: file_id2,
                    executable: false,
                },
            },
            ConflictPart {
                value: TreeValue::Normal {
                    id: file_id3,
                    executable: false,
                },
            },
        ],
    };
    store.write_conflict(&conflict).unwrap()
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_previous_not_empty(use_git: bool) {
    // Test that Transaction::check_out() does not usually prune the previous
    // commit.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    // Check out an open commit with some (random) content in it.
    let mut setup_tx = repo.start_transaction("test");
    let previous = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .write_to_transaction(&mut setup_tx);
    setup_tx.check_out(&settings, &previous);
    setup_tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    // Move the checkout elsewhere; since the previous checkout was not empty,
    // it must not be marked obsolete.
    let mut tx = repo.start_transaction("test");
    let next = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .write_to_transaction(&mut tx);
    tx.check_out(&settings, &next);
    assert!(!tx.as_repo().evolution().is_obsolete(previous.id()));
    tx.discard();
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_previous_empty(use_git: bool) {
    // Test that Transaction::check_out() prunes the previous commit if it was
    // empty.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    // Check out an open commit with an empty tree, on top of the root commit.
    let mut tx = repo.start_transaction("test");
    let old_checkout = CommitBuilder::for_open_commit(
        &settings,
        repo.store(),
        repo.store().root_commit_id().clone(),
        repo.store().empty_tree_id().clone(),
    )
    .write_to_transaction(&mut tx);
    tx.check_out(&settings, &old_checkout);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    // Moving the checkout away should prune the empty old checkout, which
    // makes it obsolete.
    let mut tx = repo.start_transaction("test");
    let new_checkout = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .write_to_transaction(&mut tx);
    tx.check_out(&settings, &new_checkout);
    assert!(tx.as_repo().evolution().is_obsolete(old_checkout.id()));
    tx.discard();
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_previous_empty_and_obsolete(use_git: bool) {
    // Test that Transaction::check_out() does not unnecessarily prune the previous
    // commit if it was empty but already obsolete.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let mut tx = repo.start_transaction("test");
    let old_checkout = CommitBuilder::for_open_commit(
        &settings,
        repo.store(),
        repo.store().root_commit_id().clone(),
        repo.store().empty_tree_id().clone(),
    )
    .write_to_transaction(&mut tx);
    // Rewriting the commit makes it obsolete before it is ever checked out.
    let successor = CommitBuilder::for_rewrite_from(&settings, repo.store(), &old_checkout)
        .write_to_transaction(&mut tx);
    tx.check_out(&settings, &old_checkout);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    let mut tx = repo.start_transaction("test");
    let new_checkout = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .write_to_transaction(&mut tx);
    tx.check_out(&settings, &new_checkout);
    // The old checkout should still have only the pre-existing successor;
    // check_out() must not have added a pruned successor on top.
    let successors = tx.as_repo().evolution().successors(old_checkout.id());
    assert_eq!(successors.len(), 1);
    assert_eq!(successors.iter().next().unwrap(), successor.id());
    tx.discard();
}
#[test_case(false ; "local store")]
// #[test_case(true ; "git store")]
fn test_checkout_previous_empty_and_pruned(use_git: bool) {
    // Test that Transaction::check_out() does not unnecessarily prune the previous
    // commit if it was empty but already pruned.
    // (The original comment said "already obsolete" — copy-paste from the
    // previous test; this one creates a pruned commit.)
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    // Check out a commit that is already marked as pruned.
    let mut tx = repo.start_transaction("test");
    let old_checkout = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .set_pruned(true)
        .write_to_transaction(&mut tx);
    tx.check_out(&settings, &old_checkout);
    tx.commit();
    Arc::get_mut(&mut repo).unwrap().reload();
    let mut tx = repo.start_transaction("test");
    let new_checkout = testutils::create_random_commit(&settings, &repo)
        .set_open(true)
        .write_to_transaction(&mut tx);
    tx.check_out(&settings, &new_checkout);
    // The already-pruned old checkout should not have gained any successors.
    assert!(tx
        .as_repo()
        .evolution()
        .successors(old_checkout.id())
        .is_empty());
    tx.discard();
}

93
lib/tests/test_view.rs Normal file
View file

@ -0,0 +1,93 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::repo::Repo;
use jj_lib::store::CommitId;
use jj_lib::testutils;
use test_case::test_case;
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_heads_empty(use_git: bool) {
    // In a fresh repo the only head is the working-copy commit.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let view = repo.view();
    let wc = repo.working_copy_locked();
    let heads: Vec<CommitId> = view.heads().cloned().collect();
    assert_eq!(heads, vec![wc.current_commit_id()]);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_heads_fork(use_git: bool) {
    // Two children of the same commit should both be heads; their parent
    // should not be.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let mut tx = repo.start_transaction("test");
    let initial = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write_to_transaction(&mut tx);
    let child1 = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_transaction(&mut tx);
    let child2 = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_transaction(&mut tx);
    let heads = tx.as_repo().view();
    let wc = repo.working_copy_locked();
    // The heads are the working-copy commit plus the two children; `initial`
    // is covered by its children.
    let mut actual_all_heads: Vec<CommitId> = heads.heads().cloned().collect();
    actual_all_heads.sort();
    let mut expected_all_heads = vec![
        wc.current_commit_id(),
        child1.id().clone(),
        child2.id().clone(),
    ];
    expected_all_heads.sort();
    assert_eq!(actual_all_heads, expected_all_heads);
    // Drop the view borrow before discarding the transaction it borrows from.
    drop(heads);
    tx.discard();
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_heads_merge(use_git: bool) {
    // A merge commit covers both of its parents, so only the merge (and the
    // working-copy commit) remain as heads.
    let settings = testutils::user_settings();
    let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
    let mut tx = repo.start_transaction("test");
    let initial = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write_to_transaction(&mut tx);
    let child1 = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_transaction(&mut tx);
    let child2 = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![initial.id().clone()])
        .write_to_transaction(&mut tx);
    let merge = testutils::create_random_commit(&settings, &repo)
        .set_parents(vec![child1.id().clone(), child2.id().clone()])
        .write_to_transaction(&mut tx);
    let heads = tx.as_repo().view();
    let wc = repo.working_copy_locked();
    let mut actual_all_heads: Vec<CommitId> = heads.heads().cloned().collect();
    actual_all_heads.sort();
    let mut expected_all_heads = vec![wc.current_commit_id(), merge.id().clone()];
    expected_all_heads.sort();
    assert_eq!(actual_all_heads, expected_all_heads);
    // Drop the view borrow before discarding the transaction it borrows from.
    drop(heads);
    tx.discard();
}

View file

@ -0,0 +1,267 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::repo::{ReadonlyRepo, Repo};
use jj_lib::repo_path::{FileRepoPath, RepoPath};
use jj_lib::settings::UserSettings;
use jj_lib::store::TreeValue;
use jj_lib::testutils;
use jj_lib::tree_builder::TreeBuilder;
use std::fs::OpenOptions;
use std::io::Write;
use std::sync::Arc;
use test_case::test_case;
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_root(use_git: bool) {
    // Tests the working copy in a fresh repo: the checkout is an open, empty,
    // parentless commit (distinct from the root commit) authored by the
    // configured user.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let owned_wc = repo.working_copy().clone();
    let wc = owned_wc.lock().unwrap();
    assert_eq!(&wc.current_commit_id(), repo.view().checkout());
    assert_ne!(&wc.current_commit_id(), repo.store().root_commit_id());
    let wc_commit = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap());
    assert_eq!(wc_commit.id(), repo.view().checkout());
    assert_eq!(wc_commit.tree().id(), repo.store().empty_tree_id());
    assert_eq!(wc_commit.store_commit().parents, vec![]);
    assert_eq!(wc_commit.predecessors(), vec![]);
    assert_eq!(wc_commit.description(), "");
    // `assert!` instead of comparing a bool against `true`
    // (clippy::bool_assert_comparison).
    assert!(wc_commit.is_open());
    assert_eq!(wc_commit.author().name, settings.user_name());
    assert_eq!(wc_commit.author().email, settings.user_email());
    assert_eq!(wc_commit.committer().name, settings.user_name());
    assert_eq!(wc_commit.committer().email, settings.user_email());
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_checkout_file_transitions(use_git: bool) {
    // Tests switching between commits where a certain path is of one type in one
    // commit and another type in the other. Includes a "missing" type, so we cover
    // additions and removals as well.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let store = repo.store().clone();
    // The possible kinds an entry at a path can have (or be absent entirely).
    #[derive(Debug, Clone, Copy)]
    enum Kind {
        Missing,
        Normal,
        Executable,
        Symlink,
        Tree,
        GitSubmodule,
    };
    // Writes an entry of the given `kind` at `path` into `tree_builder`.
    // `Kind::Missing` writes nothing.
    fn write_path(
        settings: &UserSettings,
        repo: &ReadonlyRepo,
        tree_builder: &mut TreeBuilder,
        kind: Kind,
        path: &str,
    ) {
        let store = repo.store();
        let value = match kind {
            Kind::Missing => {
                return;
            }
            Kind::Normal => {
                let id =
                    testutils::write_file(store, &FileRepoPath::from(path), "normal file contents");
                TreeValue::Normal {
                    id,
                    executable: false,
                }
            }
            Kind::Executable => {
                let id = testutils::write_file(
                    store,
                    &FileRepoPath::from(path),
                    "executable file contents",
                );
                TreeValue::Normal {
                    id,
                    executable: true,
                }
            }
            Kind::Symlink => {
                let id = store
                    .write_symlink(&FileRepoPath::from(path), "target")
                    .unwrap();
                TreeValue::Symlink(id)
            }
            Kind::Tree => {
                // A subtree containing a single normal file (written via a
                // recursive call).
                let mut sub_tree_builder = store.tree_builder(store.empty_tree_id().clone());
                let file_path = path.to_owned() + "/file";
                write_path(
                    settings,
                    repo,
                    &mut sub_tree_builder,
                    Kind::Normal,
                    &file_path,
                );
                let id = sub_tree_builder.write_tree();
                TreeValue::Tree(id)
            }
            Kind::GitSubmodule => {
                let id = testutils::create_random_commit(&settings, &repo)
                    .write_to_new_transaction(&repo, "test")
                    .id()
                    .clone();
                TreeValue::GitSubmodule(id)
            }
        };
        tree_builder.set(RepoPath::from(path), value);
    };
    let mut kinds = vec![
        Kind::Missing,
        Kind::Normal,
        Kind::Executable,
        Kind::Symlink,
        Kind::Tree,
    ];
    if use_git {
        kinds.push(Kind::GitSubmodule);
    }
    // Build two trees with one path per (left kind, right kind) pair, so a
    // single left->right checkout exercises every transition.
    let mut left_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let mut right_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let mut files = vec![];
    for left_kind in &kinds {
        for right_kind in &kinds {
            let path = format!("{:?}_{:?}", left_kind, right_kind);
            write_path(&settings, &repo, &mut left_tree_builder, *left_kind, &path);
            write_path(
                &settings,
                &repo,
                &mut right_tree_builder,
                *right_kind,
                &path,
            );
            files.push((*left_kind, *right_kind, path));
        }
    }
    let left_tree_id = left_tree_builder.write_tree();
    let right_tree_id = right_tree_builder.write_tree();
    let left_commit = CommitBuilder::for_new_commit(&settings, repo.store(), left_tree_id)
        .set_parents(vec![store.root_commit_id().clone()])
        .set_open(true)
        .write_to_new_transaction(&repo, "test");
    let right_commit = CommitBuilder::for_new_commit(&settings, repo.store(), right_tree_id)
        .set_parents(vec![store.root_commit_id().clone()])
        .set_open(true)
        .write_to_new_transaction(&repo, "test");
    // Check out the left commit, then switch to the right commit.
    let owned_wc = repo.working_copy().clone();
    let wc = owned_wc.lock().unwrap();
    wc.check_out(&repo, left_commit).unwrap();
    wc.commit(&settings, Arc::get_mut(&mut repo).unwrap());
    wc.check_out(&repo, right_commit.clone()).unwrap();
    // Check that the working copy is clean.
    let after_commit = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap());
    let diff_summary = right_commit.tree().diff_summary(&after_commit.tree());
    assert_eq!(diff_summary.modified, vec![]);
    assert_eq!(diff_summary.added, vec![]);
    assert_eq!(diff_summary.removed, vec![]);
    // Verify the on-disk state of every path against its expected right-side
    // kind. `symlink_metadata` is used so symlinks are inspected, not
    // followed.
    for (_left_kind, right_kind, path) in &files {
        let wc_path = repo.working_copy_path().join(path);
        let maybe_metadata = wc_path.symlink_metadata();
        match right_kind {
            Kind::Missing => {
                assert_eq!(maybe_metadata.is_ok(), false, "{:?} should not exist", path);
            }
            Kind::Normal => {
                assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert_eq!(metadata.is_file(), true, "{:?} should be a file", path);
                assert_eq!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{:?} should not be executable",
                    path
                );
            }
            Kind::Executable => {
                assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert_eq!(metadata.is_file(), true, "{:?} should be a file", path);
                assert_ne!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{:?} should be executable",
                    path
                );
            }
            Kind::Symlink => {
                assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert_eq!(
                    metadata.file_type().is_symlink(),
                    true,
                    "{:?} should be a symlink",
                    path
                );
            }
            Kind::Tree => {
                assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert_eq!(metadata.is_dir(), true, "{:?} should be a directory", path);
            }
            Kind::GitSubmodule => {
                // Not supported for now
                assert_eq!(maybe_metadata.is_ok(), false, "{:?} should not exist", path);
            }
        };
    }
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_commit_racy_timestamps(use_git: bool) {
    // Tests that file modifications are detected even if they happen the same
    // millisecond as the updated working copy state.
    let settings = testutils::user_settings();
    let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git);
    let file_path = repo.working_copy_path().join("file");
    let mut previous_tree_id = repo.store().empty_tree_id().clone();
    let owned_wc = repo.working_copy().clone();
    let wc = owned_wc.lock().unwrap();
    // Rewrite and commit the file repeatedly; the tight loop makes
    // same-millisecond mtimes likely.
    for i in 0..100 {
        {
            // Inner scope so the file handle is closed (and the write flushed)
            // before committing the working copy.
            let mut file = OpenOptions::new()
                .create(true)
                .write(true)
                .open(&file_path)
                .unwrap();
            file.write_all(format!("contents {}", i).as_bytes())
                .unwrap();
        }
        // Each modification must be noticed, producing a new tree every time.
        let commit = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap());
        let new_tree_id = commit.tree().id().clone();
        assert_ne!(new_tree_id, previous_tree_id);
        previous_tree_id = new_tree_id;
    }
}

View file

@ -0,0 +1,155 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::thread;
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo_path::FileRepoPath;
use jj_lib::store::CommitId;
use jj_lib::testutils;
use jj_lib::working_copy::CheckoutError;
use std::collections::HashSet;
use std::sync::Arc;
use test_case::test_case;
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_concurrent_checkout(use_git: bool) {
// Test that we error out if a concurrent checkout is detected (i.e. if the
// current checkout changed on disk after we read it).
let settings = testutils::user_settings();
let (_temp_dir, repo1) = testutils::init_repo(&settings, use_git);
let commit1 = testutils::create_random_commit(&settings, &repo1)
.set_open(true)
.write_to_new_transaction(&repo1, "test");
let commit2 = testutils::create_random_commit(&settings, &repo1)
.set_open(true)
.write_to_new_transaction(&repo1, "test");
let commit3 = testutils::create_random_commit(&settings, &repo1)
.set_open(true)
.write_to_new_transaction(&repo1, "test");
// Check out commit1
let wc1 = repo1.working_copy_locked();
wc1.check_out(&repo1, commit1).unwrap();
// Check out commit2 from another process (simulated by another repo instance)
let repo2 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone());
repo2
.working_copy_locked()
.check_out(&repo2, commit2.clone())
.unwrap();
// Checking out another commit (via the first repo instance) should now fail.
assert_eq!(
wc1.check_out(&repo1, commit3),
Err(CheckoutError::ConcurrentCheckout)
);
// Check that the commit2 is still checked out on disk.
let repo3 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone());
assert_eq!(
repo3.working_copy_locked().current_tree_id(),
commit2.tree().id().clone()
);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_concurrent_commit(use_git: bool) {
// Test that concurrent working copy commits result in a chain of successors
// instead of divergence.
let settings = testutils::user_settings();
let (_temp_dir, mut repo1) = testutils::init_repo(&settings, use_git);
let owned_wc1 = repo1.working_copy().clone();
let wc1 = owned_wc1.lock().unwrap();
let commit1 = wc1.current_commit();
// Commit from another process (simulated by another repo instance)
let mut repo2 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone());
testutils::write_working_copy_file(&repo2, &FileRepoPath::from("file2"), "contents2");
let owned_wc2 = repo2.working_copy().clone();
let wc2 = owned_wc2.lock().unwrap();
let commit2 = wc2.commit(&settings, Arc::get_mut(&mut repo2).unwrap());
assert_eq!(commit2.predecessors(), vec![commit1]);
// Creating another commit (via the first repo instance) should result in a
// successor of the commit created from the other process.
testutils::write_working_copy_file(&repo1, &FileRepoPath::from("file3"), "contents3");
let commit3 = wc1.commit(&settings, Arc::get_mut(&mut repo1).unwrap());
assert_eq!(commit3.predecessors(), vec![commit2]);
}
#[test_case(false ; "local store")]
#[test_case(true ; "git store")]
fn test_checkout_parallel(use_git: bool) {
// Test that concurrent checkouts by different processes (simulated by using
// different repo instances) is safe.
let settings = testutils::user_settings();
let (_temp_dir, repo) = testutils::init_repo(&settings, use_git);
let store = repo.store();
let mut commit_ids = vec![];
for i in 0..100 {
let path = FileRepoPath::from(format!("file{}", i).as_str());
let tree = testutils::create_tree(&repo, &[(&path, "contents")]);
let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone())
.set_open(true)
.write_to_new_transaction(&repo, "test");
commit_ids.push(commit.id().clone());
}
// Create another commit just so we can test the update stats reliably from the
// first update
let tree = testutils::create_tree(&repo, &[(&FileRepoPath::from("other file"), "contents")]);
let mut tx = repo.start_transaction("test");
let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone())
.set_open(true)
.write_to_transaction(&mut tx);
repo.working_copy_locked().check_out(&repo, commit).unwrap();
tx.commit();
let mut threads = vec![];
let commit_ids_set: HashSet<CommitId> = commit_ids.iter().cloned().collect();
for commit_id in &commit_ids {
let commit_ids_set = commit_ids_set.clone();
let commit_id = commit_id.clone();
let settings = settings.clone();
let working_copy_path = repo.working_copy_path().clone();
let handle = thread::spawn(move || {
let mut repo = ReadonlyRepo::load(&settings, working_copy_path);
let owned_wc = repo.working_copy().clone();
let wc = owned_wc.lock().unwrap();
let commit = repo.store().get_commit(&commit_id).unwrap();
let stats = wc.check_out(&repo, commit).unwrap();
assert_eq!(stats.updated_files, 0);
assert_eq!(stats.added_files, 1);
assert_eq!(stats.removed_files, 1);
// Check that the working copy contains one of the commits. We may see a
// different commit than the one we just checked out, but since
// commit() should take the same lock as check_out(), commit()
// should never produce a different tree (resulting in a different commit).
let commit_after = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap());
assert!(commit_ids_set.contains(commit_after.id()));
});
threads.push(handle);
}
for thread in threads {
thread.join().ok().unwrap();
}
}

3
rustfmt.toml Normal file
View file

@ -0,0 +1,3 @@
max_width = 100
wrap_comments = true
error_on_line_overflow = true

1903
src/commands.rs Normal file

File diff suppressed because it is too large Load diff

817
src/graphlog.rs Normal file
View file

@ -0,0 +1,817 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::hash::Hash;
use std::io::Write;
/// An edge to another node in the graph
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Edge<T> {
    /// An edge to a node that will be rendered later. Direct edges are drawn
    /// as '|', indirect ones as ':' (presumably indicating elided
    /// intermediate nodes — see `straight_edge_no_space`).
    Present { target: T, direct: bool },
    /// An edge to a node that is not in the graph; drawn as '~' when closed.
    Missing,
}
impl<T> Edge<T> {
pub fn missing() -> Self {
Edge::Missing
}
pub fn direct(id: T) -> Self {
Edge::Present {
target: id,
direct: true,
}
}
pub fn indirect(id: T) -> Self {
Edge::Present {
target: id,
direct: false,
}
}
}
/// Renders a graph as ASCII art, one node at a time via `add_node`.
pub struct AsciiGraphDrawer<'writer, K> {
    // Destination for the rendered rows
    writer: &'writer mut dyn Write,
    // The columns currently being drawn: edges to nodes not yet rendered
    edges: Vec<Edge<K>>,
    // Remaining lines of the current node's text, stored in reverse so they
    // can be pop()ed in display order
    pending_text: Vec<Vec<u8>>,
}
impl<'writer, K> AsciiGraphDrawer<'writer, K>
where
    K: Clone + Eq + Hash,
{
    /// Creates a drawer that renders to `writer`.
    pub fn new(writer: &'writer mut dyn Write) -> Self {
        Self {
            writer,
            edges: Default::default(),
            pending_text: Default::default(),
        }
    }
    /// Renders one node. `edges` are the node's outgoing edges (to nodes that
    /// will be added later), `node_symbol` is drawn in the node's column, and
    /// `text` may span multiple lines, which are emitted alongside the
    /// following graph rows.
    pub fn add_node(&mut self, id: &K, edges: &[Edge<K>], node_symbol: &[u8], text: &[u8]) {
        assert!(self.pending_text.is_empty());
        // Split the text into lines, dropping the line after a final newline,
        // and reverse so maybe_write_pending_text() can pop() in order.
        for line in text.split(|x| x == &b'\n') {
            self.pending_text.push(line.to_vec());
        }
        if self.pending_text.last() == Some(&vec![]) {
            self.pending_text.pop().unwrap();
        }
        self.pending_text.reverse();
        // Check if an existing edge should be terminated by the new node. If there
        // is, draw the new node in the same column. Otherwise, insert it at the right.
        let edge_index = if let Some(edge_index) = self.index_by_target(id) {
            // This edge terminates in the node we're adding
            // If we're inserting a merge somewhere that's not the very right, the edges
            // right of it will move further right, so we need to prepare by inserting rows
            // of '\'.
            if edges.len() > 2 && edge_index < self.edges.len() - 1 {
                for i in 2..edges.len() {
                    for edge in self.edges.iter().take(edge_index + 1) {
                        AsciiGraphDrawer::straight_edge(&mut self.writer, &edge);
                    }
                    for _ in 0..i - 2 {
                        self.writer.write_all(b" ").unwrap();
                    }
                    for _ in edge_index + 1..self.edges.len() {
                        self.writer.write_all(b" \\").unwrap();
                    }
                    self.writer.write_all(b"\n").unwrap();
                }
            }
            self.edges.remove(edge_index);
            edge_index
        } else {
            self.edges.len()
        };
        // Draw the edges to the left of the new node
        for edge in self.edges.iter().take(edge_index) {
            AsciiGraphDrawer::straight_edge(&mut self.writer, &edge);
        }
        // Draw the new node
        self.writer.write_all(node_symbol).unwrap();
        // If it's a merge of many nodes, draw a vertical line to the right
        for _ in 3..edges.len() {
            self.writer.write_all(b"--").unwrap();
        }
        if edges.len() > 2 {
            self.writer.write_all(b"-.").unwrap();
        }
        self.writer.write_all(b" ").unwrap();
        // Draw the edges to the right of the new node
        for edge in self.edges.iter().skip(edge_index) {
            AsciiGraphDrawer::straight_edge(&mut self.writer, &edge);
        }
        if edges.len() > 1 {
            self.writer.write_all(b" ").unwrap();
        }
        self.maybe_write_pending_text();
        // Update the data model.
        for (i, edge) in edges.iter().enumerate() {
            self.edges.insert(edge_index + i, edge.clone());
        }
        // If it's a merge commit, insert a row of '\'.
        if edges.len() >= 2 {
            for edge in self.edges.iter().take(edge_index) {
                AsciiGraphDrawer::straight_edge(&mut self.writer, &edge);
            }
            AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[edge_index]);
            for _ in edge_index + 1..self.edges.len() {
                self.writer.write_all(b"\\ ").unwrap();
            }
            self.writer.write_all(b" ").unwrap();
            self.maybe_write_pending_text();
        }
        // Remember the current width so later rows (closing/merging edges,
        // which shrink self.edges) can pad to a consistent width.
        let pad_to_index = self.edges.len();
        // Close any edges to missing nodes.
        for (i, edge) in edges.iter().enumerate().rev() {
            if *edge == Edge::Missing {
                self.close_edge(edge_index + i, pad_to_index);
            }
        }
        // Merge new edges that share the same target.
        let mut source_index = 1;
        while source_index < self.edges.len() {
            if let Edge::Present { target, .. } = &self.edges[source_index] {
                if let Some(target_index) = self.index_by_target(target) {
                    // There already is an edge leading to the same target node. Mark that we
                    // want to merge the higher index into the lower index.
                    if source_index > target_index {
                        self.merge_edges(source_index, target_index, pad_to_index);
                        // Don't increment source_index.
                        continue;
                    }
                }
            }
            source_index += 1;
        }
        // Emit any remaining lines of text.
        while !self.pending_text.is_empty() {
            for edge in self.edges.iter() {
                AsciiGraphDrawer::straight_edge(&mut self.writer, &edge);
            }
            self.maybe_write_pending_text();
        }
    }
    /// Returns the column index of the edge leading to `id`, if any.
    /// index_by_target() always returns the leftmost match, since it scans
    /// from index 0.
    fn index_by_target(&self, id: &K) -> Option<usize> {
        for (i, edge) in self.edges.iter().enumerate() {
            match edge {
                Edge::Present { target, .. } if target == id => return Some(i),
                _ => {}
            }
        }
        None
    }
    /// Draws one column's vertical edge followed by a space.
    /// Not an instance method so the caller doesn't need mutable access to the
    /// whole struct.
    fn straight_edge(writer: &mut dyn Write, edge: &Edge<K>) {
        AsciiGraphDrawer::straight_edge_no_space(writer, edge);
        writer.write_all(b" ").unwrap();
    }
    /// Draws one column's vertical edge: '|' for direct/missing, ':' for
    /// indirect.
    /// Not an instance method so the caller doesn't need mutable access to the
    /// whole struct.
    fn straight_edge_no_space(writer: &mut dyn Write, edge: &Edge<K>) {
        match edge {
            Edge::Present { direct: true, .. } => {
                writer.write_all(b"|").unwrap();
            }
            Edge::Present { direct: false, .. } => {
                writer.write_all(b":").unwrap();
            }
            Edge::Missing => {
                writer.write_all(b"|").unwrap();
            }
        }
    }
    /// Joins the edge in column `source` into the same-target edge in column
    /// `target` (which must be to its left), drawing '/' rows and padding the
    /// output rows to `pad_to_index` columns.
    fn merge_edges(&mut self, source: usize, target: usize, pad_to_index: usize) {
        assert!(target < source);
        self.edges.remove(source);
        for i in 0..target {
            AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]);
        }
        if source == target + 1 {
            // If we're merging exactly one step to the left, draw a '/' to join the lines.
            AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[target]);
            for _ in source..self.edges.len() + 1 {
                self.writer.write_all(b"/ ").unwrap();
            }
            self.writer.write_all(b" ").unwrap();
            for _ in self.edges.len() + 1..pad_to_index {
                self.writer.write_all(b" ").unwrap();
            }
            self.maybe_write_pending_text();
        } else {
            // If we're merging more than one step to the left, we need two rows:
            // | |_|_|/
            // |/| | |
            AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[target]);
            for i in target + 1..source - 1 {
                AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[i]);
                self.writer.write_all(b"_").unwrap();
            }
            AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[source - 1]);
            for _ in source..self.edges.len() + 1 {
                self.writer.write_all(b"/ ").unwrap();
            }
            self.writer.write_all(b" ").unwrap();
            for _ in self.edges.len() + 1..pad_to_index {
                self.writer.write_all(b" ").unwrap();
            }
            self.maybe_write_pending_text();
            for i in 0..target {
                AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]);
            }
            AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[target]);
            self.writer.write_all(b"/").unwrap();
            for i in target + 1..self.edges.len() {
                AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]);
            }
            for _ in self.edges.len()..pad_to_index {
                self.writer.write_all(b" ").unwrap();
            }
            self.maybe_write_pending_text();
        }
    }
    /// Terminates the edge in column `source` with a '~', shifting any edges
    /// to its right one column left via '/'.
    fn close_edge(&mut self, source: usize, pad_to_index: usize) {
        self.edges.remove(source);
        for i in 0..source {
            AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]);
        }
        self.writer.write_all(b"~").unwrap();
        for _ in source..self.edges.len() {
            self.writer.write_all(b"/ ").unwrap();
        }
        self.writer.write_all(b" ").unwrap();
        for _ in self.edges.len() + 1..pad_to_index {
            self.writer.write_all(b" ").unwrap();
        }
        self.maybe_write_pending_text();
    }
    /// Writes the next pending text line (if any) and terminates the current
    /// output row with a newline.
    fn maybe_write_pending_text(&mut self) {
        if let Some(text) = self.pending_text.pop() {
            self.writer.write_all(&text).unwrap();
        }
        self.writer.write_all(b"\n").unwrap();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use indoc::indoc;
    // Each test renders a small graph and compares the full output string.
    // Nodes are added in topological order (children before parents).
    #[test]
    fn single_node() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&1, &[], b"@", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(String::from_utf8_lossy(&buffer), "@ node 1\n");
    }
    #[test]
    fn long_description() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&2, &[Edge::direct(1)], b"@", b"many\nlines\nof\ntext\n");
        graph.add_node(&1, &[], b"o", b"single line");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            @ many
            | lines
            | of
            | text
            o single line
            "
            }
        );
    }
    #[test]
    fn long_description_blank_lines() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(
            &2,
            &[Edge::direct(1)],
            b"@",
            b"\n\nmany\n\nlines\n\nof\n\ntext\n\n\n",
        );
        graph.add_node(&1, &[], b"o", b"single line");
        // A final newline is ignored but all other newlines are respected.
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            @
            |
            | many
            |
            | lines
            |
            | of
            |
            | text
            |
            |
            o single line
            "
            }
        );
    }
    #[test]
    fn chain() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&3, &[Edge::direct(2)], b"@", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            @ node 3
            o node 2
            o node 1
            "}
        );
    }
    #[test]
    fn interleaved_chains() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&7, &[Edge::direct(5)], b"o", b"node 7");
        graph.add_node(&6, &[Edge::direct(4)], b"o", b"node 6");
        graph.add_node(&5, &[Edge::direct(3)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(2)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"@", b"node 3");
        graph.add_node(&2, &[], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 7
            | o node 6
            o | node 5
            | o node 4
            @ | node 3
            | o node 2
            o node 1
            "}
        );
    }
    #[test]
    fn independent_nodes() {
        // Edges to missing nodes are closed with '~'.
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&3, &[Edge::missing()], b"o", b"node 3");
        graph.add_node(&2, &[Edge::missing()], b"o", b"node 2");
        graph.add_node(&1, &[Edge::missing()], b"@", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 3
            ~
            o node 2
            ~
            @ node 1
            ~
            "}
        );
    }
    #[test]
    fn left_chain_ends() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&4, &[Edge::direct(2)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::missing()], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 4
            | o node 3
            o | node 2
            ~/
            o node 1
            "}
        );
    }
    #[test]
    fn fork_multiple() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&4, &[Edge::direct(1)], b"@", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            @ node 4
            | o node 3
            |/
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn fork_multiple_chains() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&10, &[Edge::direct(7)], b"o", b"node 10");
        graph.add_node(&9, &[Edge::direct(6)], b"o", b"node 9");
        graph.add_node(&8, &[Edge::direct(5)], b"o", b"node 8");
        graph.add_node(&7, &[Edge::direct(4)], b"o", b"node 7");
        graph.add_node(&6, &[Edge::direct(3)], b"o", b"node 6");
        graph.add_node(&5, &[Edge::direct(2)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 10
            | o node 9
            | | o node 8
            o | | node 7
            | o | node 6
            | | o node 5
            o | | node 4
            | o | node 3
            |/ /
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn cross_over() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&5, &[Edge::direct(1)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(2)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 5
            | o node 4
            | | o node 3
            | |/
            |/|
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn cross_over_multiple() {
        // Merging more than one step to the left uses the two-row form
        // (see merge_edges).
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&7, &[Edge::direct(1)], b"o", b"node 7");
        graph.add_node(&6, &[Edge::direct(3)], b"o", b"node 6");
        graph.add_node(&5, &[Edge::direct(2)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 7
            | o node 6
            | | o node 5
            | | | o node 4
            | |_|/
            |/| |
            | o | node 3
            |/ /
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn cross_over_new_on_left() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&6, &[Edge::direct(3)], b"o", b"node 6");
        graph.add_node(&5, &[Edge::direct(2)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 6
            | o node 5
            | | o node 4
            o | | node 3
            | |/
            |/|
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn merge_multiple() {
        // An octopus merge gets a horizontal '-.' line to its extra parents.
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(
            &5,
            &[
                Edge::direct(1),
                Edge::direct(2),
                Edge::direct(3),
                Edge::direct(4),
            ],
            b"@",
            b"node 5\nmore\ntext",
        );
        graph.add_node(&4, &[Edge::missing()], b"o", b"node 4");
        graph.add_node(&3, &[Edge::missing()], b"o", b"node 3");
        graph.add_node(&2, &[Edge::missing()], b"o", b"node 2");
        graph.add_node(&1, &[Edge::missing()], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            @---. node 5
            |\ \ \ more
            | | | | text
            | | | o node 4
            | | | ~
            | | o node 3
            | | ~
            | o node 2
            | ~
            o node 1
            ~
            "}
        );
    }
    #[test]
    fn fork_merge_in_central_edge() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&8, &[Edge::direct(1)], b"o", b"node 8");
        graph.add_node(&7, &[Edge::direct(5)], b"o", b"node 7");
        graph.add_node(
            &6,
            &[Edge::direct(2)],
            b"o",
            b"node 6\nwith\nsome\nmore\nlines",
        );
        graph.add_node(&5, &[Edge::direct(4), Edge::direct(3)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 8
            | o node 7
            | | o node 6
            | | | with
            | | | some
            | | | more
            | | | lines
            | o | node 5
            | |\ \
            | o | | node 4
            |/ / /
            | o | node 3
            |/ /
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn fork_merge_multiple() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&6, &[Edge::direct(5)], b"o", b"node 6");
        graph.add_node(
            &5,
            &[Edge::direct(2), Edge::direct(3), Edge::direct(4)],
            b"o",
            b"node 5",
        );
        graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 6
            o-. node 5
            |\ \
            | | o node 4
            | o | node 3
            | |/
            o | node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn fork_merge_multiple_in_central_edge() {
        // A many-parent merge inserted left of existing edges first shifts
        // those edges right with rows of '\' (see add_node).
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&10, &[Edge::direct(1)], b"o", b"node 10");
        graph.add_node(&9, &[Edge::direct(7)], b"o", b"node 9");
        graph.add_node(&8, &[Edge::direct(2)], b"o", b"node 8");
        graph.add_node(
            &7,
            &[
                Edge::direct(6),
                Edge::direct(5),
                Edge::direct(4),
                Edge::direct(3),
            ],
            b"o",
            b"node 7",
        );
        graph.add_node(&6, &[Edge::direct(1)], b"o", b"node 6");
        graph.add_node(&5, &[Edge::direct(1)], b"o", b"node 5");
        graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4");
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2");
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 10
            | o node 9
            | | o node 8
            | | \
            | | \
            | o---. | node 7
            | |\ \ \ \
            | o | | | | node 6
            |/ / / / /
            | o | | | node 5
            |/ / / /
            | o | | node 4
            |/ / /
            | o | node 3
            |/ /
            | o node 2
            |/
            o node 1
            "}
        );
    }
    #[test]
    fn merge_multiple_missing_edges() {
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(
            &1,
            &[
                Edge::missing(),
                Edge::missing(),
                Edge::missing(),
                Edge::missing(),
            ],
            b"@",
            b"node 1\nwith\nmany\nlines\nof\ntext",
        );
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            @---. node 1
            |\ \ \ with
            | | | ~ many
            | | ~ lines
            | ~ of
            ~ text
            "}
        );
    }
    #[test]
    fn merge_missing_edges_and_fork() {
        // Mixes indirect edges (drawn as ':'), missing edges, and a fork.
        let mut buffer = vec![];
        let mut graph = AsciiGraphDrawer::new(&mut buffer);
        graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3");
        graph.add_node(
            &2,
            &[
                Edge::missing(),
                Edge::indirect(1),
                Edge::missing(),
                Edge::indirect(1),
            ],
            b"o",
            b"node 2\nwith\nmany\nlines\nof\ntext",
        );
        graph.add_node(&1, &[], b"o", b"node 1");
        println!("{}", String::from_utf8_lossy(&buffer));
        assert_eq!(
            String::from_utf8_lossy(&buffer),
            indoc! {r"
            o node 3
            | o---. node 2
            | |\ \ \ with
            | | : ~/ many
            | ~/ / lines
            |/ / of
            |/ text
            o node 1
            "}
        );
    }
}

28
src/lib.rs Normal file
View file

@ -0,0 +1,28 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Ignoring a must-use value (e.g. a Result) is a hard error in this crate.
#![deny(unused_must_use)]
// Make pest_derive's macros available crate-wide.
#[macro_use]
extern crate pest_derive;
pub mod commands;
pub mod graphlog;
pub mod styler;
pub mod template_parser;
pub mod templater;
pub mod ui;
// TODO: make this a separate crate?
pub mod testutils;

27
src/main.rs Normal file
View file

@ -0,0 +1,27 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj::commands::dispatch;
use jj::ui::Ui;
use jj_lib::settings::UserSettings;
/// Entry point: load the user's settings, set up a terminal UI, and hand
/// control to the command dispatcher, exiting with its status code.
fn main() {
    // TODO: Parse arguments here first, at least --config, and user configs
    // from the repo that -R points to.
    let settings = UserSettings::for_user().unwrap();
    let ui = Ui::for_terminal(settings);
    let exit_code = dispatch(ui, &mut std::env::args_os());
    std::process::exit(exit_code);
}

198
src/styler.rs Normal file
View file

@ -0,0 +1,198 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::io::{Error, Read, Write};
use jj_lib::settings::UserSettings;
/// Lets the caller label strings and translates the labels to colors
pub trait Styler: Write {
    /// Writes raw bytes to the output, panicking on I/O errors.
    fn write_bytes(&mut self, data: &[u8]) {
        self.write_all(data).unwrap()
    }
    /// Writes a string to the output.
    fn write_str(&mut self, text: &str) {
        self.write_all(text.as_bytes()).unwrap()
    }
    /// Copies everything remaining in `reader` to the output.
    fn write_from_reader(&mut self, reader: &mut dyn Read) {
        let mut buffer = vec![];
        reader.read_to_end(&mut buffer).unwrap();
        self.write_all(buffer.as_slice()).unwrap()
    }
    /// Pushes a label; affects styling of subsequent writes.
    fn add_label(&mut self, label: String);
    /// Pops the most recently pushed label.
    fn remove_label(&mut self);
}
/// A `Styler` that ignores labels and writes plain, uncolored text.
pub struct PlainTextStyler<'a> {
    output: Box<dyn Write + 'a>,
}
impl<'a> PlainTextStyler<'a> {
pub fn new(output: Box<dyn Write + 'a>) -> PlainTextStyler<'a> {
Self { output }
}
}
// Delegate all writes unmodified to the wrapped output.
impl Write for PlainTextStyler<'_> {
    fn write(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.output.write(data)
    }
    fn flush(&mut self) -> Result<(), Error> {
        self.output.flush()
    }
}
impl Styler for PlainTextStyler<'_> {
    // Labels are no-ops: plain text output carries no styling.
    fn add_label(&mut self, _label: String) {}
    fn remove_label(&mut self) {}
}
/// A `Styler` that translates labels into ANSI color escape sequences.
pub struct ColorStyler<'a> {
    output: Box<dyn Write + 'a>,
    // Map from space-separated label words to a color name (see config_colors)
    colors: HashMap<String, String>,
    // Stack of currently active labels
    labels: Vec<String>,
    // Memoized escape sequence per label stack
    cached_colors: HashMap<Vec<String>, Vec<u8>>,
    // Escape sequence most recently made active on the output
    current_color: Vec<u8>,
}
/// Builds the label-to-color map: hard-coded defaults, overridden by any
/// entries in the user's "colors" config table.
fn config_colors(user_settings: &UserSettings) -> HashMap<String, String> {
    let defaults = [
        ("error", "red"),
        ("commit_id", "blue"),
        ("commit_id open", "green"),
        ("change_id", "magenta"),
        ("author", "yellow"),
        ("committer", "yellow"),
        ("pruned", "red"),
        ("obsolete", "red"),
        ("orphan", "red"),
        ("divergent", "red"),
        ("conflict", "red"),
        ("diff header", "yellow"),
        ("diff left", "red"),
        ("diff right", "green"),
        ("op-log id", "blue"),
        ("op-log user", "yellow"),
        ("op-log time", "magenta"),
    ];
    let mut result = HashMap::new();
    for (key, value) in defaults.iter() {
        result.insert(String::from(*key), String::from(*value));
    }
    // User-configured colors take precedence over the defaults.
    if let Ok(table) = user_settings.config().get_table("colors") {
        for (key, value) in table {
            result.insert(key, value.to_string());
        }
    }
    result
}
impl<'a> ColorStyler<'a> {
    /// Creates a styler writing ANSI escape codes to `output`, with color
    /// assignments from `user_settings` (see `config_colors`).
    pub fn new(output: Box<dyn Write + 'a>, user_settings: &UserSettings) -> ColorStyler<'a> {
        ColorStyler {
            output,
            colors: config_colors(user_settings),
            labels: vec![],
            cached_colors: HashMap::new(),
            // Start from the terminal's reset/default state.
            current_color: b"\x1b[0m".to_vec(),
        }
    }
    /// Returns the escape sequence for the current label stack: the config
    /// key with the most words all present in the stack wins. Results are
    /// memoized per label stack.
    fn current_color(&mut self) -> Vec<u8> {
        if let Some(cached) = self.cached_colors.get(&self.labels) {
            cached.clone()
        } else {
            // NOTE(review): ties between equally-specific keys are broken by
            // HashMap iteration order, which is not deterministic — confirm
            // whether that matters for overlapping color configs.
            let mut best_match = (-1, "");
            for (key, value) in &self.colors {
                let mut num_matching = 0;
                let mut valid = true;
                for label in key.split_whitespace() {
                    if !self.labels.contains(&label.to_string()) {
                        valid = false;
                        break;
                    }
                    num_matching += 1;
                }
                if !valid {
                    continue;
                }
                if num_matching >= best_match.0 {
                    best_match = (num_matching, value)
                }
            }
            // No matching key leaves best_match.1 == "", which maps to reset.
            let color = self.color_for_name(&best_match.1);
            self.cached_colors
                .insert(self.labels.clone(), color.clone());
            color
        }
    }
    /// Maps a color name to its ANSI escape sequence; unknown names
    /// (including "") map to the reset sequence.
    fn color_for_name(&self, color_name: &str) -> Vec<u8> {
        match color_name {
            "black" => b"\x1b[30m".to_vec(),
            "red" => b"\x1b[31m".to_vec(),
            "green" => b"\x1b[32m".to_vec(),
            "yellow" => b"\x1b[33m".to_vec(),
            "blue" => b"\x1b[34m".to_vec(),
            "magenta" => b"\x1b[35m".to_vec(),
            "cyan" => b"\x1b[36m".to_vec(),
            "white" => b"\x1b[37m".to_vec(),
            "bright black" => b"\x1b[1;30m".to_vec(),
            "bright red" => b"\x1b[1;31m".to_vec(),
            "bright green" => b"\x1b[1;32m".to_vec(),
            "bright yellow" => b"\x1b[1;33m".to_vec(),
            "bright blue" => b"\x1b[1;34m".to_vec(),
            "bright magenta" => b"\x1b[1;35m".to_vec(),
            "bright cyan" => b"\x1b[1;36m".to_vec(),
            "bright white" => b"\x1b[1;37m".to_vec(),
            _ => b"\x1b[0m".to_vec(),
        }
    }
}
// Delegate all writes unmodified to the wrapped output; escape sequences are
// injected separately by add_label/remove_label.
impl Write for ColorStyler<'_> {
    fn write(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.output.write(data)
    }
    fn flush(&mut self) -> Result<(), Error> {
        self.output.flush()
    }
}
impl Styler for ColorStyler<'_> {
    fn add_label(&mut self, label: String) {
        self.labels.push(label);
        // Only emit an escape sequence when the effective color changes.
        let new_color = self.current_color();
        if new_color != self.current_color {
            self.output.write_all(&new_color).unwrap();
        }
        self.current_color = new_color;
    }
    fn remove_label(&mut self) {
        self.labels.pop();
        // Same as add_label: emit only on an actual color change.
        let new_color = self.current_color();
        if new_color != self.current_color {
            self.output.write_all(&new_color).unwrap();
        }
        self.current_color = new_color;
    }
}

52
src/template.pest Normal file
View file

@ -0,0 +1,52 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Example:
// "commit: " short(commit_id) "\n"
// predecessors % ("predecessor: " commit_id)
// parents % (commit_id " is a parent of " super.commit_id)
// A single whitespace token; runs of these separate concatenated terms.
whitespace = { " " | "\n" }
// Escape sequences permitted inside string literals: \n, \", \\.
escape = @{ "\\" ~ ("n" | "\"" | "\\") }
// Any character that needs no escaping inside a literal.
literal_char = @{ !("\"" | "\\") ~ ANY }
raw_literal = @{ literal_char+ }
// A double-quoted string literal: raw character runs mixed with escapes.
literal = { "\"" ~ (raw_literal | escape)* ~ "\"" }
// Keywords, function names, and method names.
identifier = @{ (ASCII_ALPHANUMERIC | "_")+ }
function = { identifier ~ "(" ~ template ~ ("," ~ template)* ~ ")" }
// A ".name(args)" call, optionally followed by more chained methods.
method = { "." ~ identifier ~ "(" ~ template ~ ("," ~ template)* ~ ")" ~ maybe_method }
// Zero or one method; the empty alternative terminates a method chain.
maybe_method = { method | "" }
// Note that "x(y)" is a function call but "x (y)" concatenates "x" and "y"
term = {
    ("(" ~ term ~ ")") ~ maybe_method
    | function ~ maybe_method
    | identifier ~ maybe_method
    | literal ~ maybe_method
    | ""
}
// Two or more whitespace-separated terms; output is their concatenation.
list = {
    ("(" ~ list ~ ")")
    | term ~ (whitespace+ ~ term)+
}
template = {
    list
    | term
}

416
src/template_parser.rs Normal file
View file

@ -0,0 +1,416 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate pest;
use pest::iterators::Pair;
use pest::iterators::Pairs;
use pest::Parser;
use jj_lib::commit::Commit;
use jj_lib::store::{CommitId, Signature};
use crate::styler::PlainTextStyler;
use crate::templater::{
AuthorProperty, ChangeIdProperty, CommitIdKeyword, CommitterProperty, ConditionalTemplate,
ConflictProperty, ConstantTemplateProperty, CurrentCheckoutProperty, DescriptionProperty,
DivergentProperty, DynamicLabelTemplate, LabelTemplate, ListTemplate, LiteralTemplate,
ObsoleteProperty, OpenProperty, OrphanProperty, PrunedProperty, StringPropertyTemplate,
Template, TemplateFunction, TemplateProperty,
};
use jj_lib::repo::Repo;
/// Pest-generated parser for the template language defined in
/// `src/template.pest`. The derive provides `TemplateParser::parse`.
#[derive(Parser)]
#[grammar = "template.pest"]
pub struct TemplateParser;
/// Decodes a `Rule::literal` pair into its string value, resolving the
/// escape sequences the grammar allows (\n, \", \\).
fn parse_string_literal(pair: Pair<Rule>) -> String {
    assert_eq!(pair.as_rule(), Rule::literal);
    let mut text = String::new();
    for piece in pair.into_inner() {
        match piece.as_rule() {
            // Raw runs are copied through verbatim.
            Rule::raw_literal => text.push_str(piece.as_str()),
            // An escape is exactly two bytes: backslash + escape char.
            Rule::escape => {
                let escaped = piece.as_str().as_bytes()[1] as char;
                match escaped {
                    'n' => text.push('\n'),
                    '"' => text.push('"'),
                    '\\' => text.push('\\'),
                    char => panic!("invalid escape: \\{:?}", char),
                }
            }
            _ => panic!("unexpected part of string: {:?}", piece),
        }
    }
    text
}
/// The `.short()` method on strings: keeps only the first 12 characters.
struct StringShort;

impl TemplateProperty<String, String> for StringShort {
    fn extract(&self, context: &String) -> String {
        let mut short = String::with_capacity(12);
        short.extend(context.chars().take(12));
        short
    }
}
/// The `.first_line()` method on strings: truncates to the first line.
struct StringFirstLine;

impl TemplateProperty<String, String> for StringFirstLine {
    fn extract(&self, context: &String) -> String {
        // `lines()` yields no items for an empty string, so fall back to ""
        // instead of panicking on unwrap.
        context.lines().next().unwrap_or("").to_string()
    }
}
/// The `.short()` method on a commit id: delegates to the shared
/// shortened-id formatting in CommitIdKeyword.
struct CommitIdShortest;

impl TemplateProperty<CommitId, String> for CommitIdShortest {
    fn extract(&self, context: &CommitId) -> String {
        CommitIdKeyword::shortest_format(context.clone())
    }
}
/// The `.name()` method on a signature (author/committer name).
struct SignatureName;

impl TemplateProperty<Signature, String> for SignatureName {
    fn extract(&self, context: &Signature) -> String {
        context.name.clone()
    }
}
/// The `.email()` method on a signature (author/committer email).
struct SignatureEmail;

impl TemplateProperty<Signature, String> for SignatureEmail {
    fn extract(&self, context: &Signature) -> String {
        context.email.clone()
    }
}
/// Parses a (possibly empty) chain of `.method()` calls and composes them
/// onto `input_property`. Dispatch depends on the current property's type,
/// since the set of valid methods differs per type.
fn parse_method_chain<'a, I: 'a>(
    pair: Pair<Rule>,
    input_property: Property<'a, I>,
) -> Property<'a, I> {
    assert_eq!(pair.as_rule(), Rule::maybe_method);
    if pair.as_str().is_empty() {
        // maybe_method matched its empty alternative: end of the chain.
        input_property
    } else {
        // The single inner pair is the Rule::method node.
        let method = pair.into_inner().next().unwrap();
        match input_property {
            Property::String(property) => {
                let next_method = parse_string_method(method);
                next_method.after(property)
            }
            Property::Boolean(property) => {
                let next_method = parse_boolean_method(method);
                next_method.after(property)
            }
            Property::CommitId(property) => {
                let next_method = parse_commit_id_method(method);
                next_method.after(property)
            }
            Property::Signature(property) => {
                let next_method = parse_signature_method(method);
                next_method.after(property)
            }
        }
    }
}
/// Parses one method call on a string-typed property ("short" or
/// "first_line"), then recurses into any chained methods that follow.
fn parse_string_method<'a>(method: Pair<Rule>) -> Property<'a, String> {
    assert_eq!(method.as_rule(), Rule::method);
    let mut inner = method.into_inner();
    let name = inner.next().unwrap();
    // TODO: validate arguments
    let this_function = match name.as_str() {
        "short" => Property::String(Box::new(StringShort)),
        "first_line" => Property::String(Box::new(StringFirstLine)),
        name => panic!("no such string method: {}", name),
    };
    // The trailing maybe_method is the grammar's last child; chain into it.
    let chain_method = inner.last().unwrap();
    parse_method_chain(chain_method, this_function)
}
/// Parses one method call on a boolean-typed property. No boolean methods
/// exist yet, so this always panics with a "no such method" message.
fn parse_boolean_method<'a>(method: Pair<Rule>) -> Property<'a, bool> {
    // parse_method_chain hands us a Rule::method pair (like the other
    // parse_*_method functions); asserting Rule::maybe_method here made the
    // assert fire instead of the intended "no such method" panic below.
    assert_eq!(method.as_rule(), Rule::method);
    let mut inner = method.into_inner();
    let name = inner.next().unwrap();
    // TODO: validate arguments
    panic!("no such boolean method: {}", name.as_str());
}
// TODO: pass a context to the returned function (we need the repo to find the
// shortest unambiguous prefix)
/// Parses one method call on a commit-id-typed property ("short"), then
/// recurses into any chained methods that follow.
fn parse_commit_id_method<'a>(method: Pair<Rule>) -> Property<'a, CommitId> {
    assert_eq!(method.as_rule(), Rule::method);
    let mut inner = method.into_inner();
    let name = inner.next().unwrap();
    // TODO: validate arguments
    let this_function = match name.as_str() {
        // Note: .short() changes the property's type from CommitId to String.
        "short" => Property::String(Box::new(CommitIdShortest)),
        name => panic!("no such commit id method: {}", name),
    };
    let chain_method = inner.last().unwrap();
    parse_method_chain(chain_method, this_function)
}
/// Parses one method call on a signature-typed property ("name" or
/// "email"), then recurses into any chained methods that follow.
fn parse_signature_method<'a>(method: Pair<Rule>) -> Property<'a, Signature> {
    assert_eq!(method.as_rule(), Rule::method);
    let mut inner = method.into_inner();
    let name = inner.next().unwrap();
    // TODO: validate arguments
    let this_function: Property<'a, Signature> = match name.as_str() {
        // TODO: Automatically label these too (so author.name() gets
        // labels "author" *and" "name". Perhaps drop parentheses
        // from syntax for that? Or maybe this should be using
        // syntax for nested records (e.g.
        // `author % (name "<" email ">")`)?
        "name" => Property::String(Box::new(SignatureName)),
        "email" => Property::String(Box::new(SignatureEmail)),
        // Fixed copy-paste: this message previously said "commit id method".
        name => panic!("no such signature method: {}", name),
    };
    let chain_method = inner.last().unwrap();
    parse_method_chain(chain_method, this_function)
}
/// A typed, boxed TemplateProperty. The variant records the property's
/// output type so method calls on it can be checked at parse time.
enum Property<'a, I> {
    String(Box<dyn TemplateProperty<I, String> + 'a>),
    Boolean(Box<dyn TemplateProperty<I, bool> + 'a>),
    CommitId(Box<dyn TemplateProperty<I, CommitId> + 'a>),
    Signature(Box<dyn TemplateProperty<I, Signature> + 'a>),
}
impl<'a, I: 'a> Property<'a, I> {
    /// Composes `first` before `self`: the result extracts with `first`
    /// and feeds its output into this property. Used to build method
    /// chains like `commit_id.short()` from right to left.
    fn after<C: 'a>(self, first: Box<dyn TemplateProperty<C, I> + 'a>) -> Property<'a, C> {
        match self {
            Property::String(property) => Property::String(Box::new(TemplateFunction::new(
                first,
                Box::new(move |value| property.extract(&value)),
            ))),
            Property::Boolean(property) => Property::Boolean(Box::new(TemplateFunction::new(
                first,
                Box::new(move |value| property.extract(&value)),
            ))),
            Property::CommitId(property) => Property::CommitId(Box::new(TemplateFunction::new(
                first,
                Box::new(move |value| property.extract(&value)),
            ))),
            Property::Signature(property) => Property::Signature(Box::new(TemplateFunction::new(
                first,
                Box::new(move |value| property.extract(&value)),
            ))),
        }
    }
}
/// Resolves a bare identifier in a commit template to the corresponding
/// property. Returns the property together with the keyword text, which
/// is used as the output label for styling.
fn parse_commit_keyword<'a, 'r: 'a>(
    repo: &'r dyn Repo,
    pair: Pair<Rule>,
) -> (Property<'a, Commit>, String) {
    assert_eq!(pair.as_rule(), Rule::identifier);
    let property = match pair.as_str() {
        "description" => Property::String(Box::new(DescriptionProperty)),
        "change_id" => Property::String(Box::new(ChangeIdProperty)),
        "commit_id" => Property::CommitId(Box::new(CommitIdKeyword)),
        "author" => Property::Signature(Box::new(AuthorProperty)),
        "committer" => Property::Signature(Box::new(CommitterProperty)),
        "open" => Property::Boolean(Box::new(OpenProperty)),
        "pruned" => Property::Boolean(Box::new(PrunedProperty)),
        // These need the repo to consult the view/evolution state.
        "current_checkout" => Property::Boolean(Box::new(CurrentCheckoutProperty { repo })),
        "obsolete" => Property::Boolean(Box::new(ObsoleteProperty { repo })),
        "orphan" => Property::Boolean(Box::new(OrphanProperty { repo })),
        "divergent" => Property::Boolean(Box::new(DivergentProperty { repo })),
        "conflict" => Property::Boolean(Box::new(ConflictProperty)),
        name => panic!("unexpected identifier: {}", name),
    };
    (property, pair.as_str().to_string())
}
/// Converts a property of any supported type into a string property so it
/// can be rendered: booleans become "true"/"false", commit ids their
/// default (full hex) form, and signatures just the name.
fn coerce_to_string<'a, I: 'a>(
    property: Property<'a, I>,
) -> Box<dyn TemplateProperty<I, String> + 'a> {
    match property {
        Property::String(property) => property,
        Property::Boolean(property) => Box::new(TemplateFunction::new(
            property,
            Box::new(|value| String::from(if value { "true" } else { "false" })),
        )),
        Property::CommitId(property) => Box::new(TemplateFunction::new(
            property,
            Box::new(CommitIdKeyword::default_format),
        )),
        Property::Signature(property) => Box::new(TemplateFunction::new(
            property,
            Box::new(|signature| signature.name),
        )),
    }
}
/// Extracts a boolean property from a `term` used as a condition (the
/// first argument of `if()`). Currently only bare boolean keywords work.
fn parse_boolean_commit_property<'a, 'r: 'a>(
    repo: &'r dyn Repo,
    pair: Pair<Rule>,
) -> Box<dyn TemplateProperty<Commit, bool> + 'a> {
    let mut inner = pair.into_inner();
    let pair = inner.next().unwrap();
    // NOTE(review): a method chain on the condition is parsed but discarded
    // here — only the bare keyword is consulted. TODO confirm intended.
    let _method = inner.next().unwrap();
    assert!(inner.next().is_none());
    match pair.as_rule() {
        Rule::identifier => match parse_commit_keyword(repo, pair.clone()).0 {
            Property::Boolean(property) => property,
            _ => panic!("cannot yet use this as boolean: {:?}", pair),
        },
        _ => panic!("cannot yet use this as boolean: {:?}", pair),
    }
}
/// Parses a single `term` of a commit template into a renderable Template:
/// a string literal, a keyword (identifier), or a function call
/// (`label(...)`, `if(...)`), each optionally followed by a method chain.
fn parse_commit_term<'a, 'r: 'a>(
    repo: &'r dyn Repo,
    pair: Pair<Rule>,
) -> Box<dyn Template<Commit> + 'a> {
    assert_eq!(pair.as_rule(), Rule::term);
    if pair.as_str().is_empty() {
        // The grammar allows an empty term; it renders as nothing.
        Box::new(LiteralTemplate(String::new()))
    } else {
        // Every non-empty term is an expression plus a (possibly empty)
        // trailing maybe_method.
        let mut inner = pair.into_inner();
        let expr = inner.next().unwrap();
        let maybe_method = inner.next().unwrap();
        assert!(inner.next().is_none());
        match expr.as_rule() {
            Rule::literal => {
                let text = parse_string_literal(expr);
                if maybe_method.as_str().is_empty() {
                    Box::new(LiteralTemplate(text))
                } else {
                    // Methods on a literal: wrap the text in a constant
                    // property so the method chain has something to act on.
                    let input_property =
                        Property::String(Box::new(ConstantTemplateProperty { output: text }));
                    let property = parse_method_chain(maybe_method, input_property);
                    let string_property = coerce_to_string(property);
                    Box::new(StringPropertyTemplate {
                        property: string_property,
                    })
                }
            }
            Rule::identifier => {
                // Keyword output is labeled with the keyword name for styling.
                let (term_property, labels) = parse_commit_keyword(repo, expr);
                let property = parse_method_chain(maybe_method, term_property);
                let string_property = coerce_to_string(property);
                Box::new(LabelTemplate::new(
                    Box::new(StringPropertyTemplate {
                        property: string_property,
                    }),
                    labels,
                ))
            }
            Rule::function => {
                let mut inner = expr.into_inner();
                let name = inner.next().unwrap().as_str();
                match name {
                    "label" => {
                        // label(<label-template>, <content>): the first
                        // argument is itself a template, evaluated per commit.
                        let label_pair = inner.next().unwrap();
                        let label_template = parse_commit_template_rule(
                            repo,
                            label_pair.into_inner().next().unwrap(),
                        );
                        let arg_template = match inner.next() {
                            None => panic!("label() requires two arguments"),
                            Some(pair) => pair,
                        };
                        if inner.next().is_some() {
                            panic!("label() accepts only two arguments")
                        }
                        let content: Box<dyn Template<Commit> + 'a> =
                            parse_commit_template_rule(repo, arg_template);
                        // Render the label template to a plain string at
                        // format time (no styling inside label names).
                        let get_labels = move |commit: &Commit| -> String {
                            let mut buf: Vec<u8> = vec![];
                            {
                                let writer = Box::new(&mut buf);
                                let mut styler = PlainTextStyler::new(writer);
                                label_template.format(commit, &mut styler);
                            }
                            String::from_utf8(buf).unwrap()
                        };
                        Box::new(DynamicLabelTemplate::new(content, Box::new(get_labels)))
                    }
                    "if" => {
                        // if(<condition>, <then> [, <else>])
                        let condition_pair = inner.next().unwrap();
                        let condition_template = condition_pair.into_inner().next().unwrap();
                        let condition = parse_boolean_commit_property(repo, condition_template);
                        let true_template = match inner.next() {
                            None => panic!("if() requires at least two arguments"),
                            Some(pair) => parse_commit_template_rule(repo, pair),
                        };
                        let false_template = match inner.next() {
                            None => None,
                            Some(pair) => Some(parse_commit_template_rule(repo, pair)),
                        };
                        if inner.next().is_some() {
                            panic!("if() accepts at most three arguments")
                        }
                        Box::new(ConditionalTemplate::new(
                            condition,
                            true_template,
                            false_template,
                        ))
                    }
                    name => panic!("function {} not implemented", name),
                }
            }
            other => panic!("unexpected term: {:?}", other),
        }
    }
}
/// Recursively turns a parsed grammar node (template, list, or term) into
/// a renderable Template over commits.
fn parse_commit_template_rule<'a, 'r: 'a>(
    repo: &'r dyn Repo,
    pair: Pair<Rule>,
) -> Box<dyn Template<Commit> + 'a> {
    match pair.as_rule() {
        Rule::template => {
            // A template wraps exactly one list or term.
            let mut inner = pair.into_inner();
            let formatter = parse_commit_template_rule(repo, inner.next().unwrap());
            assert!(inner.next().is_none());
            formatter
        }
        Rule::term => parse_commit_term(repo, pair),
        Rule::list => {
            // A list concatenates its terms in order.
            let mut formatters: Vec<Box<dyn Template<Commit>>> = vec![];
            for inner_pair in pair.into_inner() {
                formatters.push(parse_commit_template_rule(repo, inner_pair));
            }
            Box::new(ListTemplate(formatters))
        }
        // NOTE(review): other rules (e.g. whitespace) are silently rendered
        // as empty output rather than rejected.
        _ => Box::new(LiteralTemplate(String::new())),
    }
}
/// Parses `template_text` into a commit template. Panics on syntax errors
/// and when the parser stops before consuming the whole input (pest stops
/// at the first position where no rule matches).
pub fn parse_commit_template<'a, 'r: 'a>(
    repo: &'r dyn Repo,
    template_text: &str,
) -> Box<dyn Template<Commit> + 'a> {
    let mut pairs: Pairs<Rule> = TemplateParser::parse(Rule::template, template_text).unwrap();
    let first_pair = pairs.next().unwrap();
    assert!(pairs.next().is_none());
    // A successful parse that ends early means trailing garbage in the input.
    if first_pair.as_span().end() != template_text.len() {
        panic!(
            "failed to parse template past position {}",
            first_pair.as_span().end()
        );
    }
    parse_commit_template_rule(repo, first_pair)
}

327
src/templater.rs Normal file
View file

@ -0,0 +1,327 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::BorrowMut;
use std::ops::Add;
use jj_lib::commit::Commit;
use jj_lib::repo::Repo;
use jj_lib::store::{CommitId, Signature};
use crate::styler::Styler;
/// Something that can render itself for a given context (e.g. a Commit)
/// into a styler.
pub trait Template<C> {
    fn format(&self, context: &C, styler: &mut dyn Styler);
}
// TODO: Extract a trait for this type?
/// Pairs a template with a styler so callers can format multiple contexts
/// to the same output without re-threading the styler each time.
pub struct TemplateFormatter<'s, 't: 's, C> {
    template: Box<dyn Template<C> + 't>,
    styler: &'s mut dyn Styler,
}

impl<'s, 't: 's, C> TemplateFormatter<'s, 't, C> {
    pub fn new(template: Box<dyn Template<C> + 't>, styler: &'s mut dyn Styler) -> Self {
        TemplateFormatter { template, styler }
    }

    pub fn format<'c, 'a: 'c>(&'a mut self, context: &'c C) {
        // borrow_mut() reborrows the stored &mut dyn Styler for this call.
        self.template.format(context, self.styler.borrow_mut());
    }
}
/// A template that always emits the same fixed text.
pub struct LiteralTemplate(pub String);

impl<C> Template<C> for LiteralTemplate {
    fn format(&self, _context: &C, styler: &mut dyn Styler) {
        let LiteralTemplate(text) = self;
        styler.write_str(text)
    }
}
// TODO: figure out why this lifetime is needed
/// Wraps a template so its output is styled under a fixed set of labels.
pub struct LabelTemplate<'a, C> {
    content: Box<dyn Template<C> + 'a>,
    labels: Vec<String>,
}

impl<'a, C> LabelTemplate<'a, C> {
    /// `labels` is split on whitespace; each word becomes one label.
    pub fn new(content: Box<dyn Template<C> + 'a>, labels: String) -> Self {
        let labels = labels
            .split_whitespace()
            .map(String::from)
            .collect::<Vec<String>>();
        LabelTemplate { content, labels }
    }
}

impl<'a, C> Template<C> for LabelTemplate<'a, C> {
    fn format(&self, context: &C, styler: &mut dyn Styler) {
        // Push every label, render the content, then pop the same number so
        // the styler's label stack stays balanced.
        for label in self.labels.iter() {
            styler.add_label(label.clone());
        }
        self.content.format(context, styler);
        for _ in self.labels.iter() {
            styler.remove_label();
        }
    }
}
// TODO: figure out why this lifetime is needed
/// Like LabelTemplate, but the labels are computed from the context at
/// format time (used by the `label()` template function).
pub struct DynamicLabelTemplate<'a, C> {
    content: Box<dyn Template<C> + 'a>,
    label_property: Box<dyn Fn(&C) -> String + 'a>,
}

impl<'a, C> DynamicLabelTemplate<'a, C> {
    pub fn new(
        content: Box<dyn Template<C> + 'a>,
        label_property: Box<dyn Fn(&C) -> String + 'a>,
    ) -> Self {
        DynamicLabelTemplate {
            content,
            label_property,
        }
    }
}

impl<'a, C> Template<C> for DynamicLabelTemplate<'a, C> {
    fn format(&self, context: &C, styler: &mut dyn Styler) {
        // Compute the label string for this context, then split it into
        // individual whitespace-separated labels.
        let labels = self.label_property.as_ref()(context);
        let labels: Vec<String> = labels
            .split_whitespace()
            .map(|label| label.to_string())
            .collect();
        for label in &labels {
            styler.add_label(label.clone());
        }
        self.content.format(context, styler);
        // Pop exactly what was pushed to keep the label stack balanced.
        for _label in &labels {
            styler.remove_label();
        }
    }
}
// TODO: figure out why this lifetime is needed
/// Concatenation of templates: each part is rendered in order.
pub struct ListTemplate<'a, C>(pub Vec<Box<dyn Template<C> + 'a>>);

impl<'a, C> Template<C> for ListTemplate<'a, C> {
    fn format(&self, context: &C, styler: &mut dyn Styler) {
        self.0
            .iter()
            .for_each(|part| part.format(context, styler));
    }
}
/// Extracts a value of type O from a context of type C (e.g. the author
/// Signature from a Commit). Properties compose via TemplateFunction.
pub trait TemplateProperty<C, O> {
    fn extract(&self, context: &C) -> O;
}
/// A property that ignores its context and always yields a fixed value.
pub struct ConstantTemplateProperty<O: Clone> {
    pub output: O,
}

impl<C, O: Clone> TemplateProperty<C, O> for ConstantTemplateProperty<O> {
    fn extract(&self, _context: &C) -> O {
        self.output.clone()
    }
}
// TODO: figure out why this lifetime is needed
/// Adapts a string-valued property into a template by writing the
/// extracted text verbatim.
pub struct StringPropertyTemplate<'a, C> {
    pub property: Box<dyn TemplateProperty<C, String> + 'a>,
}

impl<'a, C> Template<C> for StringPropertyTemplate<'a, C> {
    fn format(&self, context: &C, styler: &mut dyn Styler) {
        let text = self.property.extract(context);
        styler.write_str(&text);
    }
}
/// The `change_id` keyword: the commit's change id as a hex string.
pub struct ChangeIdProperty;

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, String> for ChangeIdProperty {
    fn extract(&self, context: &Commit) -> String {
        context.change_id().hex()
    }
}
/// The `description` keyword: the commit description, normalized to always
/// end with a newline.
pub struct DescriptionProperty;

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, String> for DescriptionProperty {
    fn extract(&self, context: &Commit) -> String {
        // Append in place instead of going through std::ops::Add.
        let mut description = context.description().to_owned();
        if !description.ends_with('\n') {
            description.push('\n');
        }
        description
    }
}
/// The `author` keyword: the commit's author signature.
pub struct AuthorProperty;

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, Signature> for AuthorProperty {
    fn extract(&self, context: &Commit) -> Signature {
        context.author().clone()
    }
}
/// The `committer` keyword: the commit's committer signature.
pub struct CommitterProperty;

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, Signature> for CommitterProperty {
    fn extract(&self, context: &Commit) -> Signature {
        context.committer().clone()
    }
}
/// The `open` keyword: whether the commit is open.
pub struct OpenProperty;

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, bool> for OpenProperty {
    fn extract(&self, context: &Commit) -> bool {
        context.is_open()
    }
}
/// The `pruned` keyword: whether the commit is pruned.
pub struct PrunedProperty;

impl TemplateProperty<Commit, bool> for PrunedProperty {
    fn extract(&self, context: &Commit) -> bool {
        context.is_pruned()
    }
}
/// The `current_checkout` keyword: whether this commit is the one checked
/// out in the repo's view.
pub struct CurrentCheckoutProperty<'r> {
    pub repo: &'r dyn Repo,
}

impl<'r> TemplateProperty<Commit, bool> for CurrentCheckoutProperty<'r> {
    fn extract(&self, context: &Commit) -> bool {
        context.id() == self.repo.view().checkout()
    }
}
/// The `obsolete` keyword: whether evolution considers this commit
/// obsolete.
pub struct ObsoleteProperty<'r> {
    pub repo: &'r dyn Repo,
}

impl<'r> TemplateProperty<Commit, bool> for ObsoleteProperty<'r> {
    fn extract(&self, context: &Commit) -> bool {
        self.repo.evolution().is_obsolete(context.id())
    }
}
/// The `orphan` keyword: whether evolution considers this commit an
/// orphan.
pub struct OrphanProperty<'r> {
    pub repo: &'r dyn Repo,
}

impl<'r> TemplateProperty<Commit, bool> for OrphanProperty<'r> {
    fn extract(&self, context: &Commit) -> bool {
        self.repo.evolution().is_orphan(context.id())
    }
}
/// The `divergent` keyword: whether the commit's change id is divergent.
/// Note this checks the change id, not the commit id.
pub struct DivergentProperty<'r> {
    pub repo: &'r dyn Repo,
}

impl<'r> TemplateProperty<Commit, bool> for DivergentProperty<'r> {
    fn extract(&self, context: &Commit) -> bool {
        self.repo.evolution().is_divergent(context.change_id())
    }
}
/// The `conflict` keyword: whether the commit's tree contains conflicts.
pub struct ConflictProperty;

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, bool> for ConflictProperty {
    fn extract(&self, context: &Commit) -> bool {
        context.tree().has_conflict()
    }
}
/// Implements the `if()` template function: renders `true_template` or the
/// optional `false_template` depending on a boolean property.
pub struct ConditionalTemplate<'a, C> {
    pub condition: Box<dyn TemplateProperty<C, bool> + 'a>,
    pub true_template: Box<dyn Template<C> + 'a>,
    pub false_template: Option<Box<dyn Template<C> + 'a>>,
}

// TODO: figure out why this lifetime is needed
impl<'a, C> ConditionalTemplate<'a, C> {
    pub fn new(
        condition: Box<dyn TemplateProperty<C, bool> + 'a>,
        true_template: Box<dyn Template<C> + 'a>,
        false_template: Option<Box<dyn Template<C> + 'a>>,
    ) -> Self {
        ConditionalTemplate {
            condition,
            true_template,
            false_template,
        }
    }
}

impl<'a, C> Template<C> for ConditionalTemplate<'a, C> {
    fn format(&self, context: &C, styler: &mut dyn Styler) {
        // When the condition is false and no else-branch was given,
        // nothing is emitted.
        match (self.condition.extract(context), &self.false_template) {
            (true, _) => self.true_template.format(context, styler),
            (false, Some(alternative)) => alternative.format(context, styler),
            (false, None) => {}
        }
    }
}
// TODO: If needed, add a ContextualTemplateFunction where the function also
// gets the context
/// A property built by post-processing another property's output with a
/// plain function: extract I from the context, then map it to O.
pub struct TemplateFunction<'a, C, I, O> {
    pub property: Box<dyn TemplateProperty<C, I> + 'a>,
    pub function: Box<dyn Fn(I) -> O + 'a>,
}

// TODO: figure out why this lifetime is needed
impl<'a, C, I, O> TemplateFunction<'a, C, I, O> {
    pub fn new(
        template: Box<dyn TemplateProperty<C, I> + 'a>,
        function: Box<dyn Fn(I) -> O + 'a>,
    ) -> Self {
        TemplateFunction {
            property: template,
            function,
        }
    }
}

impl<'a, C, I, O> TemplateProperty<C, O> for TemplateFunction<'a, C, I, O> {
    fn extract(&self, context: &C) -> O {
        (self.function)(self.property.extract(context))
    }
}
/// The `commit_id` keyword, with helpers for its default (full hex) and
/// shortened textual renderings.
pub struct CommitIdKeyword;

impl CommitIdKeyword {
    pub fn default_format(commit_id: CommitId) -> String {
        commit_id.hex()
    }

    pub fn shortest_format(commit_id: CommitId) -> String {
        // TODO: make this actually be the shortest unambiguous prefix
        // NOTE(review): assumes the hex string is at least 12 chars; would
        // panic otherwise — confirm against the id length used by the store.
        commit_id.hex()[..12].to_string()
    }
}

// Dropped the unused `'r` impl lifetime (consistent with PrunedProperty).
impl TemplateProperty<Commit, CommitId> for CommitIdKeyword {
    fn extract(&self, context: &Commit) -> CommitId {
        context.id().clone()
    }
}

56
src/testutils.rs Normal file
View file

@ -0,0 +1,56 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Cursor;
use std::path::{Path, PathBuf};
use jj_lib::testutils::user_settings;
use crate::commands;
use crate::ui::Ui;
/// Drives the `jj` CLI in-process for tests: runs one command in `cwd` and
/// captures everything it writes to stdout.
pub struct CommandRunner {
    // Working directory the command runs in.
    pub cwd: PathBuf,
    // Buffer that receives the command's stdout.
    pub stdout_buf: Vec<u8>,
}

impl CommandRunner {
    pub fn new(cwd: &Path) -> CommandRunner {
        CommandRunner {
            cwd: cwd.to_owned(),
            stdout_buf: vec![],
        }
    }

    /// Runs `jj <args>` (the binary name is prepended automatically) and
    /// returns the exit status plus the captured stdout bytes.
    pub fn run(self, mut args: Vec<&str>) -> CommandOutput {
        let mut stdout_buf = self.stdout_buf;
        let stdout = Box::new(Cursor::new(&mut stdout_buf));
        // is_atty = false: tests want uncolored, deterministic output.
        let ui = Ui::new(self.cwd, stdout, false, user_settings());
        args.insert(0, "jj");
        let status = commands::dispatch(ui, args);
        CommandOutput { status, stdout_buf }
    }
}
/// Result of one in-process CLI invocation: the exit status and the raw
/// bytes captured from stdout.
#[derive(Debug, PartialEq, Eq)]
pub struct CommandOutput {
    pub status: i32,
    pub stdout_buf: Vec<u8>,
}

impl CommandOutput {
    /// Decodes the captured stdout as UTF-8, panicking if it is not valid.
    pub fn stdout_string(&self) -> String {
        let bytes = self.stdout_buf.clone();
        String::from_utf8(bytes).unwrap()
    }
}

101
src/ui.rs Normal file
View file

@ -0,0 +1,101 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::io;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Mutex, MutexGuard};
use jj_lib::commit::Commit;
use jj_lib::settings::UserSettings;
use crate::styler::{ColorStyler, PlainTextStyler, Styler};
use crate::templater::TemplateFormatter;
use jj_lib::repo::Repo;
/// Per-invocation user-interface state: the working directory, the output
/// styler, and the user's settings.
pub struct Ui<'a> {
    cwd: PathBuf,
    // Mutex lets &self methods hand out mutable access to the styler.
    styler: Mutex<Box<dyn Styler + 'a>>,
    settings: UserSettings,
}
impl<'a> Ui<'a> {
    /// Creates a UI writing to `stdout`. When `is_atty` is true, output is
    /// colored via ColorStyler; otherwise plain text is emitted.
    pub fn new(
        cwd: PathBuf,
        stdout: Box<dyn Write + 'a>,
        is_atty: bool,
        settings: UserSettings,
    ) -> Ui<'a> {
        let styler: Box<dyn Styler + 'a> = if is_atty {
            Box::new(ColorStyler::new(stdout, &settings))
        } else {
            Box::new(PlainTextStyler::new(stdout))
        };
        let styler = Mutex::new(styler);
        Ui {
            cwd,
            styler,
            settings,
        }
    }

    /// Creates a UI attached to the process's real stdout. Color is enabled
    /// unconditionally (is_atty is hard-coded to true here).
    pub fn for_terminal(settings: UserSettings) -> Ui<'static> {
        let cwd = std::env::current_dir().unwrap();
        let stdout: Box<dyn Write + 'static> = Box::new(io::stdout());
        Ui::new(cwd, stdout, true, settings)
    }

    pub fn cwd(&self) -> &Path {
        &self.cwd
    }

    pub fn settings(&self) -> &UserSettings {
        &self.settings
    }

    /// Locks and returns the styler; the guard releases the lock on drop.
    pub fn styler(&self) -> MutexGuard<Box<dyn Styler + 'a>> {
        self.styler.lock().unwrap()
    }

    pub fn write(&mut self, text: &str) {
        self.styler().write_str(text);
    }

    pub fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) {
        self.styler().write_fmt(fmt).unwrap()
    }

    /// Writes `text` styled under the "error" label.
    pub fn write_error(&mut self, text: &str) {
        let mut styler = self.styler();
        styler.add_label(String::from("error"));
        styler.write_str(text);
        // Fix: pop the label again; previously it was left on the styler's
        // label stack, so all later output stayed error-styled.
        styler.remove_label();
    }

    /// Renders the configured `template.commit_summary` template (or a
    /// built-in default) for `commit` to this UI's output.
    pub fn write_commit_summary<'r>(&mut self, repo: &'r dyn Repo, commit: &Commit) {
        let template_string = self
            .settings
            .config()
            .get_str("template.commit_summary")
            .unwrap_or_else(|_| {
                String::from(
                    r#"label(if(open, "open"), commit_id.short() " " description.first_line())"#,
                )
            });
        let template = crate::template_parser::parse_commit_template(repo, &template_string);
        let mut styler = self.styler();
        let mut template_writer = TemplateFormatter::new(template, styler.as_mut());
        template_writer.format(commit);
    }
}

112
tests/smoke_test.rs Normal file
View file

@ -0,0 +1,112 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj::testutils;
use regex::Regex;
/// End-to-end CLI test: init a repo, check `jj status` before and after
/// adding files, set a description, and close the commit.
#[test]
fn smoke_test() {
    let temp_dir = tempfile::tempdir().unwrap();

    let output = testutils::CommandRunner::new(temp_dir.path()).run(vec!["init", "repo"]);
    assert_eq!(output.status, 0);
    let repo_path = temp_dir.path().join("repo");

    // Check the output of `jj status` right after initializing repo
    let output = testutils::CommandRunner::new(&repo_path).run(vec!["status"]);
    assert_eq!(output.status, 0);
    let stdout_string = output.stdout_string();
    // Capture group 1 is the working copy's commit id.
    let output_regex = Regex::new(
        "^Working copy : ([[:xdigit:]]+) \n\
         Parent commit: 000000000000 \n\
         Diff summary:\n\
         $",
    )
    .unwrap();
    assert!(
        output_regex.is_match(&stdout_string),
        "output was: {}",
        stdout_string
    );
    let wc_hex_id_empty = output_regex
        .captures(&stdout_string)
        .unwrap()
        .get(1)
        .unwrap()
        .as_str()
        .to_owned();

    // Write some files and check the output of `jj status`
    std::fs::write(repo_path.join("file1"), "file1").unwrap();
    std::fs::write(repo_path.join("file2"), "file2").unwrap();
    std::fs::write(repo_path.join("file3"), "file3").unwrap();
    let output = testutils::CommandRunner::new(&repo_path).run(vec!["status"]);
    assert_eq!(output.status, 0);
    let stdout_string = output.stdout_string();
    let output_regex = Regex::new(
        "^Working copy : ([[:xdigit:]]+) \n\
         Parent commit: 000000000000 \n\
         Diff summary:\n\
         A file1\n\
         A file2\n\
         A file3\n\
         $",
    )
    .unwrap();
    assert!(
        output_regex.is_match(&stdout_string),
        "output was: {}",
        stdout_string
    );
    let wc_hex_id_non_empty = output_regex
        .captures(&stdout_string)
        .unwrap()
        .get(1)
        .unwrap()
        .as_str()
        .to_owned();

    // The working copy's id should have changed
    assert_ne!(wc_hex_id_empty, wc_hex_id_non_empty);

    // Running `jj status` again gives the same output
    let output2 = testutils::CommandRunner::new(&repo_path).run(vec!["status"]);
    assert_eq!(output, output2);

    // Add a commit description
    let output =
        testutils::CommandRunner::new(&repo_path).run(vec!["describe", "--text", "add some files"]);
    assert_eq!(output.status, 0);
    let stdout_string = output.stdout_string();
    let output_regex =
        Regex::new("^leaving: [[:xdigit:]]+ \nnow at: [[:xdigit:]]+ add some files\n$").unwrap();
    assert!(
        output_regex.is_match(&stdout_string),
        "output was: {}",
        stdout_string
    );

    // Close the commit
    let output = testutils::CommandRunner::new(&repo_path).run(vec!["close"]);
    assert_eq!(output.status, 0);
    let stdout_string = output.stdout_string();
    let output_regex =
        Regex::new("^leaving: [[:xdigit:]]+ add some files\nnow at: [[:xdigit:]]+ \n$").unwrap();
    assert!(
        output_regex.is_match(&stdout_string),
        "output was: {}",
        stdout_string
    );
}

View file

@ -0,0 +1,64 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj::testutils;
/// `jj init --git-store`: initializing on top of an existing git repo
/// should create .jj and record the git store path in the store file.
#[test]
fn test_init_git() {
    let temp_dir = tempfile::tempdir().unwrap();
    let git_repo_path = temp_dir.path().join("git-repo");
    git2::Repository::init(git_repo_path.clone()).unwrap();
    let output = testutils::CommandRunner::new(temp_dir.path()).run(vec![
        "init",
        "repo",
        "--git-store",
        git_repo_path.to_str().unwrap(),
    ]);
    assert_eq!(output.status, 0);

    let repo_path = temp_dir.path().join("repo");
    assert!(repo_path.is_dir());
    assert!(repo_path.join(".jj").is_dir());
    // The store file points at the backing git repo: "git: <path>".
    let store_file_contents = std::fs::read_to_string(repo_path.join(".jj").join("store")).unwrap();
    assert!(store_file_contents.starts_with("git: "));
    assert!(store_file_contents.ends_with("/git-repo"));
    assert_eq!(
        output.stdout_string(),
        format!("Initialized repo in \"{}\"\n", repo_path.to_str().unwrap())
    );
}
/// `jj init` without a git store: the native local store should be created
/// with its expected object directories.
#[test]
fn test_init_local() {
    let temp_dir = tempfile::tempdir().unwrap();
    let output = testutils::CommandRunner::new(temp_dir.path()).run(vec!["init", "repo"]);
    assert_eq!(output.status, 0);

    let repo_path = temp_dir.path().join("repo");
    assert!(repo_path.is_dir());
    assert!(repo_path.join(".jj").is_dir());
    // For a local store, ".jj/store" is a directory of object kinds
    // (unlike the git case, where it is a file naming the git repo).
    let store_dir = repo_path.join(".jj").join("store");
    assert!(store_dir.is_dir());
    assert!(store_dir.join("commits").is_dir());
    assert!(store_dir.join("trees").is_dir());
    assert!(store_dir.join("files").is_dir());
    assert!(store_dir.join("symlinks").is_dir());
    assert!(store_dir.join("conflicts").is_dir());
    assert_eq!(
        output.stdout_string(),
        format!("Initialized repo in \"{}\"\n", repo_path.to_str().unwrap())
    );
}