diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 000000000..bcf3d4f9c --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,1516 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "aho-corasick" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi", +] + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "blake2" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330" +dependencies = [ + "byte-tools", + "crypto-mac", + "digest", + "opaque-debug", +] + +[[package]] +name = "blake2b_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bstr" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde 1.0.116", +] + +[[package]] +name = "bumpalo" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "bytes" +version = "0.5.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" + +[[package]] +name = "cast" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" +dependencies = [ + "rustc_version", +] + +[[package]] +name = "cc" +version = "1.0.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits 0.2.12", + "time", + "winapi", +] + +[[package]] +name = "clap" +version = "2.33.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "config" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b076e143e1d9538dde65da30f8481c2a6c44040edb8e02b9bf1351edb92ce3" +dependencies = [ + "lazy_static", + "nom", + "rust-ini", + "serde 1.0.116", + "serde-hjson", + "serde_json", + "toml", + "yaml-rust", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "criterion" +version = "0.3.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70daa7ceec6cf143990669a04c7df13391d55fb27bd4079d252fca774ba244d8" +dependencies = [ + "atty", + "cast", + "clap", + "criterion-plot", + "csv", + "itertools", + "lazy_static", + "num-traits 0.2.12", + "oorandom", + "plotters", + "rayon", + "regex", + "serde 1.0.116", + "serde_cbor", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-channel" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +dependencies = [ + "crossbeam-utils", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-deque" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "maybe-uninit", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if", + "lazy_static", +] + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "csv" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +dependencies = [ + "bstr", + "csv-core", + "itoa", + "ryu", + "serde 1.0.116", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + +[[package]] +name = "diff" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "dirs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +dependencies = [ + "cfg-if", + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "generic-array" +version 
= "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "git2" +version = "0.13.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e094214efbc7fdbbdee952147e493b00e99a4e52817492277e98967ae918165" +dependencies = [ + "bitflags", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "url", +] + +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "half" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" + +[[package]] +name = "hermit-abi" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indoc" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e5a75aeaaef0ce18b58056d306c27b07436fbb34b8816c53094b76dd81803136" +dependencies = [ + "unindent", +] + +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" + +[[package]] +name = "jj" +version = "0.1.0" +dependencies = [ + "blake2", + "bytes", + "chrono", + "clap", + "config", + "criterion", + "diff", + "dirs", + "git2", + "hex", + "indoc", + "jj-lib", + "pest", + "pest_derive", + "protobuf", + "protobuf-codegen-pure", + "regex", + "serde_json", + "tempfile", + "test-case", + "uuid", + "zstd", +] + +[[package]] +name = "jj-lib" +version = "0.1.0" +dependencies = [ + "blake2", + "byteorder", + "bytes", + "chrono", + "config", + "diff", + "dirs", + "git2", + "hex", + "protobuf", + "protobuf-codegen-pure", + "protos", + "rand", + "serde_json", + "tempfile", + "test-case", + "thiserror", + "uuid", + "whoami", + "zstd", +] + +[[package]] +name = "jobserver" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lexical-core" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "db65c6da02e61f55dae90a0ae427b2a5f6b3e8db09f58d10efab23af92592616" +dependencies = [ + "arrayvec", + "bitflags", + "cfg-if", + "ryu", + "static_assertions", +] + +[[package]] +name = "libc" +version = "0.2.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" + +[[package]] +name = "libgit2-sys" +version = "0.12.13+1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069eea34f76ec15f2822ccf78fe0cdb8c9016764d0a12865278585a74dbdeae5" +dependencies = [ + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", +] + +[[package]] +name = "libssh2-sys" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca46220853ba1c512fc82826d0834d87b06bcd3c2a42241b7de72f3d2fe17056" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linked-hash-map" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" +dependencies = [ + "serde 0.8.23", + "serde_test", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "maplit" +version = "1.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" + +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + +[[package]] +name = "nom" +version = "5.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +dependencies = [ + "lexical-core", + "memchr", + "version_check", +] + +[[package]] +name = "num-integer" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +dependencies = [ + "autocfg", + "num-traits 0.2.12", +] + +[[package]] +name = "num-traits" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" +dependencies = [ + "num-traits 0.2.12", +] + +[[package]] +name = "num-traits" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" 
+version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "oorandom" +version = "11.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c" + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "openssl-sys" +version = "0.9.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pest" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" 
+dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" +dependencies = [ + "maplit", + "pest", + "sha-1", +] + +[[package]] +name = "pkg-config" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" + +[[package]] +name = "plotters" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb" +dependencies = [ + "js-sys", + "num-traits 0.2.12", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" + +[[package]] +name = "proc-macro2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "protobuf" +version = "2.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d147edb77bcccbfc81fabffdc7bd50c13e103b15ca1e27515fe40de69a5776b" +dependencies = [ + "bytes", +] + +[[package]] +name = "protobuf-codegen" +version = "2.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e81f70c25aab9506f87253c55f7cdcd8917635d5597382958d20025c211bbbd" +dependencies = [ + "protobuf", +] + +[[package]] +name = "protobuf-codegen-pure" +version = "2.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6af8d72d9e14fd41a954f4d5b310396151437c83d2bfcbf19d3073af90e46288" +dependencies = [ + "protobuf", + 
"protobuf-codegen", +] + +[[package]] +name = "protos" +version = "0.1.0" +dependencies = [ + "bytes", + "protobuf", + "protobuf-codegen-pure", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rayon" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf6960dc9a5b4ee8d3e4c5787b4a112a8818e0290a42ff664ad60692fdf2032" +dependencies = [ + "autocfg", + "crossbeam-deque", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c4fec834fb6e6d2dd5eece3c7b432a52f0ba887cf40e595190c4107edc08bf" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "lazy_static", + "num_cpus", +] + +[[package]] 
+name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "redox_users" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +dependencies = [ + "getrandom", + "redox_syscall", + "rust-argon2", +] + +[[package]] +name = "regex" +version = "1.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", +] + +[[package]] +name = "regex-syntax" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rust-argon2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +dependencies = [ + "base64", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + +[[package]] +name = "rust-ini" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" + +[[package]] +name = "serde" +version = "1.0.116" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" + +[[package]] +name = "serde-hjson" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" +dependencies = [ + "lazy_static", + "linked-hash-map 0.3.0", + "num-traits 0.1.43", + "regex", + "serde 0.8.23", +] + +[[package]] +name = "serde_cbor" +version = 
"0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +dependencies = [ + "half", + "serde 1.0.116", +] + +[[package]] +name = "serde_derive" +version = "1.0.116" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" +dependencies = [ + "itoa", + "ryu", + "serde 1.0.116", +] + +[[package]] +name = "serde_test" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "110b3dbdf8607ec493c22d5d947753282f3bae73c0f56d322af1e8c78e4c23d5" +dependencies = [ + "serde 0.8.23", +] + +[[package]] +name = "sha-1" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +dependencies = [ + "block-buffer", + "digest", + "fake-simd", + "opaque-debug", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "syn" +version = "1.0.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e2e59c50ed8f6b050b071aa7b6865293957a9af6b58b94f97c1c9434ad440ea" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "test-case" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "199464148b42bcf3da8b2a56f6ee87ca68f47402496d1268849291ec9fb463c8" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "time" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = 
"tinytemplate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" +dependencies = [ + "serde 1.0.116", + "serde_json", +] + +[[package]] +name = "tinyvec" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" + +[[package]] +name = "toml" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +dependencies = [ + "serde 1.0.116", +] + +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + +[[package]] +name = "ucd-trie" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "unindent" +version = 
"0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" + +[[package]] +name = "url" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +dependencies = [ + "idna", + "matches", + "percent-encoding", +] + +[[package]] +name = "uuid" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" +dependencies = [ + "rand", +] + +[[package]] +name = "vcpkg" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "walkdir" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +dependencies = [ + "same-file", + "winapi", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "wasm-bindgen" +version = "0.2.68" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" + +[[package]] +name = "web-sys" +version = "0.3.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "whoami" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7884773ab69074615cb8f8425d0e53f11710786158704fca70f53e71b0e05504" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + 
"winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "yaml-rust" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +dependencies = [ + "linked-hash-map 0.5.3", +] + +[[package]] +name = "zstd" +version = "0.5.3+zstd.1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "2.0.5+zstd.1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "1.4.17+zstd.1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" +dependencies = [ + "cc", + "glob", + "itertools", + "libc", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..92c08ee4d --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,36 @@ +[workspace] +members = ["lib"] + +[package] +name = "jj" +version = "0.1.0" +authors = ["Martin von Zweigbergk "] 
+edition = "2018" + +[dependencies.jj-lib] +path = "lib" + +[dependencies] +blake2 = "0.8" +bytes = "0.5" +chrono = "0.4" +clap = "2.33" +config = "0.10" +criterion = "0.3.2" +diff = "0.1" +dirs = "2.0" +git2 = "0.13" +hex = "0.4" +indoc = "1.0" +pest = "2.1" +pest_derive = "2.1" +protobuf = { version = "2.12", features = ["with-bytes"] } +protobuf-codegen-pure = "2.12" +serde_json = "1.0" +tempfile = "3.1" +uuid = { version = "0.8", features = ["v4"] } +zstd = "0.5" + +[dev-dependencies] +test-case = "1.0.0" +regex = "1.3.9" diff --git a/lib/Cargo.toml b/lib/Cargo.toml new file mode 100644 index 000000000..23e885a40 --- /dev/null +++ b/lib/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "jj-lib" +version = "0.1.0" +authors = ["Martin von Zweigbergk "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies.protos] +path = "protos" + + +[dependencies] +blake2 = "0.8" +bytes = "0.5" +byteorder = "1.3.4" +chrono = "0.4" +config = "0.10" +diff = "0.1" +dirs = "2.0" +git2 = "0.13" +hex = "0.4" +protobuf = { version = "2.12", features = ["with-bytes"] } +protobuf-codegen-pure = "2.12" +rand = "0.7.3" +serde_json = "1.0" +tempfile = "3.1" +thiserror = "1.0" +uuid = { version = "0.8", features = ["v4"] } +whoami = "0.9.0" +zstd = "0.5" + +[dev-dependencies] +test-case = "1.0.0" diff --git a/lib/protos/Cargo.toml b/lib/protos/Cargo.toml new file mode 100644 index 000000000..1e4163550 --- /dev/null +++ b/lib/protos/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "protos" +version = "0.1.0" +authors = ["Martin von Zweigbergk "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bytes = "0.5" +protobuf = { version = "2.12", features = ["with-bytes"] } +protobuf-codegen-pure = "2.12" diff --git a/lib/protos/src/lib.rs b/lib/protos/src/lib.rs new file mode 100644 index 000000000..ee43d8384 --- /dev/null +++ 
b/lib/protos/src/lib.rs @@ -0,0 +1,17 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod op_store; +pub mod store; +pub mod working_copy; diff --git a/lib/protos/src/main.rs b/lib/protos/src/main.rs new file mode 100644 index 000000000..4fff3d6a9 --- /dev/null +++ b/lib/protos/src/main.rs @@ -0,0 +1,28 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +extern crate protobuf_codegen_pure; + +use protobuf_codegen_pure::Codegen; + +fn main() { + Codegen::new() + .out_dir("src/") + .include("src/") + .input("src/op_store.proto") + .input("src/store.proto") + .input("src/working_copy.proto") + .run() + .expect("protoc"); +} diff --git a/lib/protos/src/op_store.proto b/lib/protos/src/op_store.proto new file mode 100644 index 000000000..175d073c3 --- /dev/null +++ b/lib/protos/src/op_store.proto @@ -0,0 +1,40 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +message View { + repeated bytes head_ids = 1; + bytes checkout = 2; +} + +message Operation { + bytes view_id = 1; + repeated bytes parents = 2; + OperationMetadata metadata = 3; +} + +// TODO: Share with store.proto? Do we even need the timezone here? +message Timestamp { + uint64 millis_since_epoch = 1; + int32 tz_offset = 2; +} + +message OperationMetadata { + Timestamp start_time = 1; + Timestamp end_time = 2; + string description = 3; + string hostname = 4; + string username = 5; +} diff --git a/lib/protos/src/op_store.rs b/lib/protos/src/op_store.rs new file mode 100644 index 000000000..74e71d56e --- /dev/null +++ b/lib/protos/src/op_store.rs @@ -0,0 +1,1053 @@ +// This file is generated by rust-protobuf 2.18.0. 
Do not edit +// @generated + +// https://github.com/rust-lang/rust-clippy/issues/702 +#![allow(unknown_lints)] +#![allow(clippy::all)] + +#![allow(unused_attributes)] +#![rustfmt::skip] + +#![allow(box_pointers)] +#![allow(dead_code)] +#![allow(missing_docs)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(trivial_casts)] +#![allow(unused_imports)] +#![allow(unused_results)] +//! Generated file from `op_store.proto` + +/// Generated files are compatible only with the same version +/// of protobuf runtime. +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_18_0; + +#[derive(PartialEq,Clone,Default)] +pub struct View { + // message fields + pub head_ids: ::protobuf::RepeatedField<::std::vec::Vec>, + pub checkout: ::std::vec::Vec, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a View { + fn default() -> &'a View { + ::default_instance() + } +} + +impl View { + pub fn new() -> View { + ::std::default::Default::default() + } + + // repeated bytes head_ids = 1; + + + pub fn get_head_ids(&self) -> &[::std::vec::Vec] { + &self.head_ids + } + pub fn clear_head_ids(&mut self) { + self.head_ids.clear(); + } + + // Param is passed by value, moved + pub fn set_head_ids(&mut self, v: ::protobuf::RepeatedField<::std::vec::Vec>) { + self.head_ids = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_head_ids(&mut self) -> &mut ::protobuf::RepeatedField<::std::vec::Vec> { + &mut self.head_ids + } + + // Take field + pub fn take_head_ids(&mut self) -> ::protobuf::RepeatedField<::std::vec::Vec> { + ::std::mem::replace(&mut self.head_ids, ::protobuf::RepeatedField::new()) + } + + // bytes checkout = 2; + + + pub fn get_checkout(&self) -> &[u8] { + &self.checkout + } + pub fn clear_checkout(&mut self) { + self.checkout.clear(); + } + + // Param is passed by value, moved + pub fn set_checkout(&mut self, v: ::std::vec::Vec) { + self.checkout = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_checkout(&mut self) -> &mut ::std::vec::Vec { + &mut self.checkout + } + + // Take field + pub fn take_checkout(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.checkout, ::std::vec::Vec::new()) + } +} + +impl ::protobuf::Message for View { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_repeated_bytes_into(wire_type, is, &mut self.head_ids)?; + }, + 2 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.checkout)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + for value in &self.head_ids { + my_size += ::protobuf::rt::bytes_size(1, &value); + }; + if !self.checkout.is_empty() { + my_size += ::protobuf::rt::bytes_size(2, &self.checkout); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + for v in &self.head_ids { + os.write_bytes(1, &v)?; + }; + if !self.checkout.is_empty() { + os.write_bytes(2, &self.checkout)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> View { + View::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: 
::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "head_ids", + |m: &View| { &m.head_ids }, + |m: &mut View| { &mut m.head_ids }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "checkout", + |m: &View| { &m.checkout }, + |m: &mut View| { &mut m.checkout }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "View", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static View { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(View::new) + } +} + +impl ::protobuf::Clear for View { + fn clear(&mut self) { + self.head_ids.clear(); + self.checkout.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for View { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for View { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Operation { + // message fields + pub view_id: ::std::vec::Vec, + pub parents: ::protobuf::RepeatedField<::std::vec::Vec>, + pub metadata: ::protobuf::SingularPtrField, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Operation { + fn default() -> &'a Operation { + ::default_instance() + } +} + +impl Operation { + pub fn new() -> Operation { + ::std::default::Default::default() + } + + // bytes view_id = 1; + + + pub fn get_view_id(&self) -> &[u8] { + &self.view_id + } + pub fn clear_view_id(&mut self) { + 
self.view_id.clear(); + } + + // Param is passed by value, moved + pub fn set_view_id(&mut self, v: ::std::vec::Vec) { + self.view_id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_view_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.view_id + } + + // Take field + pub fn take_view_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.view_id, ::std::vec::Vec::new()) + } + + // repeated bytes parents = 2; + + + pub fn get_parents(&self) -> &[::std::vec::Vec] { + &self.parents + } + pub fn clear_parents(&mut self) { + self.parents.clear(); + } + + // Param is passed by value, moved + pub fn set_parents(&mut self, v: ::protobuf::RepeatedField<::std::vec::Vec>) { + self.parents = v; + } + + // Mutable pointer to the field. + pub fn mut_parents(&mut self) -> &mut ::protobuf::RepeatedField<::std::vec::Vec> { + &mut self.parents + } + + // Take field + pub fn take_parents(&mut self) -> ::protobuf::RepeatedField<::std::vec::Vec> { + ::std::mem::replace(&mut self.parents, ::protobuf::RepeatedField::new()) + } + + // .OperationMetadata metadata = 3; + + + pub fn get_metadata(&self) -> &OperationMetadata { + self.metadata.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_metadata(&mut self) { + self.metadata.clear(); + } + + pub fn has_metadata(&self) -> bool { + self.metadata.is_some() + } + + // Param is passed by value, moved + pub fn set_metadata(&mut self, v: OperationMetadata) { + self.metadata = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_metadata(&mut self) -> &mut OperationMetadata { + if self.metadata.is_none() { + self.metadata.set_default(); + } + self.metadata.as_mut().unwrap() + } + + // Take field + pub fn take_metadata(&mut self) -> OperationMetadata { + self.metadata.take().unwrap_or_else(|| OperationMetadata::new()) + } +} + +impl ::protobuf::Message for Operation { + fn is_initialized(&self) -> bool { + for v in &self.metadata { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.view_id)?; + }, + 2 => { + ::protobuf::rt::read_repeated_bytes_into(wire_type, is, &mut self.parents)?; + }, + 3 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.metadata)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if !self.view_id.is_empty() { + my_size += ::protobuf::rt::bytes_size(1, &self.view_id); + } + for value in &self.parents { + my_size += ::protobuf::rt::bytes_size(2, &value); + }; + if let Some(ref v) = self.metadata.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if !self.view_id.is_empty() { + os.write_bytes(1, &self.view_id)?; + } + for v in &self.parents { + os.write_bytes(2, &v)?; + }; + if let 
Some(ref v) = self.metadata.as_ref() { + os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Operation { + Operation::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "view_id", + |m: &Operation| { &m.view_id }, + |m: &mut Operation| { &mut m.view_id }, + )); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "parents", + |m: &Operation| { &m.parents }, + |m: &mut Operation| { &mut m.parents }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "metadata", + |m: &Operation| { &m.metadata }, + |m: &mut Operation| { &mut m.metadata }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Operation", + fields, + file_descriptor_proto() + ) + }) + } 
+ + fn default_instance() -> &'static Operation { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Operation::new) + } +} + +impl ::protobuf::Clear for Operation { + fn clear(&mut self) { + self.view_id.clear(); + self.parents.clear(); + self.metadata.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Operation { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Operation { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Timestamp { + // message fields + pub millis_since_epoch: u64, + pub tz_offset: i32, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Timestamp { + fn default() -> &'a Timestamp { + ::default_instance() + } +} + +impl Timestamp { + pub fn new() -> Timestamp { + ::std::default::Default::default() + } + + // uint64 millis_since_epoch = 1; + + + pub fn get_millis_since_epoch(&self) -> u64 { + self.millis_since_epoch + } + pub fn clear_millis_since_epoch(&mut self) { + self.millis_since_epoch = 0; + } + + // Param is passed by value, moved + pub fn set_millis_since_epoch(&mut self, v: u64) { + self.millis_since_epoch = v; + } + + // int32 tz_offset = 2; + + + pub fn get_tz_offset(&self) -> i32 { + self.tz_offset + } + pub fn clear_tz_offset(&mut self) { + self.tz_offset = 0; + } + + // Param is passed by value, moved + pub fn set_tz_offset(&mut self, v: i32) { + self.tz_offset = v; + } +} + +impl ::protobuf::Message for Timestamp { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_uint64()?; + self.millis_since_epoch = tmp; + }, + 2 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.tz_offset = tmp; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if self.millis_since_epoch != 0 { + my_size += ::protobuf::rt::value_size(1, self.millis_since_epoch, ::protobuf::wire_format::WireTypeVarint); + } + if self.tz_offset != 0 { + my_size += ::protobuf::rt::value_size(2, self.tz_offset, ::protobuf::wire_format::WireTypeVarint); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if self.millis_since_epoch != 0 { + os.write_uint64(1, self.millis_since_epoch)?; + } + if self.tz_offset != 0 { + os.write_int32(2, self.tz_offset)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { 
+ self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Timestamp { + Timestamp::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint64>( + "millis_since_epoch", + |m: &Timestamp| { &m.millis_since_epoch }, + |m: &mut Timestamp| { &mut m.millis_since_epoch }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "tz_offset", + |m: &Timestamp| { &m.tz_offset }, + |m: &mut Timestamp| { &mut m.tz_offset }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Timestamp", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Timestamp { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Timestamp::new) + } +} + +impl ::protobuf::Clear for Timestamp { + fn clear(&mut self) { + self.millis_since_epoch = 0; + self.tz_offset = 0; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Timestamp { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Timestamp { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct OperationMetadata { + // message fields + pub start_time: ::protobuf::SingularPtrField, + pub end_time: ::protobuf::SingularPtrField, + pub description: 
::std::string::String, + pub hostname: ::std::string::String, + pub username: ::std::string::String, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a OperationMetadata { + fn default() -> &'a OperationMetadata { + ::default_instance() + } +} + +impl OperationMetadata { + pub fn new() -> OperationMetadata { + ::std::default::Default::default() + } + + // .Timestamp start_time = 1; + + + pub fn get_start_time(&self) -> &Timestamp { + self.start_time.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_start_time(&mut self) { + self.start_time.clear(); + } + + pub fn has_start_time(&self) -> bool { + self.start_time.is_some() + } + + // Param is passed by value, moved + pub fn set_start_time(&mut self, v: Timestamp) { + self.start_time = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_start_time(&mut self) -> &mut Timestamp { + if self.start_time.is_none() { + self.start_time.set_default(); + } + self.start_time.as_mut().unwrap() + } + + // Take field + pub fn take_start_time(&mut self) -> Timestamp { + self.start_time.take().unwrap_or_else(|| Timestamp::new()) + } + + // .Timestamp end_time = 2; + + + pub fn get_end_time(&self) -> &Timestamp { + self.end_time.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_end_time(&mut self) { + self.end_time.clear(); + } + + pub fn has_end_time(&self) -> bool { + self.end_time.is_some() + } + + // Param is passed by value, moved + pub fn set_end_time(&mut self, v: Timestamp) { + self.end_time = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_end_time(&mut self) -> &mut Timestamp { + if self.end_time.is_none() { + self.end_time.set_default(); + } + self.end_time.as_mut().unwrap() + } + + // Take field + pub fn take_end_time(&mut self) -> Timestamp { + self.end_time.take().unwrap_or_else(|| Timestamp::new()) + } + + // string description = 3; + + + pub fn get_description(&self) -> &str { + &self.description + } + pub fn clear_description(&mut self) { + self.description.clear(); + } + + // Param is passed by value, moved + pub fn set_description(&mut self, v: ::std::string::String) { + self.description = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_description(&mut self) -> &mut ::std::string::String { + &mut self.description + } + + // Take field + pub fn take_description(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.description, ::std::string::String::new()) + } + + // string hostname = 4; + + + pub fn get_hostname(&self) -> &str { + &self.hostname + } + pub fn clear_hostname(&mut self) { + self.hostname.clear(); + } + + // Param is passed by value, moved + pub fn set_hostname(&mut self, v: ::std::string::String) { + self.hostname = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_hostname(&mut self) -> &mut ::std::string::String { + &mut self.hostname + } + + // Take field + pub fn take_hostname(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.hostname, ::std::string::String::new()) + } + + // string username = 5; + + + pub fn get_username(&self) -> &str { + &self.username + } + pub fn clear_username(&mut self) { + self.username.clear(); + } + + // Param is passed by value, moved + pub fn set_username(&mut self, v: ::std::string::String) { + self.username = v; + } + + // Mutable pointer to the field. 
+ // If field is not initialized, it is initialized with default value first. + pub fn mut_username(&mut self) -> &mut ::std::string::String { + &mut self.username + } + + // Take field + pub fn take_username(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.username, ::std::string::String::new()) + } +} + +impl ::protobuf::Message for OperationMetadata { + fn is_initialized(&self) -> bool { + for v in &self.start_time { + if !v.is_initialized() { + return false; + } + }; + for v in &self.end_time { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.start_time)?; + }, + 2 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.end_time)?; + }, + 3 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.description)?; + }, + 4 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.hostname)?; + }, + 5 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.username)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if let Some(ref v) = self.start_time.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + if let Some(ref v) = self.end_time.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + if !self.description.is_empty() { + my_size += ::protobuf::rt::string_size(3, &self.description); + } 
+ if !self.hostname.is_empty() { + my_size += ::protobuf::rt::string_size(4, &self.hostname); + } + if !self.username.is_empty() { + my_size += ::protobuf::rt::string_size(5, &self.username); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if let Some(ref v) = self.start_time.as_ref() { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + if let Some(ref v) = self.end_time.as_ref() { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + if !self.description.is_empty() { + os.write_string(3, &self.description)?; + } + if !self.hostname.is_empty() { + os.write_string(4, &self.hostname)?; + } + if !self.username.is_empty() { + os.write_string(5, &self.username)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> OperationMetadata { + OperationMetadata::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: 
::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "start_time", + |m: &OperationMetadata| { &m.start_time }, + |m: &mut OperationMetadata| { &mut m.start_time }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "end_time", + |m: &OperationMetadata| { &m.end_time }, + |m: &mut OperationMetadata| { &mut m.end_time }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "description", + |m: &OperationMetadata| { &m.description }, + |m: &mut OperationMetadata| { &mut m.description }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "hostname", + |m: &OperationMetadata| { &m.hostname }, + |m: &mut OperationMetadata| { &mut m.hostname }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "username", + |m: &OperationMetadata| { &m.username }, + |m: &mut OperationMetadata| { &mut m.username }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "OperationMetadata", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static OperationMetadata { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(OperationMetadata::new) + } +} + +impl ::protobuf::Clear for OperationMetadata { + fn clear(&mut self) { + self.start_time.clear(); + self.end_time.clear(); + self.description.clear(); + self.hostname.clear(); + self.username.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for OperationMetadata { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> 
::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for OperationMetadata { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +static file_descriptor_proto_data: &'static [u8] = b"\ + \n\x0eop_store.proto\"C\n\x04View\x12\x1b\n\x08head_ids\x18\x01\x20\x03(\ + \x0cR\x07headIdsB\0\x12\x1c\n\x08checkout\x18\x02\x20\x01(\x0cR\x08check\ + outB\0:\0\"v\n\tOperation\x12\x19\n\x07view_id\x18\x01\x20\x01(\x0cR\x06\ + viewIdB\0\x12\x1a\n\x07parents\x18\x02\x20\x03(\x0cR\x07parentsB\0\x120\ + \n\x08metadata\x18\x03\x20\x01(\x0b2\x12.OperationMetadataR\x08metadataB\ + \0:\0\"\\\n\tTimestamp\x12.\n\x12millis_since_epoch\x18\x01\x20\x01(\x04\ + R\x10millisSinceEpochB\0\x12\x1d\n\ttz_offset\x18\x02\x20\x01(\x05R\x08t\ + zOffsetB\0:\0\"\xcb\x01\n\x11OperationMetadata\x12+\n\nstart_time\x18\ + \x01\x20\x01(\x0b2\n.TimestampR\tstartTimeB\0\x12'\n\x08end_time\x18\x02\ + \x20\x01(\x0b2\n.TimestampR\x07endTimeB\0\x12\"\n\x0bdescription\x18\x03\ + \x20\x01(\tR\x0bdescriptionB\0\x12\x1c\n\x08hostname\x18\x04\x20\x01(\tR\ + \x08hostnameB\0\x12\x1c\n\x08username\x18\x05\x20\x01(\tR\x08usernameB\0\ + :\0B\0b\x06proto3\ +"; + +static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; + +fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { + ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() +} + +pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { + file_descriptor_proto_lazy.get(|| { + parse_descriptor_proto() + }) +} diff --git a/lib/protos/src/store.proto b/lib/protos/src/store.proto new file mode 100644 index 000000000..2604f767a --- /dev/null +++ b/lib/protos/src/store.proto @@ -0,0 +1,70 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may 
not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +message TreeValue { + message NormalFile { + bytes id = 1; + bool executable = 2; + } + + oneof value { + NormalFile normal_file = 2; + bytes symlink_id = 3; + bytes tree_id = 4; + bytes conflict_id = 5; + } +} + +message Tree { + message Entry { + string name = 1; + TreeValue value = 2; + } + + repeated Entry entries = 1; +} + +message Commit { + repeated bytes parents = 1; + repeated bytes predecessors = 2; + bytes root_tree = 3; + bytes change_id = 4; + string description = 5; + + message Timestamp { + uint64 millis_since_epoch = 1; + int32 tz_offset = 2; + } + message Signature { + string name = 1; + string email = 2; + Timestamp timestamp = 3; + } + Signature author = 6; + Signature committer = 7; + + bool is_open = 8; + bool is_pruned = 9; +} + +message Conflict { + message Part { + TreeValue content = 1; + } + + repeated Part removes = 1; + repeated Part adds = 2; +} diff --git a/lib/protos/src/store.rs b/lib/protos/src/store.rs new file mode 100644 index 000000000..e0aec0d9d --- /dev/null +++ b/lib/protos/src/store.rs @@ -0,0 +1,2395 @@ +// This file is generated by rust-protobuf 2.18.0. 
Do not edit +// @generated + +// https://github.com/rust-lang/rust-clippy/issues/702 +#![allow(unknown_lints)] +#![allow(clippy::all)] + +#![allow(unused_attributes)] +#![rustfmt::skip] + +#![allow(box_pointers)] +#![allow(dead_code)] +#![allow(missing_docs)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(trivial_casts)] +#![allow(unused_imports)] +#![allow(unused_results)] +//! Generated file from `store.proto` + +/// Generated files are compatible only with the same version +/// of protobuf runtime. +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_18_0; + +#[derive(PartialEq,Clone,Default)] +pub struct TreeValue { + // message oneof groups + pub value: ::std::option::Option, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a TreeValue { + fn default() -> &'a TreeValue { + ::default_instance() + } +} + +#[derive(Clone,PartialEq,Debug)] +pub enum TreeValue_oneof_value { + normal_file(TreeValue_NormalFile), + symlink_id(::std::vec::Vec), + tree_id(::std::vec::Vec), + conflict_id(::std::vec::Vec), +} + +impl TreeValue { + pub fn new() -> TreeValue { + ::std::default::Default::default() + } + + // .TreeValue.NormalFile normal_file = 2; + + + pub fn get_normal_file(&self) -> &TreeValue_NormalFile { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::normal_file(ref v)) => v, + _ => ::default_instance(), + } + } + pub fn clear_normal_file(&mut self) { + self.value = ::std::option::Option::None; + } + + pub fn has_normal_file(&self) -> bool { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::normal_file(..)) => true, + _ => false, + } + } + + // Param is passed by value, moved + pub fn set_normal_file(&mut self, v: TreeValue_NormalFile) { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::normal_file(v)) + } + + // Mutable pointer to 
the field. + pub fn mut_normal_file(&mut self) -> &mut TreeValue_NormalFile { + if let ::std::option::Option::Some(TreeValue_oneof_value::normal_file(_)) = self.value { + } else { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::normal_file(TreeValue_NormalFile::new())); + } + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::normal_file(ref mut v)) => v, + _ => panic!(), + } + } + + // Take field + pub fn take_normal_file(&mut self) -> TreeValue_NormalFile { + if self.has_normal_file() { + match self.value.take() { + ::std::option::Option::Some(TreeValue_oneof_value::normal_file(v)) => v, + _ => panic!(), + } + } else { + TreeValue_NormalFile::new() + } + } + + // bytes symlink_id = 3; + + + pub fn get_symlink_id(&self) -> &[u8] { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(ref v)) => v, + _ => &[], + } + } + pub fn clear_symlink_id(&mut self) { + self.value = ::std::option::Option::None; + } + + pub fn has_symlink_id(&self) -> bool { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(..)) => true, + _ => false, + } + } + + // Param is passed by value, moved + pub fn set_symlink_id(&mut self, v: ::std::vec::Vec) { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(v)) + } + + // Mutable pointer to the field. 
+ pub fn mut_symlink_id(&mut self) -> &mut ::std::vec::Vec { + if let ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(_)) = self.value { + } else { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(::std::vec::Vec::new())); + } + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(ref mut v)) => v, + _ => panic!(), + } + } + + // Take field + pub fn take_symlink_id(&mut self) -> ::std::vec::Vec { + if self.has_symlink_id() { + match self.value.take() { + ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(v)) => v, + _ => panic!(), + } + } else { + ::std::vec::Vec::new() + } + } + + // bytes tree_id = 4; + + + pub fn get_tree_id(&self) -> &[u8] { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::tree_id(ref v)) => v, + _ => &[], + } + } + pub fn clear_tree_id(&mut self) { + self.value = ::std::option::Option::None; + } + + pub fn has_tree_id(&self) -> bool { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::tree_id(..)) => true, + _ => false, + } + } + + // Param is passed by value, moved + pub fn set_tree_id(&mut self, v: ::std::vec::Vec) { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::tree_id(v)) + } + + // Mutable pointer to the field. 
+ pub fn mut_tree_id(&mut self) -> &mut ::std::vec::Vec { + if let ::std::option::Option::Some(TreeValue_oneof_value::tree_id(_)) = self.value { + } else { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::tree_id(::std::vec::Vec::new())); + } + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::tree_id(ref mut v)) => v, + _ => panic!(), + } + } + + // Take field + pub fn take_tree_id(&mut self) -> ::std::vec::Vec { + if self.has_tree_id() { + match self.value.take() { + ::std::option::Option::Some(TreeValue_oneof_value::tree_id(v)) => v, + _ => panic!(), + } + } else { + ::std::vec::Vec::new() + } + } + + // bytes conflict_id = 5; + + + pub fn get_conflict_id(&self) -> &[u8] { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(ref v)) => v, + _ => &[], + } + } + pub fn clear_conflict_id(&mut self) { + self.value = ::std::option::Option::None; + } + + pub fn has_conflict_id(&self) -> bool { + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(..)) => true, + _ => false, + } + } + + // Param is passed by value, moved + pub fn set_conflict_id(&mut self, v: ::std::vec::Vec) { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(v)) + } + + // Mutable pointer to the field. 
+ pub fn mut_conflict_id(&mut self) -> &mut ::std::vec::Vec { + if let ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(_)) = self.value { + } else { + self.value = ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(::std::vec::Vec::new())); + } + match self.value { + ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(ref mut v)) => v, + _ => panic!(), + } + } + + // Take field + pub fn take_conflict_id(&mut self) -> ::std::vec::Vec { + if self.has_conflict_id() { + match self.value.take() { + ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(v)) => v, + _ => panic!(), + } + } else { + ::std::vec::Vec::new() + } + } +} + +impl ::protobuf::Message for TreeValue { + fn is_initialized(&self) -> bool { + if let Some(TreeValue_oneof_value::normal_file(ref v)) = self.value { + if !v.is_initialized() { + return false; + } + } + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 2 => { + if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + self.value = ::std::option::Option::Some(TreeValue_oneof_value::normal_file(is.read_message()?)); + }, + 3 => { + if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + self.value = ::std::option::Option::Some(TreeValue_oneof_value::symlink_id(is.read_bytes()?)); + }, + 4 => { + if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + self.value = ::std::option::Option::Some(TreeValue_oneof_value::tree_id(is.read_bytes()?)); + }, + 5 => { + if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + self.value = ::std::option::Option::Some(TreeValue_oneof_value::conflict_id(is.read_bytes()?)); + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if let ::std::option::Option::Some(ref v) = self.value { + match v { + &TreeValue_oneof_value::normal_file(ref v) => { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }, + &TreeValue_oneof_value::symlink_id(ref v) => { + my_size += ::protobuf::rt::bytes_size(3, &v); + }, + &TreeValue_oneof_value::tree_id(ref v) => { + my_size += ::protobuf::rt::bytes_size(4, &v); + }, + &TreeValue_oneof_value::conflict_id(ref v) => { + my_size += ::protobuf::rt::bytes_size(5, &v); + }, + }; + } + 
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if let ::std::option::Option::Some(ref v) = self.value { + match v { + &TreeValue_oneof_value::normal_file(ref v) => { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }, + &TreeValue_oneof_value::symlink_id(ref v) => { + os.write_bytes(3, v)?; + }, + &TreeValue_oneof_value::tree_id(ref v) => { + os.write_bytes(4, v)?; + }, + &TreeValue_oneof_value::conflict_id(ref v) => { + os.write_bytes(5, v)?; + }, + }; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> TreeValue { + TreeValue::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, TreeValue_NormalFile>( + "normal_file", + TreeValue::has_normal_file, + TreeValue::get_normal_file, + )); + 
fields.push(::protobuf::reflect::accessor::make_singular_bytes_accessor::<_>( + "symlink_id", + TreeValue::has_symlink_id, + TreeValue::get_symlink_id, + )); + fields.push(::protobuf::reflect::accessor::make_singular_bytes_accessor::<_>( + "tree_id", + TreeValue::has_tree_id, + TreeValue::get_tree_id, + )); + fields.push(::protobuf::reflect::accessor::make_singular_bytes_accessor::<_>( + "conflict_id", + TreeValue::has_conflict_id, + TreeValue::get_conflict_id, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "TreeValue", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static TreeValue { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(TreeValue::new) + } +} + +impl ::protobuf::Clear for TreeValue { + fn clear(&mut self) { + self.value = ::std::option::Option::None; + self.value = ::std::option::Option::None; + self.value = ::std::option::Option::None; + self.value = ::std::option::Option::None; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for TreeValue { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for TreeValue { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct TreeValue_NormalFile { + // message fields + pub id: ::std::vec::Vec, + pub executable: bool, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a TreeValue_NormalFile { + fn default() -> &'a TreeValue_NormalFile { + ::default_instance() + } +} + +impl TreeValue_NormalFile { + pub fn new() -> TreeValue_NormalFile { + ::std::default::Default::default() + } + + // bytes id = 1; + + + pub fn get_id(&self) -> &[u8] { + &self.id + } + pub fn clear_id(&mut self) { 
+ self.id.clear(); + } + + // Param is passed by value, moved + pub fn set_id(&mut self, v: ::std::vec::Vec) { + self.id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.id + } + + // Take field + pub fn take_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.id, ::std::vec::Vec::new()) + } + + // bool executable = 2; + + + pub fn get_executable(&self) -> bool { + self.executable + } + pub fn clear_executable(&mut self) { + self.executable = false; + } + + // Param is passed by value, moved + pub fn set_executable(&mut self, v: bool) { + self.executable = v; + } +} + +impl ::protobuf::Message for TreeValue_NormalFile { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.id)?; + }, + 2 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.executable = tmp; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if !self.id.is_empty() { + my_size += ::protobuf::rt::bytes_size(1, &self.id); + } + if self.executable != false { + my_size += 2; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> 
::protobuf::ProtobufResult<()> { + if !self.id.is_empty() { + os.write_bytes(1, &self.id)?; + } + if self.executable != false { + os.write_bool(2, self.executable)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> TreeValue_NormalFile { + TreeValue_NormalFile::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "id", + |m: &TreeValue_NormalFile| { &m.id }, + |m: &mut TreeValue_NormalFile| { &mut m.id }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "executable", + |m: &TreeValue_NormalFile| { &m.executable }, + |m: &mut TreeValue_NormalFile| { &mut m.executable }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "TreeValue.NormalFile", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static TreeValue_NormalFile { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(TreeValue_NormalFile::new) + } 
+} + +impl ::protobuf::Clear for TreeValue_NormalFile { + fn clear(&mut self) { + self.id.clear(); + self.executable = false; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for TreeValue_NormalFile { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for TreeValue_NormalFile { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Tree { + // message fields + pub entries: ::protobuf::RepeatedField, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Tree { + fn default() -> &'a Tree { + ::default_instance() + } +} + +impl Tree { + pub fn new() -> Tree { + ::std::default::Default::default() + } + + // repeated .Tree.Entry entries = 1; + + + pub fn get_entries(&self) -> &[Tree_Entry] { + &self.entries + } + pub fn clear_entries(&mut self) { + self.entries.clear(); + } + + // Param is passed by value, moved + pub fn set_entries(&mut self, v: ::protobuf::RepeatedField) { + self.entries = v; + } + + // Mutable pointer to the field. + pub fn mut_entries(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.entries + } + + // Take field + pub fn take_entries(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.entries, ::protobuf::RepeatedField::new()) + } +} + +impl ::protobuf::Message for Tree { + fn is_initialized(&self) -> bool { + for v in &self.entries { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.entries)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + for value in &self.entries { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + for v in &self.entries { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Tree { + Tree::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + 
descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "entries", + |m: &Tree| { &m.entries }, + |m: &mut Tree| { &mut m.entries }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Tree", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Tree { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Tree::new) + } +} + +impl ::protobuf::Clear for Tree { + fn clear(&mut self) { + self.entries.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Tree { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Tree { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Tree_Entry { + // message fields + pub name: ::std::string::String, + pub value: ::protobuf::SingularPtrField, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Tree_Entry { + fn default() -> &'a Tree_Entry { + ::default_instance() + } +} + +impl Tree_Entry { + pub fn new() -> Tree_Entry { + ::std::default::Default::default() + } + + // string name = 1; + + + pub fn get_name(&self) -> &str { + &self.name + } + pub fn clear_name(&mut self) { + self.name.clear(); + } + + // Param is passed by value, moved + pub fn set_name(&mut self, v: ::std::string::String) { + self.name = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_name(&mut self) -> &mut ::std::string::String { + &mut self.name + } + + // Take field + pub fn take_name(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.name, ::std::string::String::new()) + } + + // .TreeValue value = 2; + + + pub fn get_value(&self) -> &TreeValue { + self.value.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_value(&mut self) { + self.value.clear(); + } + + pub fn has_value(&self) -> bool { + self.value.is_some() + } + + // Param is passed by value, moved + pub fn set_value(&mut self, v: TreeValue) { + self.value = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_value(&mut self) -> &mut TreeValue { + if self.value.is_none() { + self.value.set_default(); + } + self.value.as_mut().unwrap() + } + + // Take field + pub fn take_value(&mut self) -> TreeValue { + self.value.take().unwrap_or_else(|| TreeValue::new()) + } +} + +impl ::protobuf::Message for Tree_Entry { + fn is_initialized(&self) -> bool { + for v in &self.value { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.name)?; + }, + 2 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.value)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if !self.name.is_empty() { + my_size += ::protobuf::rt::string_size(1, &self.name); + } + if let Some(ref v) = self.value.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if !self.name.is_empty() { + os.write_string(1, &self.name)?; + } + if let Some(ref v) = self.value.as_ref() { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static 
::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Tree_Entry { + Tree_Entry::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "name", + |m: &Tree_Entry| { &m.name }, + |m: &mut Tree_Entry| { &mut m.name }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "value", + |m: &Tree_Entry| { &m.value }, + |m: &mut Tree_Entry| { &mut m.value }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Tree.Entry", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Tree_Entry { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Tree_Entry::new) + } +} + +impl ::protobuf::Clear for Tree_Entry { + fn clear(&mut self) { + self.name.clear(); + self.value.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Tree_Entry { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Tree_Entry { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Commit { + // message fields + pub parents: ::protobuf::RepeatedField<::std::vec::Vec>, + pub predecessors: ::protobuf::RepeatedField<::std::vec::Vec>, + pub root_tree: ::std::vec::Vec, + pub change_id: ::std::vec::Vec, + pub description: ::std::string::String, + pub author: ::protobuf::SingularPtrField, + pub committer: ::protobuf::SingularPtrField, + pub 
is_open: bool, + pub is_pruned: bool, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Commit { + fn default() -> &'a Commit { + ::default_instance() + } +} + +impl Commit { + pub fn new() -> Commit { + ::std::default::Default::default() + } + + // repeated bytes parents = 1; + + + pub fn get_parents(&self) -> &[::std::vec::Vec] { + &self.parents + } + pub fn clear_parents(&mut self) { + self.parents.clear(); + } + + // Param is passed by value, moved + pub fn set_parents(&mut self, v: ::protobuf::RepeatedField<::std::vec::Vec>) { + self.parents = v; + } + + // Mutable pointer to the field. + pub fn mut_parents(&mut self) -> &mut ::protobuf::RepeatedField<::std::vec::Vec> { + &mut self.parents + } + + // Take field + pub fn take_parents(&mut self) -> ::protobuf::RepeatedField<::std::vec::Vec> { + ::std::mem::replace(&mut self.parents, ::protobuf::RepeatedField::new()) + } + + // repeated bytes predecessors = 2; + + + pub fn get_predecessors(&self) -> &[::std::vec::Vec] { + &self.predecessors + } + pub fn clear_predecessors(&mut self) { + self.predecessors.clear(); + } + + // Param is passed by value, moved + pub fn set_predecessors(&mut self, v: ::protobuf::RepeatedField<::std::vec::Vec>) { + self.predecessors = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_predecessors(&mut self) -> &mut ::protobuf::RepeatedField<::std::vec::Vec> { + &mut self.predecessors + } + + // Take field + pub fn take_predecessors(&mut self) -> ::protobuf::RepeatedField<::std::vec::Vec> { + ::std::mem::replace(&mut self.predecessors, ::protobuf::RepeatedField::new()) + } + + // bytes root_tree = 3; + + + pub fn get_root_tree(&self) -> &[u8] { + &self.root_tree + } + pub fn clear_root_tree(&mut self) { + self.root_tree.clear(); + } + + // Param is passed by value, moved + pub fn set_root_tree(&mut self, v: ::std::vec::Vec) { + self.root_tree = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_root_tree(&mut self) -> &mut ::std::vec::Vec { + &mut self.root_tree + } + + // Take field + pub fn take_root_tree(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.root_tree, ::std::vec::Vec::new()) + } + + // bytes change_id = 4; + + + pub fn get_change_id(&self) -> &[u8] { + &self.change_id + } + pub fn clear_change_id(&mut self) { + self.change_id.clear(); + } + + // Param is passed by value, moved + pub fn set_change_id(&mut self, v: ::std::vec::Vec) { + self.change_id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_change_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.change_id + } + + // Take field + pub fn take_change_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.change_id, ::std::vec::Vec::new()) + } + + // string description = 5; + + + pub fn get_description(&self) -> &str { + &self.description + } + pub fn clear_description(&mut self) { + self.description.clear(); + } + + // Param is passed by value, moved + pub fn set_description(&mut self, v: ::std::string::String) { + self.description = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_description(&mut self) -> &mut ::std::string::String { + &mut self.description + } + + // Take field + pub fn take_description(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.description, ::std::string::String::new()) + } + + // .Commit.Signature author = 6; + + + pub fn get_author(&self) -> &Commit_Signature { + self.author.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_author(&mut self) { + self.author.clear(); + } + + pub fn has_author(&self) -> bool { + self.author.is_some() + } + + // Param is passed by value, moved + pub fn set_author(&mut self, v: Commit_Signature) { + self.author = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_author(&mut self) -> &mut Commit_Signature { + if self.author.is_none() { + self.author.set_default(); + } + self.author.as_mut().unwrap() + } + + // Take field + pub fn take_author(&mut self) -> Commit_Signature { + self.author.take().unwrap_or_else(|| Commit_Signature::new()) + } + + // .Commit.Signature committer = 7; + + + pub fn get_committer(&self) -> &Commit_Signature { + self.committer.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_committer(&mut self) { + self.committer.clear(); + } + + pub fn has_committer(&self) -> bool { + self.committer.is_some() + } + + // Param is passed by value, moved + pub fn set_committer(&mut self, v: Commit_Signature) { + self.committer = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_committer(&mut self) -> &mut Commit_Signature { + if self.committer.is_none() { + self.committer.set_default(); + } + self.committer.as_mut().unwrap() + } + + // Take field + pub fn take_committer(&mut self) -> Commit_Signature { + self.committer.take().unwrap_or_else(|| Commit_Signature::new()) + } + + // bool is_open = 8; + + + pub fn get_is_open(&self) -> bool { + self.is_open + } + pub fn clear_is_open(&mut self) { + self.is_open = false; + } + + // Param is passed by value, moved + pub fn set_is_open(&mut self, v: bool) { + self.is_open = v; + } + + // bool is_pruned = 9; + + + pub fn get_is_pruned(&self) -> bool { + self.is_pruned + } + pub fn clear_is_pruned(&mut self) { + self.is_pruned = false; + } + + // Param is passed by value, moved + pub fn set_is_pruned(&mut self, v: bool) { + self.is_pruned = v; + } +} + +impl ::protobuf::Message for Commit { + fn is_initialized(&self) -> bool { + for v in &self.author { + if !v.is_initialized() { + return false; + } + }; + for v in &self.committer { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_repeated_bytes_into(wire_type, is, &mut self.parents)?; + }, + 2 => { + ::protobuf::rt::read_repeated_bytes_into(wire_type, is, &mut self.predecessors)?; + }, + 3 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.root_tree)?; + }, + 4 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.change_id)?; + }, + 5 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.description)?; + }, + 6 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.author)?; + }, + 7 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.committer)?; + }, + 8 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.is_open = tmp; + }, + 9 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.is_pruned = tmp; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + for value in &self.parents { + my_size += ::protobuf::rt::bytes_size(1, &value); + }; + for value in &self.predecessors { + my_size += ::protobuf::rt::bytes_size(2, &value); + }; + if !self.root_tree.is_empty() { + my_size += ::protobuf::rt::bytes_size(3, &self.root_tree); + } + if !self.change_id.is_empty() { + my_size += ::protobuf::rt::bytes_size(4, &self.change_id); + } + if !self.description.is_empty() { + my_size += ::protobuf::rt::string_size(5, &self.description); + } + if let Some(ref v) = 
self.author.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + if let Some(ref v) = self.committer.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + if self.is_open != false { + my_size += 2; + } + if self.is_pruned != false { + my_size += 2; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + for v in &self.parents { + os.write_bytes(1, &v)?; + }; + for v in &self.predecessors { + os.write_bytes(2, &v)?; + }; + if !self.root_tree.is_empty() { + os.write_bytes(3, &self.root_tree)?; + } + if !self.change_id.is_empty() { + os.write_bytes(4, &self.change_id)?; + } + if !self.description.is_empty() { + os.write_string(5, &self.description)?; + } + if let Some(ref v) = self.author.as_ref() { + os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + if let Some(ref v) = self.committer.as_ref() { + os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + if self.is_open != false { + os.write_bool(8, self.is_open)?; + } + if self.is_pruned != false { + os.write_bool(9, self.is_pruned)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut 
self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Commit { + Commit::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "parents", + |m: &Commit| { &m.parents }, + |m: &mut Commit| { &mut m.parents }, + )); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "predecessors", + |m: &Commit| { &m.predecessors }, + |m: &mut Commit| { &mut m.predecessors }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "root_tree", + |m: &Commit| { &m.root_tree }, + |m: &mut Commit| { &mut m.root_tree }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "change_id", + |m: &Commit| { &m.change_id }, + |m: &mut Commit| { &mut m.change_id }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "description", + |m: &Commit| { &m.description }, + |m: &mut Commit| { &mut m.description }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "author", + |m: &Commit| { &m.author }, + |m: &mut Commit| { &mut m.author }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "committer", + |m: 
&Commit| { &m.committer }, + |m: &mut Commit| { &mut m.committer }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "is_open", + |m: &Commit| { &m.is_open }, + |m: &mut Commit| { &mut m.is_open }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "is_pruned", + |m: &Commit| { &m.is_pruned }, + |m: &mut Commit| { &mut m.is_pruned }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Commit", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Commit { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Commit::new) + } +} + +impl ::protobuf::Clear for Commit { + fn clear(&mut self) { + self.parents.clear(); + self.predecessors.clear(); + self.root_tree.clear(); + self.change_id.clear(); + self.description.clear(); + self.author.clear(); + self.committer.clear(); + self.is_open = false; + self.is_pruned = false; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Commit { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Commit { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Commit_Timestamp { + // message fields + pub millis_since_epoch: u64, + pub tz_offset: i32, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Commit_Timestamp { + fn default() -> &'a Commit_Timestamp { + ::default_instance() + } +} + +impl Commit_Timestamp { + pub fn new() -> Commit_Timestamp { + ::std::default::Default::default() + } + + // uint64 millis_since_epoch = 1; + + + pub fn 
get_millis_since_epoch(&self) -> u64 { + self.millis_since_epoch + } + pub fn clear_millis_since_epoch(&mut self) { + self.millis_since_epoch = 0; + } + + // Param is passed by value, moved + pub fn set_millis_since_epoch(&mut self, v: u64) { + self.millis_since_epoch = v; + } + + // int32 tz_offset = 2; + + + pub fn get_tz_offset(&self) -> i32 { + self.tz_offset + } + pub fn clear_tz_offset(&mut self) { + self.tz_offset = 0; + } + + // Param is passed by value, moved + pub fn set_tz_offset(&mut self, v: i32) { + self.tz_offset = v; + } +} + +impl ::protobuf::Message for Commit_Timestamp { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_uint64()?; + self.millis_since_epoch = tmp; + }, + 2 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.tz_offset = tmp; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if self.millis_since_epoch != 0 { + my_size += ::protobuf::rt::value_size(1, self.millis_since_epoch, ::protobuf::wire_format::WireTypeVarint); + } + if self.tz_offset != 0 { + my_size += ::protobuf::rt::value_size(2, self.tz_offset, ::protobuf::wire_format::WireTypeVarint); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + 
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if self.millis_since_epoch != 0 { + os.write_uint64(1, self.millis_since_epoch)?; + } + if self.tz_offset != 0 { + os.write_int32(2, self.tz_offset)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Commit_Timestamp { + Commit_Timestamp::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint64>( + "millis_since_epoch", + |m: &Commit_Timestamp| { &m.millis_since_epoch }, + |m: &mut Commit_Timestamp| { &mut m.millis_since_epoch }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "tz_offset", + |m: &Commit_Timestamp| { &m.tz_offset }, + |m: &mut Commit_Timestamp| { &mut m.tz_offset }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Commit.Timestamp", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Commit_Timestamp { 
+ static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Commit_Timestamp::new) + } +} + +impl ::protobuf::Clear for Commit_Timestamp { + fn clear(&mut self) { + self.millis_since_epoch = 0; + self.tz_offset = 0; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Commit_Timestamp { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Commit_Timestamp { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Commit_Signature { + // message fields + pub name: ::std::string::String, + pub email: ::std::string::String, + pub timestamp: ::protobuf::SingularPtrField, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Commit_Signature { + fn default() -> &'a Commit_Signature { + ::default_instance() + } +} + +impl Commit_Signature { + pub fn new() -> Commit_Signature { + ::std::default::Default::default() + } + + // string name = 1; + + + pub fn get_name(&self) -> &str { + &self.name + } + pub fn clear_name(&mut self) { + self.name.clear(); + } + + // Param is passed by value, moved + pub fn set_name(&mut self, v: ::std::string::String) { + self.name = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_name(&mut self) -> &mut ::std::string::String { + &mut self.name + } + + // Take field + pub fn take_name(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.name, ::std::string::String::new()) + } + + // string email = 2; + + + pub fn get_email(&self) -> &str { + &self.email + } + pub fn clear_email(&mut self) { + self.email.clear(); + } + + // Param is passed by value, moved + pub fn set_email(&mut self, v: ::std::string::String) { + self.email = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_email(&mut self) -> &mut ::std::string::String { + &mut self.email + } + + // Take field + pub fn take_email(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.email, ::std::string::String::new()) + } + + // .Commit.Timestamp timestamp = 3; + + + pub fn get_timestamp(&self) -> &Commit_Timestamp { + self.timestamp.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_timestamp(&mut self) { + self.timestamp.clear(); + } + + pub fn has_timestamp(&self) -> bool { + self.timestamp.is_some() + } + + // Param is passed by value, moved + pub fn set_timestamp(&mut self, v: Commit_Timestamp) { + self.timestamp = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_timestamp(&mut self) -> &mut Commit_Timestamp { + if self.timestamp.is_none() { + self.timestamp.set_default(); + } + self.timestamp.as_mut().unwrap() + } + + // Take field + pub fn take_timestamp(&mut self) -> Commit_Timestamp { + self.timestamp.take().unwrap_or_else(|| Commit_Timestamp::new()) + } +} + +impl ::protobuf::Message for Commit_Signature { + fn is_initialized(&self) -> bool { + for v in &self.timestamp { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.name)?; + }, + 2 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.email)?; + }, + 3 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.timestamp)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if !self.name.is_empty() { + my_size += ::protobuf::rt::string_size(1, &self.name); + } + if !self.email.is_empty() { + my_size += ::protobuf::rt::string_size(2, &self.email); + } + if let Some(ref v) = self.timestamp.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if !self.name.is_empty() { + os.write_string(1, &self.name)?; + } + if !self.email.is_empty() { + os.write_string(2, 
&self.email)?; + } + if let Some(ref v) = self.timestamp.as_ref() { + os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Commit_Signature { + Commit_Signature::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "name", + |m: &Commit_Signature| { &m.name }, + |m: &mut Commit_Signature| { &mut m.name }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "email", + |m: &Commit_Signature| { &m.email }, + |m: &mut Commit_Signature| { &mut m.email }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "timestamp", + |m: &Commit_Signature| { &m.timestamp }, + |m: &mut Commit_Signature| { &mut m.timestamp }, + )); + 
::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Commit.Signature", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Commit_Signature { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Commit_Signature::new) + } +} + +impl ::protobuf::Clear for Commit_Signature { + fn clear(&mut self) { + self.name.clear(); + self.email.clear(); + self.timestamp.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Commit_Signature { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Commit_Signature { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Conflict { + // message fields + pub removes: ::protobuf::RepeatedField, + pub adds: ::protobuf::RepeatedField, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Conflict { + fn default() -> &'a Conflict { + ::default_instance() + } +} + +impl Conflict { + pub fn new() -> Conflict { + ::std::default::Default::default() + } + + // repeated .Conflict.Part removes = 1; + + + pub fn get_removes(&self) -> &[Conflict_Part] { + &self.removes + } + pub fn clear_removes(&mut self) { + self.removes.clear(); + } + + // Param is passed by value, moved + pub fn set_removes(&mut self, v: ::protobuf::RepeatedField) { + self.removes = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_removes(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.removes + } + + // Take field + pub fn take_removes(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.removes, ::protobuf::RepeatedField::new()) + } + + // repeated .Conflict.Part adds = 2; + + + pub fn get_adds(&self) -> &[Conflict_Part] { + &self.adds + } + pub fn clear_adds(&mut self) { + self.adds.clear(); + } + + // Param is passed by value, moved + pub fn set_adds(&mut self, v: ::protobuf::RepeatedField) { + self.adds = v; + } + + // Mutable pointer to the field. + pub fn mut_adds(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.adds + } + + // Take field + pub fn take_adds(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.adds, ::protobuf::RepeatedField::new()) + } +} + +impl ::protobuf::Message for Conflict { + fn is_initialized(&self) -> bool { + for v in &self.removes { + if !v.is_initialized() { + return false; + } + }; + for v in &self.adds { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.removes)?; + }, + 2 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.adds)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + for value in &self.removes { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + for value in &self.adds { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + for v in &self.removes { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + for v in &self.adds { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn 
into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Conflict { + Conflict::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "removes", + |m: &Conflict| { &m.removes }, + |m: &mut Conflict| { &mut m.removes }, + )); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "adds", + |m: &Conflict| { &m.adds }, + |m: &mut Conflict| { &mut m.adds }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Conflict", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Conflict { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Conflict::new) + } +} + +impl ::protobuf::Clear for Conflict { + fn clear(&mut self) { + self.removes.clear(); + self.adds.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Conflict { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Conflict { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Conflict_Part { + // message fields + pub content: ::protobuf::SingularPtrField, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Conflict_Part { + fn default() -> 
&'a Conflict_Part { + ::default_instance() + } +} + +impl Conflict_Part { + pub fn new() -> Conflict_Part { + ::std::default::Default::default() + } + + // .TreeValue content = 1; + + + pub fn get_content(&self) -> &TreeValue { + self.content.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_content(&mut self) { + self.content.clear(); + } + + pub fn has_content(&self) -> bool { + self.content.is_some() + } + + // Param is passed by value, moved + pub fn set_content(&mut self, v: TreeValue) { + self.content = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_content(&mut self) -> &mut TreeValue { + if self.content.is_none() { + self.content.set_default(); + } + self.content.as_mut().unwrap() + } + + // Take field + pub fn take_content(&mut self) -> TreeValue { + self.content.take().unwrap_or_else(|| TreeValue::new()) + } +} + +impl ::protobuf::Message for Conflict_Part { + fn is_initialized(&self) -> bool { + for v in &self.content { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.content)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if let Some(ref v) = self.content.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if let Some(ref v) = self.content.as_ref() { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Conflict_Part { + Conflict_Part::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: 
::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "content", + |m: &Conflict_Part| { &m.content }, + |m: &mut Conflict_Part| { &mut m.content }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Conflict.Part", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Conflict_Part { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Conflict_Part::new) + } +} + +impl ::protobuf::Clear for Conflict_Part { + fn clear(&mut self) { + self.content.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Conflict_Part { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Conflict_Part { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +static file_descriptor_proto_data: &'static [u8] = b"\ + \n\x0bstore.proto\"\xfb\x01\n\tTreeValue\x12:\n\x0bnormal_file\x18\x02\ + \x20\x01(\x0b2\x15.TreeValue.NormalFileH\0R\nnormalFileB\0\x12!\n\nsymli\ + nk_id\x18\x03\x20\x01(\x0cH\0R\tsymlinkIdB\0\x12\x1b\n\x07tree_id\x18\ + \x04\x20\x01(\x0cH\0R\x06treeIdB\0\x12#\n\x0bconflict_id\x18\x05\x20\x01\ + (\x0cH\0R\nconflictIdB\0\x1aB\n\nNormalFile\x12\x10\n\x02id\x18\x01\x20\ + \x01(\x0cR\x02idB\0\x12\x20\n\nexecutable\x18\x02\x20\x01(\x08R\nexecuta\ + bleB\0:\0B\x07\n\x05value:\0\"v\n\x04Tree\x12'\n\x07entries\x18\x01\x20\ + \x03(\x0b2\x0b.Tree.EntryR\x07entriesB\0\x1aC\n\x05Entry\x12\x14\n\x04na\ + me\x18\x01\x20\x01(\tR\x04nameB\0\x12\"\n\x05value\x18\x02\x20\x01(\x0b2\ + \n.TreeValueR\x05valueB\0:\0:\0\"\x96\x04\n\x06Commit\x12\x1a\n\x07paren\ + 
ts\x18\x01\x20\x03(\x0cR\x07parentsB\0\x12$\n\x0cpredecessors\x18\x02\ + \x20\x03(\x0cR\x0cpredecessorsB\0\x12\x1d\n\troot_tree\x18\x03\x20\x01(\ + \x0cR\x08rootTreeB\0\x12\x1d\n\tchange_id\x18\x04\x20\x01(\x0cR\x08chang\ + eIdB\0\x12\"\n\x0bdescription\x18\x05\x20\x01(\tR\x0bdescriptionB\0\x12+\ + \n\x06author\x18\x06\x20\x01(\x0b2\x11.Commit.SignatureR\x06authorB\0\ + \x121\n\tcommitter\x18\x07\x20\x01(\x0b2\x11.Commit.SignatureR\tcommitte\ + rB\0\x12\x19\n\x07is_open\x18\x08\x20\x01(\x08R\x06isOpenB\0\x12\x1d\n\t\ + is_pruned\x18\t\x20\x01(\x08R\x08isPrunedB\0\x1a\\\n\tTimestamp\x12.\n\ + \x12millis_since_epoch\x18\x01\x20\x01(\x04R\x10millisSinceEpochB\0\x12\ + \x1d\n\ttz_offset\x18\x02\x20\x01(\x05R\x08tzOffsetB\0:\0\x1an\n\tSignat\ + ure\x12\x14\n\x04name\x18\x01\x20\x01(\tR\x04nameB\0\x12\x16\n\x05email\ + \x18\x02\x20\x01(\tR\x05emailB\0\x121\n\ttimestamp\x18\x03\x20\x01(\x0b2\ + \x11.Commit.TimestampR\ttimestampB\0:\0:\0\"\x90\x01\n\x08Conflict\x12*\ + \n\x07removes\x18\x01\x20\x03(\x0b2\x0e.Conflict.PartR\x07removesB\0\x12\ + $\n\x04adds\x18\x02\x20\x03(\x0b2\x0e.Conflict.PartR\x04addsB\0\x1a0\n\ + \x04Part\x12&\n\x07content\x18\x01\x20\x01(\x0b2\n.TreeValueR\x07content\ + B\0:\0:\0B\0b\x06proto3\ +"; + +static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; + +fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { + ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() +} + +pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { + file_descriptor_proto_lazy.get(|| { + parse_descriptor_proto() + }) +} diff --git a/lib/protos/src/working_copy.proto b/lib/protos/src/working_copy.proto new file mode 100644 index 000000000..29e048ce7 --- /dev/null +++ b/lib/protos/src/working_copy.proto @@ -0,0 +1,36 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you 
may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +enum FileType { + Normal = 0; + Symlink = 1; + Executable = 2; +} + +message FileState { + uint64 mtime_millis_since_epoch = 1; + uint64 size = 2; + FileType file_type = 3; +} + +message TreeState { + bytes tree_id = 1; + map file_states = 2; +} + +message Checkout { + bytes commit_id = 1; +} \ No newline at end of file diff --git a/lib/protos/src/working_copy.rs b/lib/protos/src/working_copy.rs new file mode 100644 index 000000000..ea2d66f77 --- /dev/null +++ b/lib/protos/src/working_copy.rs @@ -0,0 +1,676 @@ +// This file is generated by rust-protobuf 2.18.0. Do not edit +// @generated + +// https://github.com/rust-lang/rust-clippy/issues/702 +#![allow(unknown_lints)] +#![allow(clippy::all)] + +#![allow(unused_attributes)] +#![rustfmt::skip] + +#![allow(box_pointers)] +#![allow(dead_code)] +#![allow(missing_docs)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(trivial_casts)] +#![allow(unused_imports)] +#![allow(unused_results)] +//! Generated file from `working_copy.proto` + +/// Generated files are compatible only with the same version +/// of protobuf runtime. 
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_18_0; + +#[derive(PartialEq,Clone,Default)] +pub struct FileState { + // message fields + pub mtime_millis_since_epoch: u64, + pub size: u64, + pub file_type: FileType, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a FileState { + fn default() -> &'a FileState { + ::default_instance() + } +} + +impl FileState { + pub fn new() -> FileState { + ::std::default::Default::default() + } + + // uint64 mtime_millis_since_epoch = 1; + + + pub fn get_mtime_millis_since_epoch(&self) -> u64 { + self.mtime_millis_since_epoch + } + pub fn clear_mtime_millis_since_epoch(&mut self) { + self.mtime_millis_since_epoch = 0; + } + + // Param is passed by value, moved + pub fn set_mtime_millis_since_epoch(&mut self, v: u64) { + self.mtime_millis_since_epoch = v; + } + + // uint64 size = 2; + + + pub fn get_size(&self) -> u64 { + self.size + } + pub fn clear_size(&mut self) { + self.size = 0; + } + + // Param is passed by value, moved + pub fn set_size(&mut self, v: u64) { + self.size = v; + } + + // .FileType file_type = 3; + + + pub fn get_file_type(&self) -> FileType { + self.file_type + } + pub fn clear_file_type(&mut self) { + self.file_type = FileType::Normal; + } + + // Param is passed by value, moved + pub fn set_file_type(&mut self, v: FileType) { + self.file_type = v; + } +} + +impl ::protobuf::Message for FileState { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_uint64()?; + self.mtime_millis_since_epoch = tmp; + }, + 2 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_uint64()?; + self.size = tmp; + }, + 3 => { + ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.file_type, 3, &mut self.unknown_fields)? + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if self.mtime_millis_since_epoch != 0 { + my_size += ::protobuf::rt::value_size(1, self.mtime_millis_since_epoch, ::protobuf::wire_format::WireTypeVarint); + } + if self.size != 0 { + my_size += ::protobuf::rt::value_size(2, self.size, ::protobuf::wire_format::WireTypeVarint); + } + if self.file_type != FileType::Normal { + my_size += ::protobuf::rt::enum_size(3, self.file_type); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if self.mtime_millis_since_epoch != 0 { + os.write_uint64(1, self.mtime_millis_since_epoch)?; + } + if self.size != 0 { + os.write_uint64(2, self.size)?; + } + if self.file_type != FileType::Normal { + os.write_enum(3, ::protobuf::ProtobufEnum::value(&self.file_type))?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { 
+ self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> FileState { + FileState::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint64>( + "mtime_millis_since_epoch", + |m: &FileState| { &m.mtime_millis_since_epoch }, + |m: &mut FileState| { &mut m.mtime_millis_since_epoch }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint64>( + "size", + |m: &FileState| { &m.size }, + |m: &mut FileState| { &mut m.size }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum>( + "file_type", + |m: &FileState| { &m.file_type }, + |m: &mut FileState| { &mut m.file_type }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "FileState", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static FileState { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(FileState::new) + } +} + +impl ::protobuf::Clear for FileState { + fn clear(&mut self) { + self.mtime_millis_since_epoch = 0; + self.size = 0; + 
self.file_type = FileType::Normal; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for FileState { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for FileState { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct TreeState { + // message fields + pub tree_id: ::std::vec::Vec, + pub file_states: ::std::collections::HashMap<::std::string::String, FileState>, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a TreeState { + fn default() -> &'a TreeState { + ::default_instance() + } +} + +impl TreeState { + pub fn new() -> TreeState { + ::std::default::Default::default() + } + + // bytes tree_id = 1; + + + pub fn get_tree_id(&self) -> &[u8] { + &self.tree_id + } + pub fn clear_tree_id(&mut self) { + self.tree_id.clear(); + } + + // Param is passed by value, moved + pub fn set_tree_id(&mut self, v: ::std::vec::Vec) { + self.tree_id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_tree_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.tree_id + } + + // Take field + pub fn take_tree_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.tree_id, ::std::vec::Vec::new()) + } + + // repeated .TreeState.file_states_MapEntry file_states = 2; + + + pub fn get_file_states(&self) -> &::std::collections::HashMap<::std::string::String, FileState> { + &self.file_states + } + pub fn clear_file_states(&mut self) { + self.file_states.clear(); + } + + // Param is passed by value, moved + pub fn set_file_states(&mut self, v: ::std::collections::HashMap<::std::string::String, FileState>) { + self.file_states = v; + } + + // Mutable pointer to the field. + pub fn mut_file_states(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, FileState> { + &mut self.file_states + } + + // Take field + pub fn take_file_states(&mut self) -> ::std::collections::HashMap<::std::string::String, FileState> { + ::std::mem::replace(&mut self.file_states, ::std::collections::HashMap::new()) + } +} + +impl ::protobuf::Message for TreeState { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.tree_id)?; + }, + 2 => { + ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(wire_type, is, &mut self.file_states)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if !self.tree_id.is_empty() { + my_size += ::protobuf::rt::bytes_size(1, &self.tree_id); + } + my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(2, &self.file_states); + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if !self.tree_id.is_empty() { + os.write_bytes(1, &self.tree_id)?; + } + ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(2, &self.file_states, os)?; + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> 
&'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> TreeState { + TreeState::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "tree_id", + |m: &TreeState| { &m.tree_id }, + |m: &mut TreeState| { &mut m.tree_id }, + )); + fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>( + "file_states", + |m: &TreeState| { &m.file_states }, + |m: &mut TreeState| { &mut m.file_states }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "TreeState", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static TreeState { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(TreeState::new) + } +} + +impl ::protobuf::Clear for TreeState { + fn clear(&mut self) { + self.tree_id.clear(); + self.file_states.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for TreeState { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for TreeState { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +pub struct Checkout { + // message fields + pub commit_id: ::std::vec::Vec, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a Checkout { + fn default() -> &'a Checkout { + ::default_instance() + } +} + +impl 
Checkout { + pub fn new() -> Checkout { + ::std::default::Default::default() + } + + // bytes commit_id = 1; + + + pub fn get_commit_id(&self) -> &[u8] { + &self.commit_id + } + pub fn clear_commit_id(&mut self) { + self.commit_id.clear(); + } + + // Param is passed by value, moved + pub fn set_commit_id(&mut self, v: ::std::vec::Vec) { + self.commit_id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_commit_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.commit_id + } + + // Take field + pub fn take_commit_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.commit_id, ::std::vec::Vec::new()) + } +} + +impl ::protobuf::Message for Checkout { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.commit_id)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if !self.commit_id.is_empty() { + my_size += ::protobuf::rt::bytes_size(1, &self.commit_id); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if !self.commit_id.is_empty() { + os.write_bytes(1, &self.commit_id)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn 
get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> Checkout { + Checkout::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "commit_id", + |m: &Checkout| { &m.commit_id }, + |m: &mut Checkout| { &mut m.commit_id }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "Checkout", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static Checkout { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(Checkout::new) + } +} + +impl ::protobuf::Clear for Checkout { + fn clear(&mut self) { + self.commit_id.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for Checkout { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for Checkout { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(Clone,PartialEq,Eq,Debug,Hash)] +pub enum FileType { + Normal = 0, + Symlink = 1, + Executable = 2, +} + +impl ::protobuf::ProtobufEnum for FileType { + 
fn value(&self) -> i32 { + *self as i32 + } + + fn from_i32(value: i32) -> ::std::option::Option { + match value { + 0 => ::std::option::Option::Some(FileType::Normal), + 1 => ::std::option::Option::Some(FileType::Symlink), + 2 => ::std::option::Option::Some(FileType::Executable), + _ => ::std::option::Option::None + } + } + + fn values() -> &'static [Self] { + static values: &'static [FileType] = &[ + FileType::Normal, + FileType::Symlink, + FileType::Executable, + ]; + values + } + + fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + ::protobuf::reflect::EnumDescriptor::new_pb_name::("FileType", file_descriptor_proto()) + }) + } +} + +impl ::std::marker::Copy for FileType { +} + +impl ::std::default::Default for FileType { + fn default() -> Self { + FileType::Normal + } +} + +impl ::protobuf::reflect::ProtobufValue for FileType { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self)) + } +} + +static file_descriptor_proto_data: &'static [u8] = b"\ + \n\x12working_copy.proto\"\x88\x01\n\tFileState\x129\n\x18mtime_millis_s\ + ince_epoch\x18\x01\x20\x01(\x04R\x15mtimeMillisSinceEpochB\0\x12\x14\n\ + \x04size\x18\x02\x20\x01(\x04R\x04sizeB\0\x12(\n\tfile_type\x18\x03\x20\ + \x01(\x0e2\t.FileTypeR\x08fileTypeB\0:\0\"\xb8\x01\n\tTreeState\x12\x19\ + \n\x07tree_id\x18\x01\x20\x01(\x0cR\x06treeIdB\0\x12B\n\x0bfile_states\ + \x18\x02\x20\x03(\x0b2\x1f.TreeState.file_states_MapEntryR\nfileStatesB\ + \0\x1aJ\n\x14file_states_MapEntry\x12\x0e\n\x03key\x18\x01(\tR\x03key\ + \x12\x1e\n\x05value\x18\x02(\x0b2\n.FileStateR\x05value:\x028\x01:\0\"+\ + \n\x08Checkout\x12\x1d\n\tcommit_id\x18\x01\x20\x01(\x0cR\x08commitIdB\0\ + :\0*5\n\x08FileType\x12\n\n\x06Normal\x10\0\x12\x0b\n\x07Symlink\x10\x01\ + 
\x12\x0e\n\nExecutable\x10\x02\x1a\0B\0b\x06proto3\ +"; + +static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; + +fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { + ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() +} + +pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { + file_descriptor_proto_lazy.get(|| { + parse_descriptor_proto() + }) +} diff --git a/lib/src/commit.rs b/lib/src/commit.rs new file mode 100644 index 000000000..7ef8223fc --- /dev/null +++ b/lib/src/commit.rs @@ -0,0 +1,140 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::cmp::Ordering; +use std::fmt::{Debug, Error, Formatter}; +use std::hash::{Hash, Hasher}; +use std::sync::Arc; + +use crate::repo_path::DirRepoPath; +use crate::store; +use crate::store::{ChangeId, CommitId, Signature}; +use crate::store_wrapper::StoreWrapper; +use crate::tree::Tree; + +#[derive(Clone)] +pub struct Commit { + store: Arc, + id: CommitId, + data: Arc, +} + +impl Debug for Commit { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_struct("Commit").field("id", &self.id).finish() + } +} + +impl PartialEq for Commit { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } +} + +impl Eq for Commit {} + +impl Ord for Commit { + fn cmp(&self, other: &Self) -> Ordering { + self.id.cmp(&other.id) + } +} + +impl PartialOrd for Commit { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.id.cmp(&other.id)) + } +} + +impl Hash for Commit { + fn hash(&self, state: &mut H) { + self.id.hash(state) + } +} + +impl Commit { + pub fn new(store: Arc, id: CommitId, data: Arc) -> Self { + Commit { store, id, data } + } + + pub fn id(&self) -> &CommitId { + &self.id + } + + pub fn parent_ids(&self) -> Vec { + if self.data.parents.is_empty() && &self.id != self.store.root_commit_id() { + vec![self.store.root_commit_id().clone()] + } else { + self.data.parents.clone() + } + } + + pub fn parents(&self) -> Vec { + let mut parents = Vec::new(); + for parent in &self.data.parents { + parents.push(self.store.get_commit(parent).unwrap()); + } + if parents.is_empty() && &self.id != self.store.root_commit_id() { + parents.push(self.store.root_commit()) + } + parents + } + + pub fn predecessors(&self) -> Vec { + let mut predecessors = Vec::new(); + for predecessor in &self.data.predecessors { + predecessors.push(self.store.get_commit(predecessor).unwrap()); + } + predecessors + } + + pub fn tree(&self) -> Tree { + self.store + .get_tree(&DirRepoPath::root(), &self.data.root_tree) + .unwrap() + } + + pub fn change_id(&self) -> 
&ChangeId { + &self.data.change_id + } + + pub fn store_commit(&self) -> &store::Commit { + &self.data + } + + pub fn is_open(&self) -> bool { + self.data.is_open + } + + pub fn is_pruned(&self) -> bool { + self.data.is_pruned + } + + pub fn is_empty(&self) -> bool { + let parents = self.parents(); + // TODO: Perhaps the root commit should also be considered empty. + parents.len() == 1 && parents[0].tree().id() == self.tree().id() + } + + pub fn description(&self) -> &str { + &self.data.description + } + + pub fn author(&self) -> &Signature { + &self.data.author + } + + pub fn committer(&self) -> &Signature { + &self.data.committer + } +} diff --git a/lib/src/commit_builder.rs b/lib/src/commit_builder.rs new file mode 100644 index 000000000..3fba500ce --- /dev/null +++ b/lib/src/commit_builder.rs @@ -0,0 +1,172 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use uuid::Uuid; + +use crate::commit::Commit; +use crate::repo::ReadonlyRepo; +use crate::settings::UserSettings; +use crate::store; +use crate::store::{ChangeId, CommitId, Signature, Timestamp, TreeId}; +use crate::store_wrapper::StoreWrapper; +use crate::transaction::Transaction; +use std::sync::Arc; + +#[derive(Debug)] +pub struct CommitBuilder { + store: Arc, + commit: store::Commit, +} + +pub fn new_change_id() -> ChangeId { + ChangeId(Uuid::new_v4().as_bytes().to_vec()) +} +pub fn signature(settings: &UserSettings) -> Signature { + // TODO: check if it's slow to get the timezone etc for every signature + let timestamp = Timestamp::now(); + Signature { + name: settings.user_name(), + email: settings.user_email(), + timestamp, + } +} + +impl CommitBuilder { + pub fn for_new_commit( + settings: &UserSettings, + store: &Arc, + tree_id: TreeId, + ) -> CommitBuilder { + let signature = signature(settings); + let commit = store::Commit { + parents: vec![], + predecessors: vec![], + root_tree: tree_id, + change_id: new_change_id(), + description: String::new(), + author: signature.clone(), + committer: signature, + is_open: false, + is_pruned: false, + }; + CommitBuilder { + store: store.clone(), + commit, + } + } + + pub fn for_rewrite_from( + settings: &UserSettings, + store: &Arc, + predecessor: &Commit, + ) -> CommitBuilder { + let mut commit = predecessor.store_commit().clone(); + commit.predecessors = vec![predecessor.id().clone()]; + commit.committer = signature(settings); + CommitBuilder { + store: store.clone(), + commit, + } + } + + pub fn for_open_commit( + settings: &UserSettings, + store: &Arc, + parent_id: CommitId, + tree_id: TreeId, + ) -> CommitBuilder { + let signature = signature(settings); + let commit = store::Commit { + parents: vec![parent_id], + predecessors: vec![], + root_tree: tree_id, + change_id: new_change_id(), + description: String::new(), + author: signature.clone(), + committer: signature, + is_open: true, + is_pruned: false, + }; 
+ CommitBuilder { + store: store.clone(), + commit, + } + } + + pub fn set_parents(mut self, parents: Vec) -> Self { + self.commit.parents = parents; + self + } + + pub fn set_predecessors(mut self, predecessors: Vec) -> Self { + self.commit.predecessors = predecessors; + self + } + + pub fn set_tree(mut self, tree_id: TreeId) -> Self { + self.commit.root_tree = tree_id; + self + } + + pub fn set_change_id(mut self, change_id: ChangeId) -> Self { + self.commit.change_id = change_id; + self + } + + pub fn generate_new_change_id(mut self) -> Self { + self.commit.change_id = new_change_id(); + self + } + + pub fn set_description(mut self, description: String) -> Self { + self.commit.description = description; + self + } + + pub fn set_open(mut self, is_open: bool) -> Self { + self.commit.is_open = is_open; + self + } + + pub fn set_pruned(mut self, is_pruned: bool) -> Self { + self.commit.is_pruned = is_pruned; + self + } + + pub fn set_author(mut self, author: Signature) -> Self { + self.commit.author = author; + self + } + + pub fn set_committer(mut self, committer: Signature) -> Self { + self.commit.committer = committer; + self + } + + pub fn write_to_new_transaction(self, repo: &ReadonlyRepo, description: &str) -> Commit { + let mut tx = repo.start_transaction(description); + let commit = self.write_to_transaction(&mut tx); + tx.commit(); + commit + } + + pub fn write_to_transaction(mut self, tx: &mut Transaction) -> Commit { + let parents = &mut self.commit.parents; + if parents.contains(self.store.root_commit_id()) { + assert_eq!(parents.len(), 1); + parents.clear(); + } + tx.write_commit(self.commit) + } +} diff --git a/lib/src/conflicts.rs b/lib/src/conflicts.rs new file mode 100644 index 000000000..98f71c636 --- /dev/null +++ b/lib/src/conflicts.rs @@ -0,0 +1,101 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::files; +use crate::repo_path::RepoPath; +use crate::store::{Conflict, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use std::io::Write; + +pub fn materialize_conflict( + store: &StoreWrapper, + path: &RepoPath, + conflict: &Conflict, + file: &mut dyn Write, +) { + match conflict.to_three_way() { + None => { + file.write_all(b"Unresolved complex conflict.\n").unwrap(); + } + Some((Some(left), Some(base), Some(right))) => { + match (left.value, base.value, right.value) { + ( + TreeValue::Normal { + id: left_id, + executable: false, + }, + TreeValue::Normal { + id: base_id, + executable: false, + }, + TreeValue::Normal { + id: right_id, + executable: false, + }, + ) => { + let mut left_contents: Vec = vec![]; + let mut base_contents: Vec = vec![]; + let mut right_contents: Vec = vec![]; + let file_path = path.to_file_repo_path(); + store + .read_file(&file_path, &left_id) + .unwrap() + .read_to_end(&mut left_contents) + .unwrap(); + store + .read_file(&file_path, &base_id) + .unwrap() + .read_to_end(&mut base_contents) + .unwrap(); + store + .read_file(&file_path, &right_id) + .unwrap() + .read_to_end(&mut right_contents) + .unwrap(); + let merge_result = + files::merge(&base_contents, &left_contents, &right_contents); + match merge_result { + files::MergeResult::Resolved(contents) => { + file.write_all(&contents).unwrap(); + } + files::MergeResult::Conflict(hunks) => { + for hunk in hunks { + match hunk { + files::MergeHunk::Resolved(contents) => { + file.write_all(&contents).unwrap(); + } + files::MergeHunk::Conflict { base, 
left, right } => { + file.write_all(b"<<<<<<<").unwrap(); + file.write_all(&left).unwrap(); + file.write_all(b"|||||||").unwrap(); + file.write_all(&base).unwrap(); + file.write_all(b"=======").unwrap(); + file.write_all(&right).unwrap(); + file.write_all(b">>>>>>>").unwrap(); + } + } + } + } + } + } + _ => { + file.write_all(b"Unresolved 3-way conflict.\n").unwrap(); + } + } + } + Some(_) => { + file.write_all(b"Unresolved complex conflict.\n").unwrap(); + } + } +} diff --git a/lib/src/dag_walk.rs b/lib/src/dag_walk.rs new file mode 100644 index 000000000..1aae063ae --- /dev/null +++ b/lib/src/dag_walk.rs @@ -0,0 +1,456 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::HashSet; +use std::iter::Iterator; + +use crate::commit::Commit; +use crate::store::CommitId; +use std::hash::Hash; + +pub struct AncestorsIter { + bfs_iter: BfsIter<'static, 'static, Commit, CommitId, Vec>, +} + +impl Iterator for AncestorsIter { + type Item = Commit; + + fn next(&mut self) -> Option { + self.bfs_iter.next() + } +} + +pub fn walk_ancestors(start: II) -> AncestorsIter +where + II: IntoIterator, +{ + let bfs_iter = bfs( + start, + Box::new(|commit| commit.id().clone()), + Box::new(|commit| commit.parents()), + ); + AncestorsIter { bfs_iter } +} + +pub struct BfsIter<'id_fn, 'neighbors_fn, T, ID, NI> { + id_fn: Box ID + 'id_fn>, + neighbors_fn: Box NI + 'neighbors_fn>, + work: Vec, + visited: HashSet, +} + +impl Iterator for BfsIter<'_, '_, T, ID, NI> +where + ID: Hash + Eq, + NI: IntoIterator, +{ + type Item = T; + + fn next(&mut self) -> Option { + while !self.work.is_empty() { + let c = self.work.pop().unwrap(); + let id = (self.id_fn)(&c); + if self.visited.contains(&id) { + continue; + } + for p in (self.neighbors_fn)(&c) { + self.work.push(p); + } + self.visited.insert(id); + return Some(c); + } + None + } +} + +pub fn bfs<'id_fn, 'neighbors_fn, T, ID, II, NI>( + start: II, + id_fn: Box ID + 'id_fn>, + neighbors_fn: Box NI + 'neighbors_fn>, +) -> BfsIter<'id_fn, 'neighbors_fn, T, ID, NI> +where + ID: Hash + Eq, + II: IntoIterator, + NI: IntoIterator, +{ + BfsIter { + id_fn, + neighbors_fn, + work: start.into_iter().collect(), + visited: Default::default(), + } +} + +pub struct TopoIter<'id_fn, 'neighbors_fn, T, ID, NI> { + id_fn: Box ID + 'id_fn>, + neighbors_fn: Box NI + 'neighbors_fn>, + work: Vec, + visited: HashSet, +} + +impl Iterator for TopoIter<'_, '_, T, ID, NI> +where + ID: Hash + Eq, + NI: IntoIterator, +{ + type Item = T; + + fn next(&mut self) -> Option { + while !self.work.is_empty() { + let c = self.work.pop().unwrap(); + let id = (self.id_fn)(&c); + if self.visited.contains(&id) { + continue; + } + for p 
in (self.neighbors_fn)(&c) { + self.work.push(p); + } + self.visited.insert(id); + return Some(c); + } + None + } +} + +/// Returns neighbors before the node itself. +pub fn topo_order_reverse( + start: II, + id_fn: Box ID>, + mut neighbors_fn: Box NI>, +) -> Vec +where + T: Hash + Eq + Clone, + ID: Hash + Eq + Clone, + II: IntoIterator, + NI: IntoIterator, +{ + let mut visiting = HashSet::new(); + let mut emitted = HashSet::new(); + let mut result = vec![]; + + let mut start_nodes: Vec<_> = start.into_iter().collect(); + start_nodes.reverse(); + + for start_node in start_nodes { + let mut stack = vec![(start_node, false)]; + while !stack.is_empty() { + let (node, neighbors_visited) = stack.pop().unwrap(); + let id = id_fn(&node); + if emitted.contains(&id) { + continue; + } + if !neighbors_visited { + assert!(visiting.insert(id.clone()), "graph has cycle"); + let neighbors = neighbors_fn(&node); + stack.push((node, true)); + for neighbor in neighbors { + stack.push((neighbor, false)); + } + } else { + visiting.remove(&id); + emitted.insert(id); + result.push(node); + } + } + } + result.reverse(); + result +} + +pub fn leaves( + start: II, + neighbors_fn: &mut impl FnMut(&T) -> NI, + id_fn: &impl Fn(&T) -> ID, +) -> HashSet +where + T: Hash + Eq + Clone, + ID: Hash + Eq, + II: IntoIterator, + NI: IntoIterator, +{ + let mut visited = HashSet::new(); + let mut work: Vec = start.into_iter().collect(); + let mut leaves: HashSet = work.iter().cloned().collect(); + let mut non_leaves = HashSet::new(); + while !work.is_empty() { + // TODO: make this not waste so much memory on the sets + let mut new_work = vec![]; + for c in work { + let id: ID = id_fn(&c); + if visited.contains(&id) { + continue; + } + for p in neighbors_fn(&c) { + non_leaves.insert(c.clone()); + new_work.push(p); + } + visited.insert(id); + leaves.insert(c); + } + work = new_work; + } + leaves.difference(&non_leaves).cloned().collect() +} + +/// Find nodes in the start set that are not reachable from 
other nodes in the +/// start set. +pub fn unreachable( + start: II, + neighbors_fn: &impl Fn(&T) -> NI, + id_fn: &impl Fn(&T) -> ID, +) -> HashSet +where + T: Hash + Eq + Clone, + ID: Hash + Eq, + II: IntoIterator, + NI: IntoIterator, +{ + let start: Vec = start.into_iter().collect(); + let mut reachable: HashSet = start.iter().cloned().collect(); + for _node in bfs( + start.into_iter(), + Box::new(id_fn), + Box::new(|node| { + let neighbors: Vec = neighbors_fn(node).into_iter().collect(); + for neighbor in &neighbors { + reachable.remove(&neighbor); + } + neighbors + }), + ) {} + reachable +} + +pub fn common_ancestor<'a, I1, I2>(set1: I1, set2: I2) -> Commit +where + I1: IntoIterator, + I2: IntoIterator, +{ + let set1: Vec = set1.into_iter().cloned().collect(); + let set2: Vec = set2.into_iter().cloned().collect(); + closest_common_node(set1, set2, &|commit| commit.parents(), &|commit| { + commit.id().clone() + }) + .unwrap() +} + +pub fn closest_common_node( + set1: II1, + set2: II2, + neighbors_fn: &impl Fn(&T) -> NI, + id_fn: &impl Fn(&T) -> ID, +) -> Option +where + T: Hash + Eq + Clone, + ID: Hash + Eq, + II1: IntoIterator, + II2: IntoIterator, + NI: IntoIterator, +{ + let mut visited1 = HashSet::new(); + let mut visited2 = HashSet::new(); + + let mut work1: Vec = set1.into_iter().collect(); + let mut work2: Vec = set2.into_iter().collect(); + while !work1.is_empty() || !work2.is_empty() { + let mut new_work1 = vec![]; + for node in work1 { + let id: ID = id_fn(&node); + if visited2.contains(&id) { + return Some(node); + } + if visited1.insert(id) { + for neighbor in neighbors_fn(&node) { + new_work1.push(neighbor); + } + } + } + work1 = new_work1; + + let mut new_work2 = vec![]; + for node in work2 { + let id: ID = id_fn(&node); + if visited1.contains(&id) { + return Some(node); + } + if visited2.insert(id) { + for neighbor in neighbors_fn(&node) { + new_work2.push(neighbor); + } + } + } + work2 = new_work2; + } + None +} + +#[cfg(test)] +mod tests { + use 
super::*; + use std::collections::HashMap; + + #[test] + fn topo_order_reverse_linear() { + // This graph: + // o C + // o B + // o A + + let mut neighbors = HashMap::new(); + neighbors.insert('A', vec![]); + neighbors.insert('B', vec!['A']); + neighbors.insert('C', vec!['B']); + + let common = topo_order_reverse( + vec!['C'], + Box::new(|node| *node), + Box::new(move |node| neighbors[node].clone()), + ); + + assert_eq!(common, vec!['C', 'B', 'A']); + } + + #[test] + fn topo_order_reverse_merge() { + // This graph: + // o F + // |\ + // o | E + // | o D + // | o C + // | o B + // |/ + // o A + + let mut neighbors = HashMap::new(); + neighbors.insert('A', vec![]); + neighbors.insert('B', vec!['A']); + neighbors.insert('C', vec!['B']); + neighbors.insert('D', vec!['C']); + neighbors.insert('E', vec!['A']); + neighbors.insert('F', vec!['E', 'D']); + + let common = topo_order_reverse( + vec!['F'], + Box::new(|node| *node), + Box::new(move |node| neighbors[node].clone()), + ); + + assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']); + } + + #[test] + fn topo_order_reverse_multiple_heads() { + // This graph: + // o F + // |\ + // o | E + // | o D + // | | o C + // | | | + // | | o B + // | |/ + // |/ + // o A + + let mut neighbors = HashMap::new(); + neighbors.insert('A', vec![]); + neighbors.insert('B', vec!['A']); + neighbors.insert('C', vec!['B']); + neighbors.insert('D', vec!['A']); + neighbors.insert('E', vec!['A']); + neighbors.insert('F', vec!['E', 'D']); + + let common = topo_order_reverse( + vec!['F', 'C'], + Box::new(|node| *node), + Box::new(move |node| neighbors[node].clone()), + ); + + assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']); + } + + #[test] + fn closest_common_node_tricky() { + // Test this case where A is the shortest distance away, but we still want the + // result to be B because A is an ancestor of B. In other words, we want + // to minimize the longest distance. 
+ // + // E H + // |\ /| + // | D G | + // | C F | + // \ \ / / + // \ B / + // \|/ + // A + + let mut neighbors = HashMap::new(); + neighbors.insert('A', vec![]); + neighbors.insert('B', vec!['A']); + neighbors.insert('C', vec!['B']); + neighbors.insert('D', vec!['C']); + neighbors.insert('E', vec!['A', 'D']); + neighbors.insert('F', vec!['B']); + neighbors.insert('G', vec!['F']); + neighbors.insert('H', vec!['A', 'G']); + + let common = closest_common_node( + vec!['E'], + vec!['H'], + &|node| neighbors[node].clone(), + &|node| *node, + ); + + // TODO: fix the implementation to return B + assert_eq!(common, Some('A')); + } + + #[test] + fn unreachable_mixed() { + // Test the uppercase letters are in the start set + // + // D F + // |/| + // C e + // |/ + // b + // | + // A + + let mut neighbors = HashMap::new(); + neighbors.insert('A', vec![]); + neighbors.insert('b', vec!['A']); + neighbors.insert('C', vec!['b']); + neighbors.insert('D', vec!['C']); + neighbors.insert('e', vec!['b']); + neighbors.insert('F', vec!['C', 'e']); + let expected: HashSet = vec!['D', 'F'].into_iter().collect(); + + let actual = unreachable( + vec!['A', 'C', 'D', 'F'], + &|node| neighbors[node].clone(), + &|node| *node, + ); + assert_eq!(actual, expected); + + // Check with a different order in the start set + let actual = unreachable( + vec!['F', 'D', 'C', 'A'], + &|node| neighbors[node].clone(), + &|node| *node, + ); + assert_eq!(actual, expected); + } +} diff --git a/lib/src/evolution.rs b/lib/src/evolution.rs new file mode 100644 index 000000000..d7c08ed43 --- /dev/null +++ b/lib/src/evolution.rs @@ -0,0 +1,500 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::{HashMap, HashSet}; +use std::sync::{Arc, Mutex}; + +use crate::commit::Commit; +use crate::commit_builder::CommitBuilder; +use crate::dag_walk::{bfs, closest_common_node, leaves, walk_ancestors}; +use crate::repo::{ReadonlyRepo, Repo}; +use crate::repo_path::DirRepoPath; +use crate::rewrite::{merge_commit_trees, rebase_commit}; +use crate::settings::UserSettings; +use crate::store::{ChangeId, CommitId}; +use crate::store_wrapper::StoreWrapper; +use crate::transaction::{MutableRepo, Transaction}; +use crate::trees::merge_trees; +use crate::view::View; + +#[derive(Debug, Clone)] +struct State { + /// Contains all successors whether they have the same change id or not. + successors: HashMap>, + /// Contains the subset of the keys in `successors` for which there is a + /// successor with the same change id. 
+ obsolete_commits: HashSet, + orphan_commits: HashSet, + divergent_changes: HashMap>, +} + +impl State { + fn calculate(store: &StoreWrapper, view: &dyn View) -> State { + let mut successors = HashMap::new(); + let mut obsolete_commits = HashSet::new(); + let mut orphan_commits = HashSet::new(); + let mut divergent_changes = HashMap::new(); + let mut heads = vec![]; + for commit_id in view.heads() { + heads.push(store.get_commit(commit_id).unwrap()); + } + let mut commits = HashSet::new(); + let mut children = HashMap::new(); + let mut change_to_commits = HashMap::new(); + for commit in walk_ancestors(heads) { + children.insert(commit.id().clone(), HashSet::new()); + change_to_commits + .entry(commit.change_id().clone()) + .or_insert_with(HashSet::new) + .insert(commit.id().clone()); + commits.insert(commit); + } + // Scan all commits to find obsolete commits and to build a lookup of for + // children of a commit + for commit in &commits { + if commit.is_pruned() { + obsolete_commits.insert(commit.id().clone()); + } + for predecessor in commit.predecessors() { + if !commits.contains(&predecessor) { + continue; + } + successors + .entry(predecessor.id().clone()) + .or_insert_with(HashSet::new) + .insert(commit.id().clone()); + if predecessor.change_id() == commit.change_id() { + obsolete_commits.insert(predecessor.id().clone()); + } + } + for parent in commit.parents() { + if let Some(children) = children.get_mut(parent.id()) { + children.insert(commit.id().clone()); + } + } + } + // Find divergent commits + for (change_id, commit_ids) in change_to_commits { + let divergent: HashSet = + commit_ids.difference(&obsolete_commits).cloned().collect(); + if divergent.len() > 1 { + divergent_changes.insert(change_id, divergent); + } + } + // Find orphans by walking to the children of obsolete commits + let mut work: Vec = obsolete_commits.iter().map(ToOwned::to_owned).collect(); + while !work.is_empty() { + let commit_id = work.pop().unwrap(); + for child in 
children.get(&commit_id).unwrap() { + if orphan_commits.insert(child.clone()) { + work.push(child.clone()); + } + } + } + orphan_commits = orphan_commits + .difference(&obsolete_commits) + .map(ToOwned::to_owned) + .collect(); + + State { + successors, + obsolete_commits, + orphan_commits, + divergent_changes, + } + } + + pub fn new_parent(&self, store: &StoreWrapper, old_parent_id: &CommitId) -> HashSet { + let mut new_parents = HashSet::new(); + if let Some(successor_ids) = self.successors.get(old_parent_id) { + let old_parent = store.get_commit(old_parent_id).unwrap(); + let successors: HashSet<_> = successor_ids + .iter() + .map(|id| store.get_commit(id).unwrap()) + .collect(); + let mut children = HashMap::new(); + for successor in &successors { + for parent in successor.parents() { + if let Some(parent) = successors.get(&parent) { + children + .entry(parent.clone()) + .or_insert_with(HashSet::new) + .insert(successor.clone()); + } + } + } + let mut all_candidates = HashSet::new(); + for successor in &successors { + if successor.change_id() != old_parent.change_id() { + continue; + } + + // Start with the successor as candidate. + let mut candidates = HashSet::new(); + candidates.insert(successor.clone()); + + // If the successor has children that are successors of the same + // commit, we consider the original commit to be a split. We then return + // the tip-most successor. + candidates = leaves( + candidates, + &mut |commit: &Commit| -> HashSet { + if let Some(children) = children.get(commit) { + children.clone() + } else { + HashSet::new() + } + }, + &|commit: &Commit| -> CommitId { commit.id().clone() }, + ); + + // If a successor is pruned, use its parent(s) instead. 
+ candidates = leaves( + candidates, + &mut |commit: &Commit| -> Vec { + if commit.is_pruned() { + commit.parents() + } else { + vec![] + } + }, + &|commit: &Commit| -> CommitId { commit.id().clone() }, + ); + + for candidate in candidates { + all_candidates.insert(candidate.clone()); + } + } + + // Filter out candidates that are ancestors of or other candidates. + let non_heads: Vec<_> = all_candidates + .iter() + .flat_map(|commit| commit.parents()) + .collect(); + for commit in walk_ancestors(non_heads) { + all_candidates.remove(&commit); + } + + for candidate in all_candidates { + // TODO: Make this not recursive + for effective_successor in self.new_parent(store, candidate.id()) { + new_parents.insert(effective_successor); + } + } + } + if new_parents.is_empty() { + // TODO: Should we go to the parents here too if the commit is pruned? + new_parents.insert(old_parent_id.clone()); + } + new_parents + } +} + +pub trait Evolution { + fn successors(&self, commit_id: &CommitId) -> HashSet; + + fn is_obsolete(&self, commit_id: &CommitId) -> bool; + + fn is_orphan(&self, commit_id: &CommitId) -> bool; + + fn is_divergent(&self, change_id: &ChangeId) -> bool; + + /// Given a current parent, finds the new parent candidates. If the current + /// parent is not obsolete, then a singleton set of that commit will be + /// returned. + /// + /// * If a successor is pruned, its parent(s) will instead be included (or + /// their parents if they are also pruned). + /// + /// * If the commit has multiple live successors, the tip-most one(s) of + /// them will be chosen. + /// + /// The second case is more complex than it probably seems. For example, + /// let's say commit A was split into B, A', and C (where A' has the same + /// change id as A). Then C is rebased to somewhere else and becomes C'. + /// We will choose that C' as effective successor even though it has a + /// different change id and is not a descendant of one that does. 
+ fn new_parent(&self, old_parent_id: &CommitId) -> HashSet; +} + +pub struct ReadonlyEvolution<'r> { + repo: &'r ReadonlyRepo, + state: Mutex>>, +} + +pub trait EvolveListener { + fn orphan_evolved(&mut self, orphan: &Commit, new_commit: &Commit); + fn orphan_target_ambiguous(&mut self, orphan: &Commit); + fn divergent_resolved(&mut self, divergents: &[Commit], resolved: &Commit); + fn divergent_no_common_predecessor(&mut self, commit1: &Commit, commit2: &Commit); +} + +impl Evolution for ReadonlyEvolution<'_> { + fn successors(&self, commit_id: &CommitId) -> HashSet { + self.get_state() + .successors + .get(commit_id) + .cloned() + .unwrap_or_else(HashSet::new) + } + + fn is_obsolete(&self, commit_id: &CommitId) -> bool { + self.get_state().obsolete_commits.contains(commit_id) + } + + fn is_orphan(&self, commit_id: &CommitId) -> bool { + self.get_state().orphan_commits.contains(commit_id) + } + + fn is_divergent(&self, change_id: &ChangeId) -> bool { + self.get_state().divergent_changes.contains_key(change_id) + } + + fn new_parent(&self, old_parent_id: &CommitId) -> HashSet { + self.get_state() + .new_parent(self.repo.store(), old_parent_id) + } +} + +impl<'r> ReadonlyEvolution<'r> { + pub fn new(repo: &'r ReadonlyRepo) -> Self { + ReadonlyEvolution { + repo, + state: Mutex::new(None), + } + } + + fn get_state(&self) -> Arc { + let mut locked_state = self.state.lock().unwrap(); + if locked_state.is_none() { + locked_state.replace(Arc::new(State::calculate( + self.repo.store(), + self.repo.view(), + ))); + } + locked_state.as_ref().unwrap().clone() + } + + pub fn start_modification<'m>(&self, repo: &'m MutableRepo<'r>) -> MutableEvolution<'r, 'm> { + MutableEvolution { + repo, + state: Mutex::new(self.state.lock().unwrap().clone()), + } + } +} + +pub struct MutableEvolution<'r, 'm: 'r> { + repo: &'m MutableRepo<'r>, + state: Mutex>>, +} + +impl Evolution for MutableEvolution<'_, '_> { + fn successors(&self, commit_id: &CommitId) -> HashSet { + self.get_state() + 
.successors + .get(commit_id) + .cloned() + .unwrap_or_else(HashSet::new) + } + + fn is_obsolete(&self, commit_id: &CommitId) -> bool { + self.get_state().obsolete_commits.contains(commit_id) + } + + fn is_orphan(&self, commit_id: &CommitId) -> bool { + self.get_state().orphan_commits.contains(commit_id) + } + + fn is_divergent(&self, change_id: &ChangeId) -> bool { + self.get_state().divergent_changes.contains_key(change_id) + } + + fn new_parent(&self, old_parent_id: &CommitId) -> HashSet { + self.get_state() + .new_parent(self.repo.store(), old_parent_id) + } +} + +impl MutableEvolution<'_, '_> { + fn get_state(&self) -> Arc { + let mut locked_state = self.state.lock().unwrap(); + if locked_state.is_none() { + locked_state.replace(Arc::new(State::calculate( + self.repo.store(), + self.repo.view(), + ))); + } + locked_state.as_ref().unwrap().clone() + } + + pub fn invalidate(&mut self) { + let mut locked_state = self.state.lock(); + locked_state.as_mut().unwrap().take(); + } +} + +pub fn evolve( + user_settings: &UserSettings, + tx: &mut Transaction, + listener: &mut dyn EvolveListener, +) { + let store = tx.store().clone(); + // TODO: update the state in the transaction + let state = tx.as_repo_mut().evolution_mut().get_state(); + + // Resolving divergence can creates new orphans but not vice versa, so resolve + // divergence first. 
+ for commit_ids in state.divergent_changes.values() { + let commits: HashSet = commit_ids + .iter() + .map(|id| store.get_commit(&id).unwrap()) + .collect(); + evolve_divergent_change(user_settings, &store, tx, listener, &commits); + } + + let orphans: HashSet = state + .orphan_commits + .iter() + .map(|id| store.get_commit(&id).unwrap()) + .collect(); + let non_heads: HashSet = orphans.iter().flat_map(|commit| commit.parents()).collect(); + let orphan_heads: HashSet = orphans.difference(&non_heads).cloned().collect(); + let mut orphans_topo_order = vec![]; + for commit in bfs( + orphan_heads, + Box::new(|commit| commit.id().clone()), + Box::new(|commit| { + commit + .parents() + .iter() + .filter(|commit| state.orphan_commits.contains(commit.id())) + .cloned() + .collect::>() + }), + ) { + orphans_topo_order.push(commit); + } + + while !orphans_topo_order.is_empty() { + let orphan = orphans_topo_order.pop().unwrap(); + let old_parents = orphan.parents(); + let mut new_parents = vec![]; + let mut ambiguous_new_parents = false; + for old_parent in &old_parents { + let new_parent_candidates = state.new_parent(&store, old_parent.id()); + if new_parent_candidates.len() > 1 { + ambiguous_new_parents = true; + break; + } + new_parents.push( + store + .get_commit(new_parent_candidates.iter().next().unwrap()) + .unwrap(), + ); + } + if ambiguous_new_parents { + listener.orphan_target_ambiguous(&orphan); + } else { + let new_commit = rebase_commit(user_settings, tx, &orphan, &new_parents); + listener.orphan_evolved(&orphan, &new_commit); + } + } +} + +fn evolve_divergent_change( + user_settings: &UserSettings, + store: &Arc, + tx: &mut Transaction, + listener: &mut dyn EvolveListener, + commits: &HashSet, +) { + // Resolve divergence pair-wise, starting with the two oldest commits. 
+ let mut commits: Vec = commits.iter().cloned().collect(); + commits.sort_by(|a: &Commit, b: &Commit| a.committer().timestamp.cmp(&b.committer().timestamp)); + commits.reverse(); + + // Create a copy to pass to the listener + let sources = commits.clone(); + + while commits.len() > 1 { + let commit2 = commits.pop().unwrap(); + let commit1 = commits.pop().unwrap(); + + let common_predecessor = closest_common_node( + vec![commit1.clone()], + vec![commit2.clone()], + &|commit: &Commit| commit.predecessors(), + &|commit: &Commit| commit.id().clone(), + ); + match common_predecessor { + None => { + listener.divergent_no_common_predecessor(&commit1, &commit2); + return; + } + Some(common_predecessor) => { + let resolved_commit = evolve_two_divergent_commits( + user_settings, + store, + tx, + &common_predecessor, + &commit1, + &commit2, + ); + commits.push(resolved_commit); + } + } + } + + let resolved = commits.pop().unwrap(); + listener.divergent_resolved(&sources, &resolved); +} + +fn evolve_two_divergent_commits( + user_settings: &UserSettings, + store: &Arc, + tx: &mut Transaction, + common_predecessor: &Commit, + commit1: &Commit, + commit2: &Commit, +) -> Commit { + let new_parents = commit1.parents(); + let rebased_tree2 = if commit2.parents() == new_parents { + commit2.tree() + } else { + let old_base_tree = merge_commit_trees(store, &commit2.parents()); + let new_base_tree = merge_commit_trees(store, &new_parents); + let tree_id = merge_trees(&new_base_tree, &old_base_tree, &commit2.tree()).unwrap(); + store.get_tree(&DirRepoPath::root(), &tree_id).unwrap() + }; + let rebased_predecessor_tree = if common_predecessor.parents() == new_parents { + common_predecessor.tree() + } else { + let old_base_tree = merge_commit_trees(store, &common_predecessor.parents()); + let new_base_tree = merge_commit_trees(store, &new_parents); + let tree_id = + merge_trees(&new_base_tree, &old_base_tree, &common_predecessor.tree()).unwrap(); + store.get_tree(&DirRepoPath::root(), 
&tree_id).unwrap() + }; + + let resolved_tree = + merge_trees(&commit1.tree(), &rebased_predecessor_tree, &rebased_tree2).unwrap(); + + // TODO: Merge commit description and other commit metadata. How do we deal with + // conflicts? It's probably best to interactively ask the caller (which + // might ask the user in interactive use). + CommitBuilder::for_rewrite_from(user_settings, store, &commit1) + .set_tree(resolved_tree) + .set_predecessors(vec![commit1.id().clone(), commit2.id().clone()]) + .write_to_transaction(tx) +} diff --git a/lib/src/files.rs b/lib/src/files.rs new file mode 100644 index 000000000..f821851b5 --- /dev/null +++ b/lib/src/files.rs @@ -0,0 +1,351 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use diff::slice as diff_slice; +use std::fmt::{Debug, Error, Formatter}; + +fn is_word_byte(a: u8) -> bool { + a.is_ascii_alphanumeric() || a == b'_' +} + +fn is_same_word(a: u8, b: u8) -> bool { + // Don't allow utf-8 code points to be split into separate words + (is_word_byte(a) && is_word_byte(b)) || a & 0x80 != 0 +} + +fn tokenize(data: &[u8]) -> Vec> { + // TODO: Fix this code to not be so inefficient, and to allow the word + // delimiter to be configured. 
+ let mut output = vec![]; + let mut current = vec![]; + let mut maybe_prev: Option = None; + for b in data { + let b = *b; + match maybe_prev { + None => current.push(b), + Some(prev) => { + if is_same_word(prev, b) { + current.push(b); + } else { + output.push(current); + current = vec![b]; + } + } + } + maybe_prev = Some(b); + } + if !current.is_empty() { + output.push(current); + } + output +} + +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum DiffHunk { + Unmodified(Vec), + Added(Vec), + Removed(Vec), +} + +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct DiffLine { + pub left_line_number: u32, + pub right_line_number: u32, + pub has_left_content: bool, + pub has_right_content: bool, + pub hunks: Vec, +} + +impl DiffLine { + fn reset_line(&mut self) { + self.has_left_content = false; + self.has_right_content = false; + self.hunks.clear(); + } + + pub fn is_unmodified(&self) -> bool { + self.hunks + .iter() + .all(|hunk| matches!(hunk, DiffHunk::Unmodified(_))) + } +} + +pub fn diff(left: &[u8], right: &[u8], callback: &mut impl FnMut(&DiffLine)) { + // TODO: Should we attempt to interpret as utf-8 and otherwise break only at + // newlines? 
+ let left_tokens = tokenize(left); + let right_tokens = tokenize(right); + let result = diff_slice(&left_tokens, &right_tokens); + let mut diff_line = DiffLine { + left_line_number: 1, + right_line_number: 1, + has_left_content: false, + has_right_content: false, + hunks: vec![], + }; + for hunk in result { + match hunk { + diff::Result::Both(left, right) => { + assert!(left == right); + diff_line.has_left_content = true; + diff_line.has_right_content = true; + diff_line.hunks.push(DiffHunk::Unmodified(left.clone())); + if left == &[b'\n'] { + callback(&diff_line); + diff_line.left_line_number += 1; + diff_line.right_line_number += 1; + diff_line.reset_line(); + } + } + diff::Result::Left(left) => { + diff_line.has_left_content = true; + diff_line.hunks.push(DiffHunk::Removed(left.clone())); + if left == &[b'\n'] { + callback(&diff_line); + diff_line.left_line_number += 1; + diff_line.reset_line(); + } + } + diff::Result::Right(right) => { + diff_line.has_right_content = true; + diff_line.hunks.push(DiffHunk::Added(right.clone())); + if right == &[b'\n'] { + callback(&diff_line); + diff_line.right_line_number += 1; + diff_line.reset_line(); + } + } + } + } + if !diff_line.hunks.is_empty() { + callback(&diff_line); + } +} + +#[derive(PartialEq, Eq, Clone)] +pub enum MergeHunk { + Resolved(Vec), + Conflict { + base: Vec, + left: Vec, + right: Vec, + }, +} + +impl Debug for MergeHunk { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + match self { + MergeHunk::Resolved(data) => f + .debug_tuple("Resolved") + .field(&String::from_utf8_lossy(data)) + .finish(), + MergeHunk::Conflict { base, left, right } => f + .debug_struct("Conflict") + .field("base", &String::from_utf8_lossy(base)) + .field("left", &String::from_utf8_lossy(left)) + .field("right", &String::from_utf8_lossy(right)) + .finish(), + } + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum MergeResult { + Resolved(Vec), + Conflict(Vec), +} + +/// Returns None if the merge fails +pub fn 
merge(base: &[u8], left: &[u8], right: &[u8]) -> MergeResult { + let base_tokens = tokenize(base); + let left_tokens = tokenize(left); + let right_tokens = tokenize(right); + + let left_diff = diff_slice(&base_tokens, &left_tokens); + let right_diff = diff_slice(&base_tokens, &right_tokens); + + let mut hunk: Vec = vec![]; + let mut hunks: Vec = vec![]; + let mut left_it = left_diff.iter(); + let mut right_it = right_diff.iter(); + + let mut left_hunk = left_it.next(); + let mut right_hunk = right_it.next(); + loop { + match (left_hunk, right_hunk) { + (None, None) => { + break; + } + (Some(diff::Result::Both(left_data_before, left_data_after)), _) + if left_data_before == left_data_after => + { + // Left unmodified + match right_hunk.unwrap() { + diff::Result::Both(right_data_before, right_data_after) => { + // Left unmodified, right modified + assert_eq!(left_data_before, right_data_before); + hunk.append(&mut right_data_after.to_vec()); + left_hunk = left_it.next(); + right_hunk = right_it.next(); + } + diff::Result::Left(right_data_before) => { + // Left unmodified, right deleted + assert_eq!(left_data_before, right_data_before); + left_hunk = left_it.next(); + right_hunk = right_it.next(); + } + diff::Result::Right(right_data_after) => { + // Left unmodified, right inserted + hunk.append(&mut right_data_after.to_vec()); + right_hunk = right_it.next(); + } + } + } + (_, Some(diff::Result::Both(right_data_before, right_data_after))) + if right_data_before == right_data_after => + { + // Right unmodified + match left_hunk.unwrap() { + diff::Result::Both(left_data_before, left_data_after) => { + // Right unmodified, left modified + assert_eq!(left_data_before, right_data_before); + hunk.append(&mut left_data_after.to_vec()); + left_hunk = left_it.next(); + right_hunk = right_it.next(); + } + diff::Result::Left(left_data_before) => { + // Right unmodified, left deleted + assert_eq!(left_data_before, right_data_before); + left_hunk = left_it.next(); + right_hunk = 
right_it.next(); + } + diff::Result::Right(left_data_after) => { + // Right unmodified, left inserted + hunk.append(&mut left_data_after.to_vec()); + left_hunk = left_it.next(); + } + } + } + ( + Some(diff::Result::Left(left_data_before)), + Some(diff::Result::Left(right_data_before)), + ) => { + // Both deleted the same + assert_eq!(left_data_before, right_data_before); + left_hunk = left_it.next(); + right_hunk = right_it.next(); + } + ( + Some(diff::Result::Right(left_data_after)), + Some(diff::Result::Right(right_data_after)), + ) => { + if left_data_after == right_data_after { + // Both inserted the same + hunk.append(&mut left_data_after.to_vec()); + } else { + // Each side inserted different + if !hunk.is_empty() { + hunks.push(MergeHunk::Resolved(hunk)); + } + hunks.push(MergeHunk::Conflict { + base: vec![], + left: left_data_after.to_vec(), + right: right_data_after.to_vec(), + }); + hunk = vec![]; + } + left_hunk = left_it.next(); + right_hunk = right_it.next(); + } + (Some(diff::Result::Right(left_data_after)), None) => { + // Left inserted at EOF + hunk.append(&mut left_data_after.to_vec()); + left_hunk = left_it.next(); + } + (None, Some(diff::Result::Right(right_data_after))) => { + // Right inserted at EOF + hunk.append(&mut right_data_after.to_vec()); + right_hunk = right_it.next(); + } + _ => { + panic!("unhandled merge case: {:?}, {:?}", left_hunk, right_hunk); + } + } + } + if hunks.is_empty() { + MergeResult::Resolved(hunk) + } else { + if !hunk.is_empty() { + hunks.push(MergeHunk::Resolved(hunk)); + } + MergeResult::Conflict(hunks) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_merge() { + assert_eq!(merge(b"", b"", b""), MergeResult::Resolved(b"".to_vec())); + assert_eq!( + merge(b"a", b"a", b"a"), + MergeResult::Resolved(b"a".to_vec()) + ); + assert_eq!(merge(b"a", b"", b"a"), MergeResult::Resolved(b"".to_vec())); + assert_eq!(merge(b"a", b"a", b""), MergeResult::Resolved(b"".to_vec())); + assert_eq!(merge(b"a", 
b"", b""), MergeResult::Resolved(b"".to_vec())); + assert_eq!( + merge(b"a", b"a b", b"a"), + MergeResult::Resolved(b"a b".to_vec()) + ); + assert_eq!( + merge(b"a", b"a", b"a b"), + MergeResult::Resolved(b"a b".to_vec()) + ); + assert_eq!( + merge(b"a", b"a b", b"a c"), + MergeResult::Conflict(vec![ + MergeHunk::Resolved(b"a ".to_vec()), + MergeHunk::Conflict { + base: b"".to_vec(), + left: b"b".to_vec(), + right: b"c".to_vec() + } + ]) + ); + assert_eq!( + merge(b"a", b"b", b"a"), + MergeResult::Resolved(b"b".to_vec()) + ); + assert_eq!( + merge(b"a", b"a", b"b"), + MergeResult::Resolved(b"b".to_vec()) + ); + // TODO: It seems like the a->b transition get reported as [Left(a),Right(b)] + // instead of [Both(a,b)], so there is unexpectedly no conflict + // here + assert_eq!(merge(b"a", b"", b"b"), MergeResult::Resolved(b"b".to_vec())); + assert_eq!(merge(b"a", b"b", b""), MergeResult::Resolved(b"b".to_vec())); + assert_eq!( + merge(b"a", b"b", b"c"), + MergeResult::Conflict(vec![MergeHunk::Conflict { + base: b"".to_vec(), + left: b"b".to_vec(), + right: b"c".to_vec() + }]) + ); + } +} diff --git a/lib/src/git_store.rs b/lib/src/git_store.rs new file mode 100644 index 000000000..5179667c3 --- /dev/null +++ b/lib/src/git_store.rs @@ -0,0 +1,597 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::{Debug, Error, Formatter}; +use std::io::Cursor; +use std::io::Read; +use std::path::PathBuf; +use std::sync::Mutex; +use std::time::Duration; + +use git2::Oid; +use protobuf::Message; + +use crate::repo_path::{DirRepoPath, FileRepoPath}; +use crate::store::{ + ChangeId, Commit, CommitId, Conflict, ConflictId, ConflictPart, FileId, MillisSinceEpoch, + Signature, Store, StoreError, StoreResult, SymlinkId, Timestamp, Tree, TreeId, TreeValue, +}; + +const NOTES_REF: &str = "refs/notes/jj/commits"; +const NOTES_REF_LOCK: &str = "refs/notes/jj/commits.lock"; +const CONFLICT_SUFFIX: &str = ".jjconflict"; + +impl From for StoreError { + fn from(err: git2::Error) -> Self { + match err.code() { + git2::ErrorCode::NotFound => StoreError::NotFound, + _other => StoreError::Other(err.to_string()), + } + } +} + +pub struct GitStore { + repo: Mutex, + empty_tree_id: TreeId, +} + +impl GitStore { + pub fn load(path: PathBuf) -> Self { + let repo = Mutex::new(git2::Repository::open(path).unwrap()); + let empty_tree_id = + TreeId(hex::decode("4b825dc642cb6eb9a060e54bf8d69288fbee4904").unwrap()); + GitStore { + repo, + empty_tree_id, + } + } +} + +fn signature_from_git(signature: git2::Signature) -> Signature { + let name = signature.name().unwrap_or("").to_owned(); + let email = signature.email().unwrap_or("").to_owned(); + let timestamp = MillisSinceEpoch((signature.when().seconds() * 1000) as u64); + let tz_offset = signature.when().offset_minutes(); + Signature { + name, + email, + timestamp: Timestamp { + timestamp, + tz_offset, + }, + } +} + +fn signature_to_git(signature: &Signature) -> git2::Signature { + let name = &signature.name; + let email = &signature.email; + let time = git2::Time::new( + (signature.timestamp.timestamp.0 / 1000) as i64, + signature.timestamp.tz_offset, + ); + git2::Signature::new(&name, &email, &time).unwrap() +} + +fn serialize_note(commit: &Commit) -> String { + let mut proto = protos::store::Commit::new(); + proto.is_open = 
commit.is_open; + proto.is_pruned = commit.is_pruned; + proto.change_id = commit.change_id.0.to_vec(); + for predecessor in &commit.predecessors { + proto.predecessors.push(predecessor.0.to_vec()); + } + let bytes = proto.write_to_bytes().unwrap(); + hex::encode(bytes) +} + +fn deserialize_note(commit: &mut Commit, note: &str) { + let bytes = hex::decode(note).unwrap(); + let mut cursor = Cursor::new(bytes); + let proto: protos::store::Commit = protobuf::parse_from_reader(&mut cursor).unwrap(); + commit.is_open = proto.is_open; + commit.is_pruned = proto.is_pruned; + commit.change_id = ChangeId(proto.change_id); + for predecessor in &proto.predecessors { + commit.predecessors.push(CommitId(predecessor.clone())); + } +} + +impl Debug for GitStore { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_struct("GitStore") + .field("path", &self.repo.lock().unwrap().path()) + .finish() + } +} + +impl Store for GitStore { + fn hash_length(&self) -> usize { + 20 + } + + fn read_file(&self, _path: &FileRepoPath, id: &FileId) -> StoreResult> { + if id.0.len() != self.hash_length() { + return Err(StoreError::NotFound); + } + let locked_repo = self.repo.lock().unwrap(); + let blob = locked_repo + .find_blob(Oid::from_bytes(id.0.as_slice()).unwrap()) + .unwrap(); + let content = blob.content().to_owned(); + Ok(Box::new(Cursor::new(content))) + } + + fn write_file(&self, _path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult { + let mut bytes = Vec::new(); + contents.read_to_end(&mut bytes).unwrap(); + let locked_repo = self.repo.lock().unwrap(); + let oid = locked_repo.blob(bytes.as_slice()).unwrap(); + Ok(FileId(oid.as_bytes().to_vec())) + } + + fn read_symlink(&self, _path: &FileRepoPath, id: &SymlinkId) -> Result { + if id.0.len() != self.hash_length() { + return Err(StoreError::NotFound); + } + let locked_repo = self.repo.lock().unwrap(); + let blob = locked_repo + .find_blob(Oid::from_bytes(id.0.as_slice()).unwrap()) + .unwrap(); + let target = 
String::from_utf8(blob.content().to_owned()).unwrap(); + Ok(target) + } + + fn write_symlink(&self, _path: &FileRepoPath, target: &str) -> Result { + let locked_repo = self.repo.lock().unwrap(); + let oid = locked_repo.blob(target.as_bytes()).unwrap(); + Ok(SymlinkId(oid.as_bytes().to_vec())) + } + + fn empty_tree_id(&self) -> &TreeId { + &self.empty_tree_id + } + + fn read_tree(&self, _path: &DirRepoPath, id: &TreeId) -> StoreResult { + if id == &self.empty_tree_id { + return Ok(Tree::default()); + } + if id.0.len() != self.hash_length() { + return Err(StoreError::NotFound); + } + + let locked_repo = self.repo.lock().unwrap(); + let git_tree = locked_repo + .find_tree(Oid::from_bytes(id.0.as_slice()).unwrap()) + .unwrap(); + let mut tree = Tree::default(); + for entry in git_tree.iter() { + let name = entry.name().unwrap(); + let (name, value) = match entry.kind().unwrap() { + git2::ObjectType::Tree => { + let id = TreeId(entry.id().as_bytes().to_vec()); + (entry.name().unwrap(), TreeValue::Tree(id)) + } + git2::ObjectType::Blob => match entry.filemode() { + 0o100644 => { + let id = FileId(entry.id().as_bytes().to_vec()); + if name.ends_with(CONFLICT_SUFFIX) { + ( + &name[0..name.len() - CONFLICT_SUFFIX.len()], + TreeValue::Conflict(ConflictId(entry.id().as_bytes().to_vec())), + ) + } else { + ( + name, + TreeValue::Normal { + id, + executable: false, + }, + ) + } + } + 0o100755 => { + let id = FileId(entry.id().as_bytes().to_vec()); + ( + name, + TreeValue::Normal { + id, + executable: true, + }, + ) + } + 0o120000 => { + let id = SymlinkId(entry.id().as_bytes().to_vec()); + (name, TreeValue::Symlink(id)) + } + mode => panic!("unexpected file mode {:?}", mode), + }, + git2::ObjectType::Commit => { + let id = CommitId(entry.id().as_bytes().to_vec()); + (name, TreeValue::GitSubmodule(id)) + } + kind => panic!("unexpected object type {:?}", kind), + }; + tree.set(name.to_string(), value); + } + Ok(tree) + } + + fn write_tree(&self, _path: &DirRepoPath, contents: 
&Tree) -> StoreResult { + let locked_repo = self.repo.lock().unwrap(); + let mut builder = locked_repo.treebuilder(None).unwrap(); + for entry in contents.entries() { + let name = entry.name().to_owned(); + let (name, id, filemode) = match entry.value() { + TreeValue::Normal { + id, + executable: false, + } => (name, &id.0, 0o100644), + TreeValue::Normal { + id, + executable: true, + } => (name, &id.0, 0o100755), + TreeValue::Symlink(id) => (name, &id.0, 0o120000), + TreeValue::Tree(id) => (name, &id.0, 0o040000), + TreeValue::GitSubmodule(id) => (name, &id.0, 0o160000), + TreeValue::Conflict(id) => (name + CONFLICT_SUFFIX, &id.0, 0o100644), + }; + builder + .insert(name, Oid::from_bytes(id).unwrap(), filemode) + .unwrap(); + } + let oid = builder.write().unwrap(); + Ok(TreeId(oid.as_bytes().to_vec())) + } + + fn read_commit(&self, id: &CommitId) -> StoreResult { + if id.0.len() != self.hash_length() { + return Err(StoreError::NotFound); + } + + let locked_repo = self.repo.lock().unwrap(); + let git_commit_id = Oid::from_bytes(id.0.as_slice())?; + let commit = locked_repo.find_commit(git_commit_id)?; + let change_id = ChangeId(id.0.clone().as_slice()[0..16].to_vec()); + let parents: Vec<_> = commit + .parent_ids() + .map(|oid| CommitId(oid.as_bytes().to_vec())) + .collect(); + let tree_id = TreeId(commit.tree_id().as_bytes().to_vec()); + let description = commit.message().unwrap_or("").to_owned(); + let author = signature_from_git(commit.author()); + let committer = signature_from_git(commit.committer()); + + let mut commit = Commit { + parents, + predecessors: vec![], + root_tree: tree_id, + change_id, + description, + author, + committer, + is_open: false, + is_pruned: false, + }; + + let maybe_note = locked_repo.find_note(Some(NOTES_REF), git_commit_id).ok(); + if let Some(note) = maybe_note { + deserialize_note(&mut commit, note.message().unwrap()); + } + + Ok(commit) + } + + fn write_commit(&self, contents: &Commit) -> StoreResult { + // TODO: We shouldn't 
have to create an in-memory index just to write an + // object... + let locked_repo = self.repo.lock().unwrap(); + let git_tree = locked_repo.find_tree(Oid::from_bytes(contents.root_tree.0.as_slice())?)?; + let author = signature_to_git(&contents.author); + let committer = signature_to_git(&contents.committer); + let message = &contents.description; + + let mut parents = vec![]; + for parent_id in &contents.parents { + let parent_git_commit = + locked_repo.find_commit(Oid::from_bytes(parent_id.0.as_slice())?)?; + parents.push(parent_git_commit); + } + let parent_refs: Vec<_> = parents.iter().collect(); + let git_id = + locked_repo.commit(None, &author, &committer, &message, &git_tree, &parent_refs)?; + let id = CommitId(git_id.as_bytes().to_vec()); + let note = serialize_note(contents); + + // TODO: Include the extra commit data in commit headers instead of a ref. + // Unfortunately, it doesn't seem like libgit2-rs supports that. Perhaps + // we'll have to serialize/deserialize the commit data ourselves. + loop { + let note_status = locked_repo.note( + &committer, + &committer, + Some(NOTES_REF), + git_id, + ¬e, + false, + ); + match note_status { + Err(err) if err.message().contains(NOTES_REF_LOCK) => { + // It seems that libgit2 doesn't retry when .git/refs/notes/jj/commits.lock + // already exists. + // TODO: Report this to libgit2. 
+ let retry_delay = Duration::from_millis(10); + std::thread::sleep(retry_delay); + } + Err(err) => { + return Err(StoreError::from(err)); + } + Ok(_) => { + break; + } + } + } + + Ok(id) + } + + fn read_conflict(&self, id: &ConflictId) -> StoreResult { + let mut file = self.read_file(&FileRepoPath::from("unused"), &FileId(id.0.clone()))?; + let mut data = String::new(); + file.read_to_string(&mut data)?; + let json: serde_json::Value = serde_json::from_str(&data).unwrap(); + Ok(Conflict { + removes: conflict_part_list_from_json(json.get("removes").unwrap()), + adds: conflict_part_list_from_json(json.get("adds").unwrap()), + }) + } + + fn write_conflict(&self, conflict: &Conflict) -> StoreResult { + let json = serde_json::json!({ + "removes": conflict_part_list_to_json(&conflict.removes), + "adds": conflict_part_list_to_json(&conflict.adds), + }); + let json_string = json.to_string(); + let mut bytes = json_string.as_bytes(); + // TODO: add a ref pointing to it so it won't get GC'd + let file_id = self.write_file(&FileRepoPath::from("unused"), &mut bytes)?; + Ok(ConflictId(file_id.0)) + } +} + +fn conflict_part_list_to_json(parts: &[ConflictPart]) -> serde_json::Value { + serde_json::Value::Array(parts.iter().map(conflict_part_to_json).collect()) +} + +fn conflict_part_list_from_json(json: &serde_json::Value) -> Vec { + json.as_array() + .unwrap() + .iter() + .map(conflict_part_from_json) + .collect() +} + +fn conflict_part_to_json(part: &ConflictPart) -> serde_json::Value { + serde_json::json!({ + "value": tree_value_to_json(&part.value), + }) +} + +fn conflict_part_from_json(json: &serde_json::Value) -> ConflictPart { + let json_value = json.get("value").unwrap(); + ConflictPart { + value: tree_value_from_json(json_value), + } +} + +fn tree_value_to_json(value: &TreeValue) -> serde_json::Value { + match value { + TreeValue::Normal { id, executable } => serde_json::json!({ + "file": { + "id": id.hex(), + "executable": executable, + }, + }), + 
TreeValue::Symlink(id) => serde_json::json!({ + "symlink_id": id.hex(), + }), + TreeValue::Tree(id) => serde_json::json!({ + "tree_id": id.hex(), + }), + TreeValue::GitSubmodule(id) => serde_json::json!({ + "submodule_id": id.hex(), + }), + TreeValue::Conflict(id) => serde_json::json!({ + "conflict_id": id.hex(), + }), + } +} + +fn tree_value_from_json(json: &serde_json::Value) -> TreeValue { + if let Some(json_file) = json.get("file") { + TreeValue::Normal { + id: FileId(bytes_vec_from_json(json_file.get("id").unwrap())), + executable: json_file.get("executable").unwrap().as_bool().unwrap(), + } + } else if let Some(json_id) = json.get("symlink_id") { + TreeValue::Symlink(SymlinkId(bytes_vec_from_json(json_id))) + } else if let Some(json_id) = json.get("tree_id") { + TreeValue::Tree(TreeId(bytes_vec_from_json(json_id))) + } else if let Some(json_id) = json.get("submodule_id") { + TreeValue::GitSubmodule(CommitId(bytes_vec_from_json(json_id))) + } else if let Some(json_id) = json.get("conflict_id") { + TreeValue::Conflict(ConflictId(bytes_vec_from_json(json_id))) + } else { + panic!("unexpected json value in conflict: {:#?}", json); + } +} + +fn bytes_vec_from_json(value: &serde_json::Value) -> Vec { + hex::decode(value.as_str().unwrap()).unwrap() +} + +#[cfg(test)] +mod tests { + + use crate::store::{FileId, MillisSinceEpoch}; + + use super::*; + + #[test] + fn read_plain_git_commit() { + let temp_dir = tempfile::tempdir().unwrap(); + let git_repo_path = temp_dir.path(); + let git_repo = git2::Repository::init(git_repo_path.clone()).unwrap(); + + // Add a commit with some files in + let blob1 = git_repo.blob(b"content1").unwrap(); + let blob2 = git_repo.blob(b"normal").unwrap(); + let mut dir_tree_builder = git_repo.treebuilder(None).unwrap(); + dir_tree_builder.insert("normal", blob1, 0o100644).unwrap(); + dir_tree_builder.insert("symlink", blob2, 0o120000).unwrap(); + let dir_tree_id = dir_tree_builder.write().unwrap(); + let mut root_tree_builder = 
git_repo.treebuilder(None).unwrap(); + root_tree_builder + .insert("dir", dir_tree_id, 0o040000) + .unwrap(); + let root_tree_id = root_tree_builder.write().unwrap(); + let git_author = git2::Signature::new( + "git author", + "git.author@example.com", + &git2::Time::new(1000, 60), + ) + .unwrap(); + let git_committer = git2::Signature::new( + "git committer", + "git.committer@example.com", + &git2::Time::new(2000, -480), + ) + .unwrap(); + let git_tree = git_repo.find_tree(root_tree_id).unwrap(); + let git_commit_id = git_repo + .commit( + None, + &git_author, + &git_committer, + "git commit message", + &git_tree, + &[], + ) + .unwrap(); + let commit_id = CommitId(git_commit_id.as_bytes().to_vec()); + + let store = GitStore::load(git_repo_path.to_owned()); + let commit = store.read_commit(&commit_id).unwrap(); + assert_eq!( + &commit.change_id, + &ChangeId(commit_id.0.as_slice()[0..16].to_vec()) + ); + assert_eq!(commit.parents, vec![]); + assert_eq!(commit.predecessors, vec![]); + assert_eq!(commit.root_tree.0.as_slice(), root_tree_id.as_bytes()); + assert_eq!(commit.is_open, false); + assert_eq!(commit.is_pruned, false); + assert_eq!(commit.description, "git commit message"); + assert_eq!(commit.author.name, "git author"); + assert_eq!(commit.author.email, "git.author@example.com"); + assert_eq!( + commit.author.timestamp.timestamp, + MillisSinceEpoch(1000 * 1000) + ); + assert_eq!(commit.author.timestamp.tz_offset, 60); + assert_eq!(commit.committer.name, "git committer"); + assert_eq!(commit.committer.email, "git.committer@example.com"); + assert_eq!( + commit.committer.timestamp.timestamp, + MillisSinceEpoch(2000 * 1000) + ); + assert_eq!(commit.committer.timestamp.tz_offset, -480); + + let root_tree = store + .read_tree( + &DirRepoPath::root(), + &TreeId(root_tree_id.as_bytes().to_vec()), + ) + .unwrap(); + let mut root_entries = root_tree.entries(); + let dir = root_entries.next().unwrap(); + assert_eq!(root_entries.next(), None); + assert_eq!(dir.name(), 
"dir"); + assert_eq!( + dir.value(), + &TreeValue::Tree(TreeId(dir_tree_id.as_bytes().to_vec())) + ); + + let dir_tree = store + .read_tree( + &DirRepoPath::from("dir/"), + &TreeId(dir_tree_id.as_bytes().to_vec()), + ) + .unwrap(); + let mut files = dir_tree.entries(); + let normal_file = files.next().unwrap(); + let symlink = files.next().unwrap(); + assert_eq!(files.next(), None); + assert_eq!(normal_file.name(), "normal"); + assert_eq!( + normal_file.value(), + &TreeValue::Normal { + id: FileId(blob1.as_bytes().to_vec()), + executable: false + } + ); + assert_eq!(symlink.name(), "symlink"); + assert_eq!( + symlink.value(), + &TreeValue::Symlink(SymlinkId(blob2.as_bytes().to_vec())) + ); + } + + #[test] + fn overlapping_git_commit_id() { + let temp_dir = tempfile::tempdir().unwrap(); + let git_repo_path = temp_dir.path(); + git2::Repository::init(git_repo_path.clone()).unwrap(); + let store = GitStore::load(git_repo_path.to_owned()); + let signature = Signature { + name: "Someone".to_string(), + email: "someone@example.com".to_string(), + timestamp: Timestamp { + timestamp: MillisSinceEpoch(0), + tz_offset: 0, + }, + }; + let commit1 = Commit { + parents: vec![], + predecessors: vec![], + root_tree: store.empty_tree_id().clone(), + change_id: ChangeId(vec![]), + description: "initial".to_string(), + author: signature.clone(), + committer: signature, + is_open: false, + is_pruned: false, + }; + let commit_id1 = store.write_commit(&commit1).unwrap(); + let mut commit2 = commit1; + commit2.predecessors.push(commit_id1.clone()); + let expected_error_message = format!("note for '{}' exists already", commit_id1.hex()); + match store.write_commit(&commit2) { + Ok(_) => { + panic!("expectedly successfully wrote two commits with the same git commit object") + } + Err(StoreError::Other(message)) if message.contains(&expected_error_message) => {} + Err(err) => panic!("unexpected error: {:?}", err), + }; + } +} diff --git a/lib/src/index.rs b/lib/src/index.rs new file mode 
100644 index 000000000..4aaf87e77 --- /dev/null +++ b/lib/src/index.rs @@ -0,0 +1,1521 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +extern crate byteorder; + +use std::cmp::{max, min, Ordering}; +use std::collections::{BTreeMap, BinaryHeap, HashMap, HashSet}; +use std::fs::File; +use std::io; +use std::io::{Cursor, Read, Write}; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, Mutex}; + +use blake2::{Blake2b, Digest}; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use tempfile::NamedTempFile; + +use crate::commit::Commit; +use crate::dag_walk; +use crate::op_store::OperationId; +use crate::operation::Operation; +use crate::repo::{ReadonlyRepo, Repo}; +use crate::store::CommitId; +use crate::store_wrapper::StoreWrapper; +use std::fmt::{Debug, Formatter}; +use std::ops::Bound; + +struct CommitGraphEntry<'a> { + data: &'a [u8], + hash_length: usize, +} + +// TODO: Add pointers to ancestors further back, like a skip list. Clear the +// lowest set bit to determine which generation number the pointers point to. 
+impl CommitGraphEntry<'_> { + fn size(hash_length: usize) -> usize { + 16 + hash_length + } + + fn generation_number(&self) -> u32 { + (&self.data[0..]).read_u32::().unwrap() + } + + fn num_parents(&self) -> u32 { + (&self.data[4..]).read_u32::().unwrap() + } + + fn parent1_pos(&self) -> u32 { + (&self.data[8..]).read_u32::().unwrap() + } + + fn parent2_overflow_pos(&self) -> u32 { + (&self.data[12..]).read_u32::().unwrap() + } + + fn commit_id(&self) -> CommitId { + CommitId(self.data[16..16 + self.hash_length].to_vec()) + } +} + +struct CommitLookupEntry<'a> { + data: &'a [u8], + hash_length: usize, +} + +impl CommitLookupEntry<'_> { + fn size(hash_length: usize) -> usize { + hash_length + 4 + } + + fn commit_id(&self) -> CommitId { + CommitId(self.data[0..self.hash_length].to_vec()) + } + + fn pos(&self) -> u32 { + (&self.data[self.hash_length..self.hash_length + 4]) + .read_u32::() + .unwrap() + } +} + +// File format: +// u32: number of entries +// u32: number of parent overflow entries +// for each entry, in some topological order with parents first: +// u32: generation number +// u32: number of parents +// u32: position in this table for parent 1 +// u32: position in the overflow table of parent 2 +// : commit id +// for each entry, sorted by commit id: +// : commit id +// u32: position in the entry table above +// TODO: add a version number +// TODO: replace the table by a trie so we don't have to repeat the full commit +// ids +// TODO: add a fanout table like git's commit graph has? 
+pub struct IndexFile { + parent_file: Option>, + num_parent_commits: u32, + name: String, + hash_length: usize, + commit_graph_entry_size: usize, + commit_lookup_entry_size: usize, + // Number of commits not counting the parent file + num_local_commits: u32, + graph: Vec, + lookup: Vec, + overflow_parent: Vec, +} + +impl Debug for IndexFile { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + f.debug_struct("IndexFile") + .field("name", &self.name) + .field("parent_file", &self.parent_file) + .finish() + } +} + +fn topo_order_parents_first( + store: &StoreWrapper, + heads: Vec, + parent_file: Option>, +) -> Vec { + // First create a list of all commits in topological order with children first + // (reverse of what we want) + let mut work = vec![]; + for head in &heads { + work.push(store.get_commit(head).unwrap()); + } + let mut commits = vec![]; + let mut visited = HashSet::new(); + let mut in_parent_file = HashSet::new(); + let parent_file_source = parent_file + .as_ref() + .map(|file| file.as_ref().as_composite()); + while !work.is_empty() { + let commit = work.pop().unwrap(); + if parent_file_source + .as_ref() + .map_or(false, |index| index.has_id(commit.id())) + { + in_parent_file.insert(commit.id().clone()); + continue; + } else if !visited.insert(commit.id().clone()) { + continue; + } + + work.extend(commit.parents()); + commits.push(commit); + } + drop(visited); + + // Now create the topological order with parents first. If we run into any + // commits whose parents have not all been indexed, put them in the map of + // waiting commit (keyed by the parent commit they're waiting for). + // Note that the order in the graph doesn't really have to be topological, but + // it seems like a useful property to have. 
+ + // Commits waiting for their parents to be added + let mut waiting = HashMap::new(); + + let mut result = vec![]; + let mut visited = in_parent_file; + while !commits.is_empty() { + let commit = commits.pop().unwrap(); + let mut waiting_for_parent = false; + for parent in &commit.parents() { + if !visited.contains(parent.id()) { + waiting + .entry(parent.id().clone()) + .or_insert_with(Vec::new) + .push(commit.clone()); + waiting_for_parent = true; + break; + } + } + if !waiting_for_parent { + visited.insert(commit.id().clone()); + if let Some(children) = waiting.remove(commit.id()) { + commits.extend(children); + } + result.push(commit); + } + } + assert!(waiting.is_empty()); + result +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HexPrefix(String); + +impl HexPrefix { + pub fn new(prefix: String) -> HexPrefix { + assert!( + prefix + .matches(|c: char| !c.is_ascii_hexdigit() || c.is_ascii_uppercase()) + .next() + .is_none(), + "invalid hex prefix: {}", + &prefix + ); + HexPrefix(prefix) + } + + pub fn bytes_prefixes(&self) -> (CommitId, CommitId) { + if self.0.len() % 2 == 0 { + let bytes = hex::decode(&self.0).unwrap(); + (CommitId(bytes.clone()), CommitId(bytes)) + } else { + let min_bytes = hex::decode(&(self.0.clone() + "0")).unwrap(); + let prefix = min_bytes[0..min_bytes.len() - 1].to_vec(); + (CommitId(prefix), CommitId(min_bytes)) + } + } + + pub fn matches(&self, id: &CommitId) -> bool { + hex::encode(&id.0).starts_with(&self.0) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PrefixResolution { + NoMatch, + SingleMatch(CommitId), + AmbiguousMatch, +} + +impl PrefixResolution { + fn plus(&self, other: &PrefixResolution) -> PrefixResolution { + match (self, other) { + (PrefixResolution::NoMatch, other) => other.clone(), + (local, PrefixResolution::NoMatch) => local.clone(), + (PrefixResolution::AmbiguousMatch, _) => PrefixResolution::AmbiguousMatch, + (_, PrefixResolution::AmbiguousMatch) => PrefixResolution::AmbiguousMatch, + 
(PrefixResolution::SingleMatch(_), PrefixResolution::SingleMatch(_)) => { + PrefixResolution::AmbiguousMatch + } + } + } +} + +#[derive(Debug)] +struct UnsavedGraphEntry { + commit_id: CommitId, + generation_number: u32, + parent_positions: Vec, +} + +pub struct UnsavedIndexData { + parent_file: Option>, + num_parent_commits: u32, + hash_length: usize, + graph: Vec, + lookup: BTreeMap, +} + +impl UnsavedIndexData { + fn full(hash_length: usize) -> Self { + Self { + parent_file: None, + num_parent_commits: 0, + hash_length, + graph: vec![], + lookup: BTreeMap::new(), + } + } + + fn incremental(parent_file: Arc) -> Self { + let num_parent_commits = parent_file.num_parent_commits + parent_file.num_local_commits; + let hash_length = parent_file.hash_length; + Self { + parent_file: Some(parent_file), + num_parent_commits, + hash_length, + graph: vec![], + lookup: BTreeMap::new(), + } + } + + pub fn as_composite(&self) -> CompositeIndex { + CompositeIndex(self) + } + + fn add_commit(&mut self, commit: &Commit) { + self.add_commit_data(commit.id().clone(), commit.parent_ids()); + } + + fn add_commit_data(&mut self, id: CommitId, parent_ids: Vec) { + let mut entry = UnsavedGraphEntry { + commit_id: id, + generation_number: 0, + parent_positions: vec![], + }; + for parent_id in parent_ids { + let parent_entry = self + .as_composite() + .entry_by_id(&parent_id) + .expect("parent commit is not indexed"); + entry.generation_number = max( + entry.generation_number, + parent_entry.generation_number() + 1, + ); + entry.parent_positions.push(parent_entry.pos); + } + self.lookup.insert( + entry.commit_id.clone(), + self.graph.len() as u32 + self.num_parent_commits, + ); + self.graph.push(entry); + } + + fn serialize(self) -> Vec { + assert_eq!(self.graph.len(), self.lookup.len()); + + let num_commits = self.graph.len() as u32; + + let mut buf = vec![]; + + if let Some(parent_file) = &self.parent_file { + buf.write_u32::(parent_file.name.len() as u32) + .unwrap(); + 
buf.write_all(&parent_file.name.as_bytes()).unwrap(); + } else { + buf.write_u32::(0).unwrap(); + } + + buf.write_u32::(num_commits).unwrap(); + // We'll write the actual value later + let parent_overflow_offset = buf.len(); + buf.write_u32::(0 as u32).unwrap(); + + let mut parent_overflow = vec![]; + for entry in self.graph { + buf.write_u32::(entry.generation_number) + .unwrap(); + buf.write_u32::(entry.parent_positions.len() as u32) + .unwrap(); + let mut p1_pos = 0; + let parent_overflow_pos = parent_overflow.len() as u32; + for (i, parent_pos) in entry.parent_positions.iter().enumerate() { + if i == 0 { + p1_pos = *parent_pos; + } else { + parent_overflow.push(*parent_pos); + } + } + buf.write_u32::(p1_pos).unwrap(); + buf.write_u32::(parent_overflow_pos).unwrap(); + assert_eq!(entry.commit_id.0.len(), self.hash_length); + buf.write_all(entry.commit_id.0.as_slice()).unwrap(); + } + + for (commit_id, pos) in self.lookup { + buf.write_all(commit_id.0.as_slice()).unwrap(); + buf.write_u32::(pos).unwrap(); + } + + buf[parent_overflow_offset..parent_overflow_offset + 4] + .as_mut() + .write_u32::(parent_overflow.len() as u32) + .unwrap(); + for parent_pos in parent_overflow { + buf.write_u32::(parent_pos).unwrap(); + } + + buf + } +} + +trait IndexSegment { + fn num_parent_commits(&self) -> u32; + + fn num_commits(&self) -> u32; + + fn parent_file(&self) -> &Option>; + + fn name(&self) -> Option; + + fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option; + + fn resolve_prefix(&self, prefix: &HexPrefix) -> PrefixResolution; + + fn generation_number(&self, local_pos: u32) -> u32; + + fn commit_id(&self, local_pos: u32) -> CommitId; + + fn num_parents(&self, local_pos: u32) -> u32; + + fn parents_positions(&self, local_pos: u32) -> Vec; + + fn entry_by_pos(&self, pos: u32, local_pos: u32) -> IndexEntry; +} + +// TODO: This is a weird name for a public type in this module. The callers +// shouldn't need to know that it's composite. Rename. 
+#[derive(Clone)] +pub struct CompositeIndex<'a>(&'a dyn IndexSegment); + +impl<'a> CompositeIndex<'a> { + pub fn num_commits(&self) -> u32 { + self.0.num_parent_commits() + self.0.num_commits() + } + + pub fn stats(&self) -> IndexStats { + let num_commits = self.num_commits(); + let mut num_merges = 0; + let mut max_generation_number = 0; + let mut is_head = vec![true; num_commits as usize]; + for pos in 0..num_commits { + let entry = self.entry_by_pos(pos); + max_generation_number = max(max_generation_number, entry.generation_number()); + if entry.num_parents() > 1 { + num_merges += 1; + } + for parent_pos in entry.parents_positions() { + is_head[parent_pos as usize] = false; + } + } + let num_heads = is_head.iter().filter(|is_head| **is_head).count() as u32; + + let mut levels = vec![IndexLevelStats { + num_commits: self.0.num_commits(), + name: self.0.name(), + }]; + let mut parent_file = self.0.parent_file().clone(); + while parent_file.is_some() { + let file = parent_file.as_ref().unwrap(); + levels.push(IndexLevelStats { + num_commits: file.num_commits(), + name: self.0.name(), + }); + parent_file = file.parent_file().clone(); + } + + IndexStats { + num_commits, + num_merges, + max_generation_number, + num_heads, + levels, + } + } + + fn entry_by_pos(&self, pos: u32) -> IndexEntry<'a> { + let num_parent_commits = self.0.num_parent_commits(); + if pos >= num_parent_commits { + self.0.entry_by_pos(pos, pos - num_parent_commits) + } else { + let parent_file: &IndexFile = self.0.parent_file().as_ref().unwrap().as_ref(); + // The parent IndexFile outlives the child + let parent_file: &'a IndexFile = unsafe { std::mem::transmute(parent_file) }; + + parent_file.as_composite().entry_by_pos(pos) + } + } + + pub fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option { + let local_match = self.0.commit_id_to_pos(commit_id); + local_match.or_else(|| { + self.0 + .parent_file() + .as_ref() + .and_then(|file| file.as_composite().commit_id_to_pos(commit_id)) + }) + } + 
+ pub fn resolve_prefix(&self, prefix: &HexPrefix) -> PrefixResolution { + let local_match = self.0.resolve_prefix(prefix); + if local_match == PrefixResolution::AmbiguousMatch { + // return early to avoid checking the parent file(s) + return local_match; + } + let parent_match = self + .0 + .parent_file() + .as_ref() + .map_or(PrefixResolution::NoMatch, |file| { + file.as_composite().resolve_prefix(prefix) + }); + local_match.plus(&parent_match) + } + + pub fn entry_by_id(&self, commit_id: &CommitId) -> Option> { + self.commit_id_to_pos(commit_id) + .map(&|pos| self.entry_by_pos(pos)) + } + + pub fn has_id(&self, commit_id: &CommitId) -> bool { + self.commit_id_to_pos(commit_id).is_some() + } + + pub fn is_ancestor(&self, ancestor_id: &CommitId, descendant_id: &CommitId) -> bool { + let ancestor_pos = self.commit_id_to_pos(ancestor_id).unwrap(); + let descendant_pos = self.commit_id_to_pos(descendant_id).unwrap(); + self.is_ancestor_pos(ancestor_pos, descendant_pos) + } + + fn is_ancestor_pos(&self, ancestor_pos: u32, descendant_pos: u32) -> bool { + let ancestor_generation = self.entry_by_pos(ancestor_pos).generation_number(); + let mut work = vec![descendant_pos]; + let mut visited = HashSet::new(); + while !work.is_empty() { + let descendant_pos = work.pop().unwrap(); + let descendant_entry = self.entry_by_pos(descendant_pos); + if descendant_pos == ancestor_pos { + return true; + } + if !visited.insert(descendant_entry.pos) { + continue; + } + if descendant_entry.generation_number() <= ancestor_generation { + continue; + } + work.extend(descendant_entry.parents_positions()); + } + false + } + + pub fn walk_revs(&self, wanted: &[CommitId], unwanted: &[CommitId]) -> RevWalk { + let mut rev_walk = RevWalk::new(self.clone()); + for pos in wanted.iter().map(|id| self.commit_id_to_pos(id).unwrap()) { + rev_walk.add_wanted(pos); + } + for pos in unwanted.iter().map(|id| self.commit_id_to_pos(id).unwrap()) { + rev_walk.add_unwanted(pos); + } + rev_walk + } + + pub fn 
heads<'candidates>( + &self, + candidates: impl IntoIterator, + ) -> Vec { + // Add all parents of the candidates to the work queue. The parents and their + // ancestors are not heads. + // Also find the smallest generation number among the candidates. + let mut work = BinaryHeap::new(); + let mut min_generation = std::u32::MAX; + let mut candidate_positions = HashSet::new(); + for entry in candidates + .into_iter() + .map(|id| self.entry_by_id(id).unwrap()) + { + candidate_positions.insert(entry.pos); + min_generation = min(min_generation, entry.generation_number()); + for parent_pos in entry.parents_positions() { + work.push(IndexEntryByGeneration(self.entry_by_pos(parent_pos))); + } + } + + // Walk ancestors of the parents of the candidates. Remove visited commits from + // set of candidates. Stop walking when we have gone past the minimum + // candidate generation. + let mut visited = HashSet::new(); + while !work.is_empty() { + let item = work.pop().unwrap().0; + if !visited.insert(item.pos) { + continue; + } + if item.generation_number() < min_generation { + break; + } + candidate_positions.remove(&item.pos); + for parent_pos in item.parents_positions() { + work.push(IndexEntryByGeneration(self.entry_by_pos(parent_pos))); + } + } + + let mut heads: Vec<_> = candidate_positions + .iter() + .map(|pos| self.entry_by_pos(*pos).commit_id()) + .collect(); + heads.sort(); + heads + } +} + +pub struct IndexLevelStats { + pub num_commits: u32, + pub name: Option, +} + +pub struct IndexStats { + pub num_commits: u32, + pub num_merges: u32, + pub max_generation_number: u32, + pub num_heads: u32, + pub levels: Vec, +} + +#[derive(Eq, PartialEq)] +struct IndexEntryByGeneration<'a>(IndexEntry<'a>); + +impl Ord for IndexEntryByGeneration<'_> { + fn cmp(&self, other: &Self) -> Ordering { + self.0 + .generation_number() + .cmp(&other.0.generation_number()) + .then(self.0.pos.cmp(&other.0.pos)) + } +} + +impl PartialOrd for IndexEntryByGeneration<'_> { + fn partial_cmp(&self, 
other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[derive(Eq, PartialEq, Ord, PartialOrd)] +struct RevWalkWorkItem<'a> { + entry: IndexEntryByGeneration<'a>, + wanted: bool, +} + +pub struct RevWalk<'a> { + index: CompositeIndex<'a>, + items: BinaryHeap>, + wanted_boundary_set: HashSet, + unwanted_boundary_set: HashSet, +} + +impl<'a> RevWalk<'a> { + fn new(index: CompositeIndex<'a>) -> Self { + Self { + index, + items: BinaryHeap::new(), + wanted_boundary_set: HashSet::new(), + unwanted_boundary_set: HashSet::new(), + } + } + + fn add_wanted(&mut self, pos: u32) { + if !self.wanted_boundary_set.insert(pos) { + return; + } + self.items.push(RevWalkWorkItem { + entry: IndexEntryByGeneration(self.index.entry_by_pos(pos)), + wanted: true, + }); + } + + fn add_unwanted(&mut self, pos: u32) { + if !self.unwanted_boundary_set.insert(pos) { + return; + } + self.items.push(RevWalkWorkItem { + entry: IndexEntryByGeneration(self.index.entry_by_pos(pos)), + wanted: false, + }); + } +} + +impl<'a> Iterator for RevWalk<'a> { + type Item = CommitId; + + fn next(&mut self) -> Option { + while !self.wanted_boundary_set.is_empty() { + let item = self.items.pop().unwrap(); + if item.wanted { + self.wanted_boundary_set.remove(&item.entry.0.pos); + if self.unwanted_boundary_set.contains(&item.entry.0.pos) { + continue; + } + for parent_pos in item.entry.0.parents_positions() { + self.add_wanted(parent_pos); + } + return Some(item.entry.0.commit_id()); + } else { + self.unwanted_boundary_set.remove(&item.entry.0.pos); + for parent_pos in item.entry.0.parents_positions() { + self.add_unwanted(parent_pos); + } + } + } + None + } +} + +impl IndexSegment for IndexFile { + fn num_parent_commits(&self) -> u32 { + self.num_parent_commits + } + + fn num_commits(&self) -> u32 { + self.num_local_commits + } + + fn parent_file(&self) -> &Option> { + &self.parent_file + } + + fn name(&self) -> Option { + Some(self.name.clone()) + } + + fn commit_id_to_pos(&self, commit_id: &CommitId) -> 
Option { + if self.num_local_commits == 0 { + // Avoid overflow when subtracting 1 below + return None; + } + let mut low = 0; + let mut high = self.num_local_commits - 1; + + // binary search for the commit id + loop { + let mid = (low + high) / 2; + let entry = self.lookup_entry(mid); + let entry_commit_id = entry.commit_id(); + if high == low { + return if &entry_commit_id == commit_id { + Some(entry.pos()) + } else { + None + }; + } + if commit_id > &entry_commit_id { + low = mid + 1; + } else { + high = mid; + } + } + } + + fn resolve_prefix(&self, prefix: &HexPrefix) -> PrefixResolution { + let (bytes_prefix, min_bytes_prefix) = prefix.bytes_prefixes(); + match self.commit_id_byte_prefix_to_pos(&min_bytes_prefix) { + None => PrefixResolution::NoMatch, + Some(lookup_pos) => { + let mut first_match = None; + for i in lookup_pos..self.num_local_commits as u32 { + let entry = self.lookup_entry(i); + let id = entry.commit_id(); + if !id.0.starts_with(&bytes_prefix.0) { + break; + } + if prefix.matches(&id) { + if first_match.is_some() { + return PrefixResolution::AmbiguousMatch; + } + first_match = Some(id) + } + } + match first_match { + None => PrefixResolution::NoMatch, + Some(id) => PrefixResolution::SingleMatch(id), + } + } + } + } + + fn generation_number(&self, local_pos: u32) -> u32 { + self.graph_entry(local_pos).generation_number() + } + + fn commit_id(&self, local_pos: u32) -> CommitId { + self.graph_entry(local_pos).commit_id() + } + + fn num_parents(&self, local_pos: u32) -> u32 { + self.graph_entry(local_pos).num_parents() + } + + fn parents_positions(&self, local_pos: u32) -> Vec { + let graph_entry = self.graph_entry(local_pos); + let mut parent_entries = vec![]; + if graph_entry.num_parents() >= 1 { + parent_entries.push(graph_entry.parent1_pos()); + } + if graph_entry.num_parents() >= 2 { + let mut parent_overflow_pos = graph_entry.parent2_overflow_pos(); + for _ in 1..graph_entry.num_parents() { + 
parent_entries.push(self.overflow_parent(parent_overflow_pos)); + parent_overflow_pos += 1; + } + } + parent_entries + } + + fn entry_by_pos(&self, pos: u32, local_pos: u32) -> IndexEntry { + IndexEntry { + source: self, + local_pos, + pos, + } + } +} + +impl IndexSegment for UnsavedIndexData { + fn num_parent_commits(&self) -> u32 { + self.num_parent_commits + } + + fn num_commits(&self) -> u32 { + self.graph.len() as u32 + } + + fn parent_file(&self) -> &Option> { + &self.parent_file + } + + fn name(&self) -> Option { + None + } + + fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option { + self.lookup.get(commit_id).cloned() + } + + fn resolve_prefix(&self, prefix: &HexPrefix) -> PrefixResolution { + let (bytes_prefix, min_bytes_prefix) = prefix.bytes_prefixes(); + let mut potential_range = self + .lookup + .range((Bound::Included(&min_bytes_prefix), Bound::Unbounded)); + let mut first_match = None; + loop { + match potential_range.next() { + None => { + break; + } + Some((id, _pos)) => { + if !id.0.starts_with(&bytes_prefix.0) { + break; + } + if prefix.matches(&id) { + if first_match.is_some() { + return PrefixResolution::AmbiguousMatch; + } + first_match = Some(id) + } + } + } + } + match first_match { + None => PrefixResolution::NoMatch, + Some(id) => PrefixResolution::SingleMatch(id.clone()), + } + } + + fn generation_number(&self, local_pos: u32) -> u32 { + self.graph[local_pos as usize].generation_number + } + + fn commit_id(&self, local_pos: u32) -> CommitId { + self.graph[local_pos as usize].commit_id.clone() + } + + fn num_parents(&self, local_pos: u32) -> u32 { + self.graph[local_pos as usize].parent_positions.len() as u32 + } + + fn parents_positions(&self, local_pos: u32) -> Vec { + self.graph[local_pos as usize].parent_positions.clone() + } + + fn entry_by_pos(&self, pos: u32, local_pos: u32) -> IndexEntry { + IndexEntry { + source: self, + local_pos, + pos, + } + } +} + +#[derive(Clone)] +pub struct IndexEntry<'a> { + source: &'a dyn 
IndexSegment, + pos: u32, + // Position within the source segment + local_pos: u32, +} + +impl PartialEq for IndexEntry<'_> { + fn eq(&self, other: &Self) -> bool { + self.pos == other.pos + } +} +impl Eq for IndexEntry<'_> {} + +impl IndexEntry<'_> { + pub fn generation_number(&self) -> u32 { + self.source.generation_number(self.local_pos) + } + + pub fn commit_id(&self) -> CommitId { + self.source.commit_id(self.local_pos) + } + + pub fn num_parents(&self) -> u32 { + self.source.num_parents(self.local_pos) + } + + fn parents_positions(&self) -> Vec { + self.source.parents_positions(self.local_pos) + } +} + +impl IndexFile { + fn load_from( + file: &mut dyn Read, + dir: &Path, + name: String, + hash_length: usize, + ) -> io::Result { + let parent_filename_len = file.read_u32::()?; + let num_parent_commits; + let maybe_parent_file; + if parent_filename_len > 0 { + let mut parent_filename_bytes = vec![0; parent_filename_len as usize]; + file.read_exact(&mut parent_filename_bytes)?; + let parent_filename = String::from_utf8(parent_filename_bytes).unwrap(); + let parent_file_path = dir.join(&parent_filename); + let mut index_file = File::open(&parent_file_path).unwrap(); + let parent_file = + IndexFile::load_from(&mut index_file, dir, parent_filename, hash_length)?; + num_parent_commits = parent_file.num_parent_commits + parent_file.num_local_commits; + maybe_parent_file = Some(Arc::new(parent_file)); + } else { + num_parent_commits = 0; + maybe_parent_file = None; + }; + let num_commits = file.read_u32::()?; + let num_parent_overflow_entries = file.read_u32::()?; + let mut data = vec![]; + file.read_to_end(&mut data)?; + let commit_graph_entry_size = CommitGraphEntry::size(hash_length); + let graph_size = (num_commits as usize) * commit_graph_entry_size; + let commit_lookup_entry_size = CommitLookupEntry::size(hash_length); + let lookup_size = (num_commits as usize) * commit_lookup_entry_size; + let overflow_size = (num_parent_overflow_entries as usize) * 4; + let 
expected_size = graph_size + lookup_size + overflow_size; + assert_eq!(data.len(), expected_size); + let overflow_parent = data.split_off(graph_size + lookup_size); + let lookup = data.split_off(graph_size); + let graph = data; + Ok(IndexFile { + parent_file: maybe_parent_file, + num_parent_commits, + name, + hash_length, + commit_graph_entry_size, + commit_lookup_entry_size, + num_local_commits: num_commits, + graph, + lookup, + overflow_parent, + }) + } + + fn load_at_operation( + dir: &Path, + hash_length: usize, + op_id: &OperationId, + ) -> io::Result { + let op_id_file = dir.join("operations").join(op_id.hex()); + let mut buf = vec![]; + File::open(op_id_file) + .unwrap() + .read_to_end(&mut buf) + .unwrap(); + let index_file_id_hex = String::from_utf8(buf).unwrap(); + let index_file_path = dir.join(&index_file_id_hex); + let mut index_file = File::open(&index_file_path).unwrap(); + IndexFile::load_from(&mut index_file, dir, index_file_id_hex, hash_length) + } + + fn from_unsaved_data(dir: &Path, data: UnsavedIndexData) -> io::Result { + let hash_length = data.hash_length; + let buf = data.serialize(); + + let mut hasher = Blake2b::new(); + hasher.input(&buf); + let index_file_id_hex = hex::encode(&hasher.result()); + let index_file_path = dir.join(&index_file_id_hex); + + let mut temp_file = NamedTempFile::new_in(&dir)?; + let file = temp_file.as_file_mut(); + file.write_all(&buf).unwrap(); + temp_file.persist(&index_file_path)?; + + let mut cursor = Cursor::new(&buf); + IndexFile::load_from(&mut cursor, dir, index_file_id_hex, hash_length) + } + + fn index(store: &StoreWrapper, dir: &Path, operation: &Operation) -> io::Result { + let view = operation.view(); + let operations_dir = dir.join("operations"); + let hash_length = store.hash_length(); + let mut new_heads = view.heads().clone(); + let mut parent_op_id: Option = None; + for op in dag_walk::bfs( + vec![operation.clone()], + Box::new(|op: &Operation| op.id().clone()), + Box::new(|op: &Operation| 
op.parents()), + ) { + if operations_dir.join(op.id().hex()).is_file() { + if parent_op_id.is_none() { + parent_op_id = Some(op.id().clone()) + } + } else { + for head in op.view().heads() { + new_heads.insert(head.clone()); + } + } + } + let mut data; + let maybe_parent_file; + match parent_op_id { + None => { + maybe_parent_file = None; + data = UnsavedIndexData::full(hash_length); + } + Some(parent_op_id) => { + let parent_file = Arc::new( + IndexFile::load_at_operation(dir, hash_length, &parent_op_id).unwrap(), + ); + maybe_parent_file = Some(parent_file.clone()); + data = UnsavedIndexData::incremental(parent_file) + } + } + + let mut heads: Vec = new_heads.into_iter().collect(); + heads.sort(); + let commits = topo_order_parents_first(store, heads, maybe_parent_file); + + for commit in &commits { + data.add_commit(&commit); + } + + let index_file = IndexFile::from_unsaved_data(dir, data)?; + + let mut temp_file = NamedTempFile::new_in(&dir)?; + let file = temp_file.as_file_mut(); + file.write_all(&index_file.name.as_bytes()).unwrap(); + temp_file.persist(&operations_dir.join(operation.id().hex()))?; + + Ok(index_file) + } + + pub fn as_composite(&self) -> CompositeIndex { + CompositeIndex(self) + } + + fn graph_entry(&self, local_pos: u32) -> CommitGraphEntry { + let offset = (local_pos as usize) * self.commit_graph_entry_size; + CommitGraphEntry { + data: &self.graph[offset..offset + self.commit_graph_entry_size], + hash_length: self.hash_length, + } + } + + fn lookup_entry(&self, lookup_pos: u32) -> CommitLookupEntry { + let offset = (lookup_pos as usize) * self.commit_lookup_entry_size; + CommitLookupEntry { + data: &self.lookup[offset..offset + self.commit_lookup_entry_size], + hash_length: self.hash_length, + } + } + + fn overflow_parent(&self, overflow_pos: u32) -> u32 { + let offset = (overflow_pos as usize) * 4; + (&self.overflow_parent[offset..offset + 4]) + .read_u32::() + .unwrap() + } + + fn commit_id_byte_prefix_to_pos(&self, prefix: &CommitId) -> 
Option { + if self.num_local_commits == 0 { + // Avoid overflow when subtracting 1 below + return None; + } + let mut low = 0; + let mut high = self.num_local_commits - 1; + let prefix_len = prefix.0.len(); + + // binary search for the commit id + loop { + let mid = (low + high) / 2; + let entry = self.lookup_entry(mid); + let entry_commit_id = entry.commit_id(); + let entry_prefix = &entry_commit_id.0[0..prefix_len]; + if high == low { + return Some(mid); + } + if entry_prefix < prefix.0.as_slice() { + low = mid + 1; + } else { + high = mid; + } + } + } +} + +pub struct Index<'r> { + repo: &'r ReadonlyRepo, + dir: PathBuf, + op_id: Mutex, + index_file: Mutex>>, +} + +impl Index<'_> { + pub fn init(dir: PathBuf) { + std::fs::create_dir(dir.join("operations")).unwrap(); + } + + pub fn reinit(dir: PathBuf) { + std::fs::remove_dir_all(dir.join("operations")).unwrap(); + Index::init(dir); + } + + pub fn load(repo: &ReadonlyRepo, dir: PathBuf, op_id: OperationId) -> Index { + Index { + repo, + dir, + op_id: Mutex::new(op_id), + index_file: Mutex::new(None), + } + } + + // TODO: Maybe just call this data() or something? We should also hide the + // IndexFile type from the API. 
+ pub fn index_file(&self) -> Arc { + let mut locked_index_file = self.index_file.lock().unwrap(); + if locked_index_file.is_none() { + locked_index_file.replace(Arc::new(self.do_load())); + } + locked_index_file.as_ref().unwrap().clone() + } + + fn do_load(&self) -> IndexFile { + let op_id_hex = self.op_id.lock().unwrap().hex(); + let op_id_file = self.dir.join("operations").join(&op_id_hex); + if op_id_file.exists() { + let op_id = OperationId(hex::decode(op_id_hex).unwrap()); + IndexFile::load_at_operation(&self.dir, self.repo.store().hash_length(), &op_id) + .unwrap() + } else { + let op = self + .repo + .view() + .get_operation(&self.op_id.lock().unwrap()) + .unwrap(); + IndexFile::index(self.repo.store(), &self.dir, &op).unwrap() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use test_case::test_case; + + #[test] + fn commit_graph_entry_accessors() { + let data = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + ]; + let entry = CommitGraphEntry { + data: &data, + hash_length: 4, + }; + + // Check that the correct value can be read + assert_eq!(entry.generation_number(), 0x04030201); + assert_eq!(entry.num_parents(), 0x08070605); + assert_eq!(entry.parent1_pos(), 0x0c0b0a09); + assert_eq!(entry.parent2_overflow_pos(), 0x100f0e0d); + assert_eq!(entry.commit_id(), CommitId(vec![17, 18, 19, 20])); + } + + #[test_case(false; "memory")] + #[test_case(true; "file")] + fn index_empty(use_file: bool) { + let unsaved = UnsavedIndexData::full(3); + let temp_dir; + let source: Box = if use_file { + temp_dir = tempfile::tempdir().unwrap(); + Box::new(IndexFile::from_unsaved_data(temp_dir.path(), unsaved).unwrap()) + } else { + Box::new(unsaved) + }; + let index = CompositeIndex(source.as_ref()); + + // Stats are as expected + let stats = index.stats(); + assert_eq!(stats.num_commits, 0); + assert_eq!(stats.num_heads, 0); + assert_eq!(stats.max_generation_number, 0); + assert_eq!(stats.num_merges, 0); + assert_eq!(index.num_commits(), 
0); + // Cannot find any commits + assert!(index.entry_by_id(&CommitId::from_hex("000000")).is_none()); + assert!(index.entry_by_id(&CommitId::from_hex("aaa111")).is_none()); + assert!(index.entry_by_id(&CommitId::from_hex("ffffff")).is_none()); + } + + #[test_case(false; "memory")] + #[test_case(true; "file")] + fn index_root_commit(use_file: bool) { + let mut unsaved = UnsavedIndexData::full(3); + let id_0 = CommitId::from_hex("000000"); + unsaved.add_commit_data(id_0.clone(), vec![]); + let temp_dir; + let source: Box = if use_file { + temp_dir = tempfile::tempdir().unwrap(); + Box::new(IndexFile::from_unsaved_data(temp_dir.path(), unsaved).unwrap()) + } else { + Box::new(unsaved) + }; + let index = CompositeIndex(source.as_ref()); + + // Stats are as expected + let stats = index.stats(); + assert_eq!(stats.num_commits, 1); + assert_eq!(stats.num_heads, 1); + assert_eq!(stats.max_generation_number, 0); + assert_eq!(stats.num_merges, 0); + assert_eq!(index.num_commits(), 1); + // Can find only the root commit + assert_eq!(index.commit_id_to_pos(&id_0), Some(0)); + assert_eq!(index.commit_id_to_pos(&CommitId::from_hex("aaaaaa")), None); + assert_eq!(index.commit_id_to_pos(&CommitId::from_hex("ffffff")), None); + // Check properties of root entry + let entry = index.entry_by_id(&id_0).unwrap(); + assert_eq!(entry.pos, 0); + assert_eq!(entry.commit_id(), id_0); + assert_eq!(entry.generation_number(), 0); + assert_eq!(entry.num_parents(), 0); + assert_eq!(entry.parents_positions(), Vec::::new()); + // Can get same entry by position + let entry = index.entry_by_pos(0); + assert_eq!(entry.pos, 0); + assert_eq!(entry.commit_id(), id_0); + } + + #[test] + #[should_panic(expected = "parent commit is not indexed")] + fn index_missing_parent_commit() { + let mut unsaved = UnsavedIndexData::full(3); + let id_0 = CommitId::from_hex("000000"); + let id_1 = CommitId::from_hex("111111"); + unsaved.add_commit_data(id_1, vec![id_0]); + } + + #[test_case(false, false; "full in 
memory")] + #[test_case(false, true; "full on disk")] + #[test_case(true, false; "incremental in memory")] + #[test_case(true, true; "incremental on disk")] + fn index_multiple_commits(incremental: bool, use_file: bool) { + let mut unsaved = UnsavedIndexData::full(3); + // 5 + // |\ + // 4 | 3 + // | |/ + // 1 2 + // |/ + // 0 + let id_0 = CommitId::from_hex("000000"); + let id_1 = CommitId::from_hex("009999"); + let id_2 = CommitId::from_hex("055488"); + let id_3 = CommitId::from_hex("055444"); + let id_4 = CommitId::from_hex("055555"); + let id_5 = CommitId::from_hex("033333"); + unsaved.add_commit_data(id_0.clone(), vec![]); + unsaved.add_commit_data(id_1.clone(), vec![id_0.clone()]); + unsaved.add_commit_data(id_2.clone(), vec![id_0.clone()]); + + // If testing incremental indexing, write the first three commits to one file + // now and build the remainder as another segment on top. + let temp_dir = tempfile::tempdir().unwrap(); + if incremental { + let initial_file = + Arc::new(IndexFile::from_unsaved_data(temp_dir.path(), unsaved).unwrap()); + unsaved = UnsavedIndexData::incremental(initial_file); + } + + unsaved.add_commit_data(id_3.clone(), vec![id_2.clone()]); + unsaved.add_commit_data(id_4.clone(), vec![id_1.clone()]); + unsaved.add_commit_data(id_5.clone(), vec![id_4.clone(), id_2.clone()]); + let source: Box = if use_file { + Box::new(IndexFile::from_unsaved_data(temp_dir.path(), unsaved).unwrap()) + } else { + Box::new(unsaved) + }; + let index = CompositeIndex(source.as_ref()); + + // Stats are as expected + let stats = index.stats(); + assert_eq!(stats.num_commits, 6); + assert_eq!(stats.num_heads, 2); + assert_eq!(stats.max_generation_number, 3); + assert_eq!(stats.num_merges, 1); + assert_eq!(index.num_commits(), 6); + // Can find all the commits + let entry_0 = index.entry_by_id(&id_0).unwrap(); + let entry_9 = index.entry_by_id(&id_1).unwrap(); + let entry_8 = index.entry_by_id(&id_2).unwrap(); + let entry_4 = index.entry_by_id(&id_3).unwrap(); + 
let entry_5 = index.entry_by_id(&id_4).unwrap(); + let entry_3 = index.entry_by_id(&id_5).unwrap(); + // Check properties of some entries + assert_eq!(entry_0.pos, 0); + assert_eq!(entry_0.commit_id(), id_0); + assert_eq!(entry_9.pos, 1); + assert_eq!(entry_9.commit_id(), id_1); + assert_eq!(entry_9.generation_number(), 1); + assert_eq!(entry_9.parents_positions(), vec![0]); + assert_eq!(entry_8.pos, 2); + assert_eq!(entry_8.commit_id(), id_2); + assert_eq!(entry_8.generation_number(), 1); + assert_eq!(entry_8.parents_positions(), vec![0]); + assert_eq!(entry_4.generation_number(), 2); + assert_eq!(entry_4.parents_positions(), vec![2]); + assert_eq!(entry_5.pos, 4); + assert_eq!(entry_5.generation_number(), 2); + assert_eq!(entry_5.parents_positions(), vec![1]); + assert_eq!(entry_3.generation_number(), 3); + assert_eq!(entry_3.parents_positions(), vec![4, 2]); + + // Test resolve_prefix + assert_eq!( + index.resolve_prefix(&HexPrefix::new(id_0.hex())), + PrefixResolution::SingleMatch(id_0.clone()) + ); + assert_eq!( + index.resolve_prefix(&HexPrefix::new(id_1.hex())), + PrefixResolution::SingleMatch(id_1.clone()) + ); + assert_eq!( + index.resolve_prefix(&HexPrefix::new(id_2.hex())), + PrefixResolution::SingleMatch(id_2.clone()) + ); + assert_eq!( + index.resolve_prefix(&HexPrefix::new("ffffff".to_string())), + PrefixResolution::NoMatch + ); + assert_eq!( + index.resolve_prefix(&HexPrefix::new("000001".to_string())), + PrefixResolution::NoMatch + ); + assert_eq!( + index.resolve_prefix(&HexPrefix::new("0".to_string())), + PrefixResolution::AmbiguousMatch + ); + // Test a globally unique prefix in initial part + assert_eq!( + index.resolve_prefix(&HexPrefix::new("009".to_string())), + PrefixResolution::SingleMatch(CommitId::from_hex("009999")) + ); + // Test a globally unique prefix in incremental part + assert_eq!( + index.resolve_prefix(&HexPrefix::new("03".to_string())), + PrefixResolution::SingleMatch(CommitId::from_hex("033333")) + ); + // Test a locally 
unique but globally ambiguous prefix + assert_eq!( + index.resolve_prefix(&HexPrefix::new("0554".to_string())), + PrefixResolution::AmbiguousMatch + ); + } + + #[test] + fn test_is_ancestor() { + let mut unsaved = UnsavedIndexData::full(3); + // 5 + // |\ + // 4 | 3 + // | |/ + // 1 2 + // |/ + // 0 + let id_0 = CommitId::from_hex("000000"); + let id_1 = CommitId::from_hex("111111"); + let id_2 = CommitId::from_hex("222222"); + let id_3 = CommitId::from_hex("333333"); + let id_4 = CommitId::from_hex("444444"); + let id_5 = CommitId::from_hex("555555"); + unsaved.add_commit_data(id_0.clone(), vec![]); + unsaved.add_commit_data(id_1.clone(), vec![id_0.clone()]); + unsaved.add_commit_data(id_2.clone(), vec![id_0.clone()]); + unsaved.add_commit_data(id_3.clone(), vec![id_2.clone()]); + unsaved.add_commit_data(id_4.clone(), vec![id_1.clone()]); + unsaved.add_commit_data(id_5.clone(), vec![id_4.clone(), id_2.clone()]); + let index = CompositeIndex(&unsaved); + + assert!(index.is_ancestor(&id_0, &id_0)); + assert!(index.is_ancestor(&id_0, &id_1)); + assert!(index.is_ancestor(&id_2, &id_3)); + assert!(index.is_ancestor(&id_2, &id_5)); + assert!(index.is_ancestor(&id_1, &id_5)); + assert!(index.is_ancestor(&id_0, &id_5)); + assert!(!index.is_ancestor(&id_1, &id_0)); + assert!(!index.is_ancestor(&id_5, &id_3)); + assert!(!index.is_ancestor(&id_3, &id_5)); + assert!(!index.is_ancestor(&id_2, &id_4)); + assert!(!index.is_ancestor(&id_4, &id_2)); + } + + #[test] + fn test_walk_revs() { + let mut unsaved = UnsavedIndexData::full(3); + // 5 + // |\ + // 4 | 3 + // | |/ + // 1 2 + // |/ + // 0 + let id_0 = CommitId::from_hex("000000"); + let id_1 = CommitId::from_hex("111111"); + let id_2 = CommitId::from_hex("222222"); + let id_3 = CommitId::from_hex("333333"); + let id_4 = CommitId::from_hex("444444"); + let id_5 = CommitId::from_hex("555555"); + unsaved.add_commit_data(id_0.clone(), vec![]); + unsaved.add_commit_data(id_1.clone(), vec![id_0.clone()]); + 
unsaved.add_commit_data(id_2.clone(), vec![id_0.clone()]); + unsaved.add_commit_data(id_3.clone(), vec![id_2.clone()]); + unsaved.add_commit_data(id_4.clone(), vec![id_1.clone()]); + unsaved.add_commit_data(id_5.clone(), vec![id_4.clone(), id_2.clone()]); + let index = CompositeIndex(&unsaved); + + // No wanted commits + let revs: Vec = index.walk_revs(&[], &[]).collect(); + assert!(revs.is_empty()); + // Simple linear walk to root + let revs: Vec = index.walk_revs(&[id_4.clone()], &[]).collect(); + assert_eq!(revs, vec![id_4.clone(), id_1.clone(), id_0.clone()]); + // Commits that are both wanted and unwanted are not walked + let revs: Vec = index.walk_revs(&[id_0.clone()], &[id_0.clone()]).collect(); + assert_eq!(revs, vec![]); + // Commits that are listed twice are only walked once + let revs: Vec = index + .walk_revs(&[id_0.clone(), id_0.clone()], &[]) + .collect(); + assert_eq!(revs, vec![id_0.clone()]); + // If a commit and its ancestor are both wanted, the ancestor still gets walked + // only once + let revs: Vec = index + .walk_revs(&[id_0.clone(), id_1.clone()], &[]) + .collect(); + assert_eq!(revs, vec![id_1.clone(), id_0.clone()]); + // Ancestors of both wanted and unwanted commits are not walked + let revs: Vec = index.walk_revs(&[id_2.clone()], &[id_1.clone()]).collect(); + assert_eq!(revs, vec![id_2.clone()]); + // Same as above, but the opposite order, to make sure that order in index + // doesn't matter + let revs: Vec = index.walk_revs(&[id_1.clone()], &[id_2.clone()]).collect(); + assert_eq!(revs, vec![id_1.clone()]); + // Two wanted nodes + let revs: Vec = index + .walk_revs(&[id_1.clone(), id_2.clone()], &[]) + .collect(); + assert_eq!(revs, vec![id_2.clone(), id_1.clone(), id_0.clone()]); + // Order of output doesn't depend on order of input + let revs: Vec = index + .walk_revs(&[id_2.clone(), id_1.clone()], &[]) + .collect(); + assert_eq!(revs, vec![id_2.clone(), id_1.clone(), id_0]); + // Two wanted nodes that share an unwanted ancestor + let
revs: Vec = index + .walk_revs(&[id_5.clone(), id_3.clone()], &[id_2]) + .collect(); + assert_eq!(revs, vec![id_5, id_4, id_3, id_1]); + } + + #[test] + fn test_heads() { + let mut unsaved = UnsavedIndexData::full(3); + // 5 + // |\ + // 4 | 3 + // | |/ + // 1 2 + // |/ + // 0 + let id_0 = CommitId::from_hex("000000"); + let id_1 = CommitId::from_hex("111111"); + let id_2 = CommitId::from_hex("222222"); + let id_3 = CommitId::from_hex("333333"); + let id_4 = CommitId::from_hex("444444"); + let id_5 = CommitId::from_hex("555555"); + unsaved.add_commit_data(id_0.clone(), vec![]); + unsaved.add_commit_data(id_1.clone(), vec![id_0.clone()]); + unsaved.add_commit_data(id_2.clone(), vec![id_0.clone()]); + unsaved.add_commit_data(id_3.clone(), vec![id_2.clone()]); + unsaved.add_commit_data(id_4.clone(), vec![id_1.clone()]); + unsaved.add_commit_data(id_5.clone(), vec![id_4.clone(), id_2.clone()]); + let index = CompositeIndex(&unsaved); + + // Empty input + assert!(index.heads(&[]).is_empty()); + // Single head + assert_eq!(index.heads(&[id_4.clone()]), vec![id_4.clone()]); + // Single head and parent + assert_eq!(index.heads(&[id_4.clone(), id_1]), vec![id_4.clone()]); + // Single head and grand-parent + assert_eq!(index.heads(&[id_4.clone(), id_0]), vec![id_4.clone()]); + // Multiple heads + assert_eq!( + index.heads(&[id_4.clone(), id_3.clone()]), + vec![id_3.clone(), id_4] + ); + // Merge commit and ancestors + assert_eq!(index.heads(&[id_5.clone(), id_2]), vec![id_5.clone()]); + // Merge commit and other commit + assert_eq!(index.heads(&[id_5.clone(), id_3.clone()]), vec![id_3, id_5]); + } +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs new file mode 100644 index 000000000..bc59cc427 --- /dev/null +++ b/lib/src/lib.rs @@ -0,0 +1,43 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![feature(get_mut_unchecked)] + +pub mod commit; +pub mod commit_builder; +pub mod conflicts; +pub mod dag_walk; +pub mod evolution; +pub mod files; +pub mod git_store; +pub mod index; +pub mod local_store; +pub mod lock; +pub mod matchers; +pub mod op_store; +pub mod operation; +pub mod repo; +pub mod repo_path; +pub mod rewrite; +pub mod settings; +pub mod simple_op_store; +pub mod store; +pub mod store_wrapper; +pub mod testutils; +pub mod transaction; +pub mod tree; +pub mod tree_builder; +pub mod trees; +pub mod view; +pub mod working_copy; diff --git a/lib/src/local_store.rs b/lib/src/local_store.rs new file mode 100644 index 000000000..3ae4a542c --- /dev/null +++ b/lib/src/local_store.rs @@ -0,0 +1,396 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::Debug; +use std::fs; +use std::fs::File; +use std::io::Write; +use std::io::{ErrorKind, Read}; +use std::path::PathBuf; + +use blake2::{Blake2b, Digest}; +use protobuf::{Message, ProtobufError}; +use tempfile::{NamedTempFile, PersistError}; + +use crate::repo_path::{DirRepoPath, FileRepoPath}; +use crate::store::{ + ChangeId, Commit, CommitId, Conflict, ConflictId, ConflictPart, FileId, MillisSinceEpoch, + Signature, Store, StoreError, StoreResult, SymlinkId, Timestamp, Tree, TreeId, TreeValue, +}; + +impl From for StoreError { + fn from(err: std::io::Error) -> Self { + StoreError::Other(err.to_string()) + } +} + +impl From for StoreError { + fn from(err: PersistError) -> Self { + StoreError::Other(err.to_string()) + } +} + +impl From for StoreError { + fn from(err: ProtobufError) -> Self { + StoreError::Other(err.to_string()) + } +} + +#[derive(Debug)] +pub struct LocalStore { + path: PathBuf, + empty_tree_id: TreeId, +} + +impl LocalStore { + pub fn init(store_path: PathBuf) -> Self { + fs::create_dir(store_path.join("commits")).unwrap(); + fs::create_dir(store_path.join("trees")).unwrap(); + fs::create_dir(store_path.join("files")).unwrap(); + fs::create_dir(store_path.join("symlinks")).unwrap(); + fs::create_dir(store_path.join("conflicts")).unwrap(); + let store = Self::load(store_path); + let empty_tree_id = store + .write_tree(&DirRepoPath::root(), &Tree::default()) + .unwrap(); + assert_eq!(empty_tree_id, store.empty_tree_id); + store + } + + pub fn load(store_path: PathBuf) -> Self { + let empty_tree_id = TreeId(hex::decode("786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce").unwrap()); + LocalStore { + path: store_path, + empty_tree_id, + } + } + + fn file_path(&self, id: &FileId) -> PathBuf { + self.path.join("files").join(id.hex()) + } + + fn symlink_path(&self, id: &SymlinkId) -> PathBuf { + self.path.join("symlinks").join(id.hex()) + } + + fn 
tree_path(&self, id: &TreeId) -> PathBuf { + self.path.join("trees").join(id.hex()) + } + + fn commit_path(&self, id: &CommitId) -> PathBuf { + self.path.join("commits").join(id.hex()) + } + + fn conflict_path(&self, id: &ConflictId) -> PathBuf { + self.path.join("conflicts").join(id.hex()) + } +} + +fn not_found_to_store_error(err: std::io::Error) -> StoreError { + if err.kind() == ErrorKind::NotFound { + StoreError::NotFound + } else { + StoreError::from(err) + } +} + +impl Store for LocalStore { + fn hash_length(&self) -> usize { + 64 + } + + fn read_file(&self, _path: &FileRepoPath, id: &FileId) -> StoreResult> { + let path = self.file_path(&id); + let file = File::open(path).map_err(not_found_to_store_error)?; + Ok(Box::new(zstd::Decoder::new(file)?)) + } + + fn write_file(&self, _path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult { + let temp_file = NamedTempFile::new_in(&self.path)?; + let mut encoder = zstd::Encoder::new(temp_file.as_file(), 0)?; + let mut hasher = Blake2b::new(); + loop { + let mut buff: Vec = Vec::with_capacity(1 << 14); + let bytes_read; + unsafe { + buff.set_len(1 << 14); + bytes_read = contents.read(&mut buff)?; + buff.set_len(bytes_read); + } + if bytes_read == 0 { + break; + } + encoder.write_all(&buff)?; + hasher.input(&buff); + } + encoder.finish()?; + let id = FileId(hasher.result().to_vec()); + + temp_file.persist(self.file_path(&id))?; + Ok(id) + } + + fn read_symlink(&self, _path: &FileRepoPath, id: &SymlinkId) -> Result { + let path = self.symlink_path(&id); + let mut file = File::open(path).map_err(not_found_to_store_error)?; + let mut target = String::new(); + file.read_to_string(&mut target).unwrap(); + Ok(target) + } + + fn write_symlink(&self, _path: &FileRepoPath, target: &str) -> Result { + let mut temp_file = NamedTempFile::new_in(&self.path)?; + temp_file.write_all(target.as_bytes()).unwrap(); + let mut hasher = Blake2b::new(); + hasher.input(&target.as_bytes()); + let id = 
SymlinkId(hasher.result().to_vec()); + + temp_file.persist(self.symlink_path(&id))?; + Ok(id) + } + + fn empty_tree_id(&self) -> &TreeId { + &self.empty_tree_id + } + + fn read_tree(&self, _path: &DirRepoPath, id: &TreeId) -> StoreResult { + let path = self.tree_path(&id); + let mut file = File::open(path).map_err(not_found_to_store_error)?; + + let proto: protos::store::Tree = protobuf::parse_from_reader(&mut file)?; + Ok(tree_from_proto(&proto)) + } + + fn write_tree(&self, _path: &DirRepoPath, tree: &Tree) -> StoreResult { + let temp_file = NamedTempFile::new_in(&self.path)?; + + let proto = tree_to_proto(tree); + let mut proto_bytes: Vec = Vec::new(); + proto.write_to_writer(&mut proto_bytes)?; + + temp_file.as_file().write_all(&proto_bytes)?; + + let id = TreeId(Blake2b::digest(&proto_bytes).to_vec()); + + temp_file.persist(self.tree_path(&id))?; + Ok(id) + } + + fn read_commit(&self, id: &CommitId) -> StoreResult { + let path = self.commit_path(&id); + let mut file = File::open(path).map_err(not_found_to_store_error)?; + + let proto: protos::store::Commit = protobuf::parse_from_reader(&mut file)?; + Ok(commit_from_proto(&proto)) + } + + fn write_commit(&self, commit: &Commit) -> StoreResult { + let temp_file = NamedTempFile::new_in(&self.path)?; + + let proto = commit_to_proto(commit); + let mut proto_bytes: Vec = Vec::new(); + proto.write_to_writer(&mut proto_bytes)?; + + temp_file.as_file().write_all(&proto_bytes)?; + + let id = CommitId(Blake2b::digest(&proto_bytes).to_vec()); + + temp_file.persist(self.commit_path(&id))?; + Ok(id) + } + + fn read_conflict(&self, id: &ConflictId) -> StoreResult { + let path = self.conflict_path(&id); + let mut file = File::open(path).map_err(not_found_to_store_error)?; + + let proto: protos::store::Conflict = protobuf::parse_from_reader(&mut file)?; + Ok(conflict_from_proto(&proto)) + } + + fn write_conflict(&self, conflict: &Conflict) -> StoreResult { + let temp_file = NamedTempFile::new_in(&self.path)?; + + let proto = 
conflict_to_proto(conflict); + let mut proto_bytes: Vec = Vec::new(); + proto.write_to_writer(&mut proto_bytes)?; + + temp_file.as_file().write_all(&proto_bytes)?; + + let id = ConflictId(Blake2b::digest(&proto_bytes).to_vec()); + + temp_file.persist(self.conflict_path(&id))?; + Ok(id) + } +} + +pub fn commit_to_proto(commit: &Commit) -> protos::store::Commit { + let mut proto = protos::store::Commit::new(); + for parent in &commit.parents { + proto.parents.push(parent.0.clone()); + } + for predecessor in &commit.predecessors { + proto.predecessors.push(predecessor.0.clone()); + } + proto.set_root_tree(commit.root_tree.0.clone()); + proto.set_change_id(commit.change_id.0.clone()); + proto.set_description(commit.description.clone()); + proto.set_author(signature_to_proto(&commit.author)); + proto.set_committer(signature_to_proto(&commit.committer)); + proto.set_is_open(commit.is_open); + proto.set_is_pruned(commit.is_pruned); + proto +} + +fn commit_from_proto(proto: &protos::store::Commit) -> Commit { + let commit_id_from_proto = |parent: &Vec| CommitId(parent.clone()); + let parents = proto.parents.iter().map(commit_id_from_proto).collect(); + let predecessors = proto + .predecessors + .iter() + .map(commit_id_from_proto) + .collect(); + let root_tree = TreeId(proto.root_tree.to_vec()); + let change_id = ChangeId(proto.change_id.to_vec()); + Commit { + parents, + predecessors, + root_tree, + change_id, + description: proto.description.clone(), + author: signature_from_proto(proto.author.get_ref()), + committer: signature_from_proto(proto.committer.get_ref()), + is_open: proto.is_open, + is_pruned: proto.is_pruned, + } +} + +fn tree_to_proto(tree: &Tree) -> protos::store::Tree { + let mut proto = protos::store::Tree::new(); + for entry in tree.entries() { + let mut proto_entry = protos::store::Tree_Entry::new(); + proto_entry.set_name(entry.name().to_owned()); + proto_entry.set_value(tree_value_to_proto(entry.value())); + proto.entries.push(proto_entry); + } + 
proto +} + +fn tree_from_proto(proto: &protos::store::Tree) -> Tree { + let mut tree = Tree::default(); + for proto_entry in proto.entries.iter() { + let value = tree_value_from_proto(proto_entry.value.as_ref().unwrap()); + tree.set(proto_entry.name.to_string(), value); + } + tree +} + +fn tree_value_to_proto(value: &TreeValue) -> protos::store::TreeValue { + let mut proto = protos::store::TreeValue::new(); + match value { + TreeValue::Normal { id, executable } => { + let mut file = protos::store::TreeValue_NormalFile::new(); + file.set_id(id.0.clone()); + file.set_executable(*executable); + proto.set_normal_file(file); + } + TreeValue::Symlink(id) => { + proto.set_symlink_id(id.0.clone()); + } + TreeValue::GitSubmodule(_id) => { + panic!("cannot store git submodules"); + } + TreeValue::Tree(id) => { + proto.set_tree_id(id.0.clone()); + } + TreeValue::Conflict(id) => { + proto.set_conflict_id(id.0.clone()); + } + }; + proto +} + +fn tree_value_from_proto(proto: &protos::store::TreeValue) -> TreeValue { + match proto.value.as_ref().unwrap() { + protos::store::TreeValue_oneof_value::tree_id(id) => TreeValue::Tree(TreeId(id.clone())), + protos::store::TreeValue_oneof_value::normal_file( + protos::store::TreeValue_NormalFile { id, executable, .. 
}, + ) => TreeValue::Normal { + id: FileId(id.clone()), + executable: *executable, + }, + protos::store::TreeValue_oneof_value::symlink_id(id) => { + TreeValue::Symlink(SymlinkId(id.clone())) + } + protos::store::TreeValue_oneof_value::conflict_id(id) => { + TreeValue::Conflict(ConflictId(id.clone())) + } + } +} + +fn signature_to_proto(signature: &Signature) -> protos::store::Commit_Signature { + let mut proto = protos::store::Commit_Signature::new(); + proto.set_name(signature.name.clone()); + proto.set_email(signature.email.clone()); + let mut timestamp_proto = protos::store::Commit_Timestamp::new(); + timestamp_proto.set_millis_since_epoch(signature.timestamp.timestamp.0); + timestamp_proto.set_tz_offset(signature.timestamp.tz_offset); + proto.set_timestamp(timestamp_proto); + proto +} + +fn signature_from_proto(proto: &protos::store::Commit_Signature) -> Signature { + let timestamp = proto.get_timestamp(); + Signature { + name: proto.name.clone(), + email: proto.email.clone(), + timestamp: Timestamp { + timestamp: MillisSinceEpoch(timestamp.millis_since_epoch), + tz_offset: timestamp.tz_offset, + }, + } +} + +fn conflict_to_proto(conflict: &Conflict) -> protos::store::Conflict { + let mut proto = protos::store::Conflict::new(); + for part in &conflict.adds { + proto.adds.push(conflict_part_to_proto(part)); + } + for part in &conflict.removes { + proto.removes.push(conflict_part_to_proto(part)); + } + proto +} + +fn conflict_from_proto(proto: &protos::store::Conflict) -> Conflict { + let mut conflict = Conflict::default(); + for part in &proto.removes { + conflict.removes.push(conflict_part_from_proto(part)) + } + for part in &proto.adds { + conflict.adds.push(conflict_part_from_proto(part)) + } + conflict +} + +fn conflict_part_from_proto(proto: &protos::store::Conflict_Part) -> ConflictPart { + ConflictPart { + value: tree_value_from_proto(proto.content.as_ref().unwrap()), + } +} + +fn conflict_part_to_proto(part: &ConflictPart) -> 
protos::store::Conflict_Part { + let mut proto = protos::store::Conflict_Part::new(); + proto.set_content(tree_value_to_proto(&part.value)); + proto +} diff --git a/lib/src/lock.rs b/lib/src/lock.rs new file mode 100644 index 000000000..66d4edc20 --- /dev/null +++ b/lib/src/lock.rs @@ -0,0 +1,108 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fs::{File, OpenOptions}; +use std::path::PathBuf; +use std::time::Duration; + +pub struct FileLock { + path: PathBuf, + _file: File, +} + +impl FileLock { + pub fn lock(path: PathBuf) -> FileLock { + let mut options = OpenOptions::new(); + options.create_new(true); + options.write(true); + let retry_delay = Duration::from_millis(10); + loop { + match options.open(&path) { + Ok(file) => return FileLock { path, _file: file }, + Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => { + std::thread::sleep(retry_delay); + } + Err(err) => panic!( + "failed to create lock file {}: {}", + path.to_string_lossy(), + err + ), + } + } + } +} + +impl Drop for FileLock { + fn drop(&mut self) { + std::fs::remove_file(&self.path).expect("failed to delete lock file"); + } +} + +#[cfg(test)] +mod tests { + use std::env; + use std::thread; + + use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; + + use super::*; + + #[test] + fn lock_basic() { + let number: u32 = rand::random(); + let lock_path = env::temp_dir().join(format!("test-{}.lock", number)); + 
assert!(!lock_path.exists()); + { + let _lock = FileLock::lock(lock_path.clone()); + assert!(lock_path.exists()); + } + assert!(!lock_path.exists()); + } + + #[test] + fn lock_concurrent() { + let number: u32 = rand::random(); + let data_path = env::temp_dir().join(format!("test-{}", number)); + let lock_path = env::temp_dir().join(format!("test-{}.lock", number)); + let mut data_file = OpenOptions::new() + .create(true) + .write(true) + .open(data_path.clone()) + .unwrap(); + data_file.write_u32::(0).unwrap(); + let mut threads = vec![]; + for _ in 0..100 { + let data_path = data_path.clone(); + let lock_path = lock_path.clone(); + let handle = thread::spawn(move || { + let _lock = FileLock::lock(lock_path); + let mut data_file = OpenOptions::new() + .read(true) + .open(data_path.clone()) + .unwrap(); + let value = data_file.read_u32::().unwrap(); + thread::sleep(Duration::from_millis(1)); + let mut data_file = OpenOptions::new().write(true).open(data_path).unwrap(); + data_file.write_u32::(value + 1).unwrap(); + }); + threads.push(handle); + } + for thread in threads { + thread.join().ok().unwrap(); + } + let mut data_file = OpenOptions::new().read(true).open(data_path).unwrap(); + let value = data_file.read_u32::().unwrap(); + assert_eq!(value, 100); + } +} diff --git a/lib/src/matchers.rs b/lib/src/matchers.rs new file mode 100644 index 000000000..2cab180fb --- /dev/null +++ b/lib/src/matchers.rs @@ -0,0 +1,233 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(dead_code)] + +use std::collections::HashMap; +use std::collections::HashSet; + +use crate::repo_path::DirRepoPath; +use crate::repo_path::DirRepoPathComponent; +use crate::repo_path::FileRepoPath; +use crate::repo_path::FileRepoPathComponent; + +#[derive(PartialEq, Eq, Debug)] +pub struct Visit<'a> { + dirs: VisitDirs<'a>, + files: VisitFiles<'a>, +} + +#[derive(PartialEq, Eq, Debug)] +pub enum VisitDirs<'a> { + All, + Set(&'a HashSet), +} + +#[derive(PartialEq, Eq, Debug)] +pub enum VisitFiles<'a> { + All, + Set(&'a HashSet), +} + +pub trait Matcher { + fn matches(&self, file: &FileRepoPath) -> bool; + fn visit(&self, dir: &DirRepoPath) -> Visit; +} + +#[derive(PartialEq, Eq, Debug)] +pub struct AlwaysMatcher; + +impl Matcher for AlwaysMatcher { + fn matches(&self, _file: &FileRepoPath) -> bool { + true + } + + fn visit(&self, _dir: &DirRepoPath) -> Visit { + Visit { + dirs: VisitDirs::All, + files: VisitFiles::All, + } + } +} + +#[derive(PartialEq, Eq, Debug)] +pub struct FilesMatcher { + files: HashSet, + dirs: Dirs, +} + +impl FilesMatcher { + fn new(files: HashSet) -> Self { + let mut dirs = Dirs::new(); + for f in &files { + dirs.add_file(f); + } + FilesMatcher { files, dirs } + } +} + +impl Matcher for FilesMatcher { + fn matches(&self, file: &FileRepoPath) -> bool { + self.files.contains(file) + } + + fn visit(&self, dir: &DirRepoPath) -> Visit { + let dirs = self.dirs.get_dirs(dir); + let files = self.dirs.get_files(dir); + Visit { + dirs: VisitDirs::Set(dirs), + files: VisitFiles::Set(files), + } + } +} + +/// Keeps track of which subdirectories and files of each directory need to be +/// visited. 
+#[derive(PartialEq, Eq, Debug)] +struct Dirs { + dirs: HashMap>, + files: HashMap>, + empty_dirs: HashSet, + empty_files: HashSet, +} + +impl Dirs { + fn new() -> Self { + Dirs { + dirs: HashMap::new(), + files: HashMap::new(), + empty_dirs: HashSet::new(), + empty_files: HashSet::new(), + } + } + + fn add_dir(&mut self, mut dir: DirRepoPath) { + let mut maybe_child = None; + loop { + let was_present = self.dirs.contains_key(&dir); + let children = self.dirs.entry(dir.clone()).or_default(); + if let Some(child) = maybe_child { + children.insert(child); + } + if was_present { + break; + } + match dir.split() { + None => break, + Some((new_dir, new_child)) => { + dir = new_dir; + maybe_child = Some(new_child); + } + }; + } + } + + fn add_file(&mut self, file: &FileRepoPath) { + let (dir, basename) = file.split(); + self.add_dir(dir.clone()); + self.files + .entry(dir.clone()) + .or_default() + .insert(basename.clone()); + } + + fn get_dirs(&self, dir: &DirRepoPath) -> &HashSet { + self.dirs.get(&dir).unwrap_or(&self.empty_dirs) + } + + fn get_files(&self, dir: &DirRepoPath) -> &HashSet { + self.files.get(&dir).unwrap_or(&self.empty_files) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + use crate::repo_path::DirRepoPath; + use crate::repo_path::DirRepoPathComponent; + use crate::repo_path::FileRepoPath; + use crate::repo_path::FileRepoPathComponent; + + #[test] + fn dirs_empty() { + let dirs = Dirs::new(); + assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &HashSet::new()); + } + + #[test] + fn dirs_root() { + let mut dirs = Dirs::new(); + dirs.add_dir(DirRepoPath::root()); + assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &HashSet::new()); + } + + #[test] + fn dirs_dir() { + let mut dirs = Dirs::new(); + dirs.add_dir(DirRepoPath::from("dir/")); + let mut expected_root_dirs = HashSet::new(); + expected_root_dirs.insert(DirRepoPathComponent::from("dir")); + assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &expected_root_dirs); + 
} + + #[test] + fn dirs_file() { + let mut dirs = Dirs::new(); + dirs.add_file(&FileRepoPath::from("dir/file")); + let mut expected_root_dirs = HashSet::new(); + expected_root_dirs.insert(DirRepoPathComponent::from("dir")); + assert_eq!(dirs.get_dirs(&DirRepoPath::root()), &expected_root_dirs); + assert_eq!(dirs.get_files(&DirRepoPath::root()), &HashSet::new()); + } + + #[test] + fn filesmatcher_empty() { + let m = FilesMatcher::new(HashSet::new()); + assert_eq!(m.matches(&FileRepoPath::from("file")), false); + assert_eq!(m.matches(&FileRepoPath::from("dir/file")), false); + assert_eq!( + m.visit(&DirRepoPath::root()), + Visit { + dirs: VisitDirs::Set(&HashSet::new()), + files: VisitFiles::Set(&HashSet::new()), + } + ); + } + + #[test] + fn filesmatcher_nonempty() { + let mut files = HashSet::new(); + files.insert(FileRepoPath::from("dir1/subdir1/file1")); + files.insert(FileRepoPath::from("dir1/subdir1/file2")); + files.insert(FileRepoPath::from("dir1/subdir2/file3")); + files.insert(FileRepoPath::from("file4")); + let m = FilesMatcher::new(files); + + let expected_root_files = vec![FileRepoPathComponent::from("file4")] + .into_iter() + .collect(); + let expected_root_dirs = vec![DirRepoPathComponent::from("dir1")] + .into_iter() + .collect(); + assert_eq!( + m.visit(&DirRepoPath::root()), + Visit { + dirs: VisitDirs::Set(&expected_root_dirs), + files: VisitFiles::Set(&expected_root_files), + } + ); + } +} diff --git a/lib/src/op_store.rs b/lib/src/op_store.rs new file mode 100644 index 000000000..0d14fced1 --- /dev/null +++ b/lib/src/op_store.rs @@ -0,0 +1,131 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::store::{CommitId, Timestamp}; +use std::collections::HashSet; +use std::fmt::{Debug, Error, Formatter}; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct ViewId(pub Vec); + +impl Debug for ViewId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("ViewId").field(&self.hex()).finish() + } +} + +impl ViewId { + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct OperationId(pub Vec); + +impl Debug for OperationId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("OperationId").field(&self.hex()).finish() + } +} + +impl OperationId { + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +/// Represents the way the repo looks at a given time, just like how a Tree +/// object represents how the file system looks at a given time. +#[derive(Clone)] +pub struct View { + /// All head commits + pub head_ids: HashSet, + // The commit that *should be* checked out in the (default) working copy. Note that the + // working copy (.jj/working_copy/) has the source of truth about which commit *is* checked out + // (to be precise: the commit to which we most recently completed a checkout to). 
+ // TODO: Allow multiple working copies + pub checkout: CommitId, +} + +impl View { + pub fn new(checkout: CommitId) -> Self { + Self { + head_ids: HashSet::new(), + checkout, + } + } +} + +/// Represents an operation (transaction) on the repo view, just like how a +/// Commit object represents an operation on the tree. +/// +/// Operations and views are not meant to be exchanged between repos or users; +/// they represent local state and history. +/// +/// The operation history will almost always be linear. It will only have +/// forks when parallel operations occurred. The parent is determined when +/// the transaction starts. When the transaction commits, a lock will be +/// taken and it will be checked that the current head of the operation +/// graph is unchanged. If the current head has changed, there has been +/// concurrent operation. +#[derive(Clone)] +pub struct Operation { + pub view_id: ViewId, + pub parents: Vec, + pub metadata: OperationMetadata, +} + +#[derive(Clone)] +pub struct OperationMetadata { + pub start_time: Timestamp, + pub end_time: Timestamp, + // Whatever is useful to the user, such as exact command line call + pub description: String, + pub hostname: String, + pub username: String, +} + +impl OperationMetadata { + pub fn new(description: String) -> Self { + let timestamp = Timestamp::now(); + let hostname = whoami::hostname(); + let username = whoami::username(); + OperationMetadata { + start_time: timestamp.clone(), + end_time: timestamp, + description, + hostname, + username, + } + } +} + +#[derive(Debug)] +pub enum OpStoreError { + NotFound, + Other(String), +} + +pub type OpStoreResult = Result; + +pub trait OpStore: Send + Sync + Debug { + fn read_view(&self, id: &ViewId) -> OpStoreResult; + + fn write_view(&self, contents: &View) -> OpStoreResult; + + fn read_operation(&self, id: &OperationId) -> OpStoreResult; + + fn write_operation(&self, contents: &Operation) -> OpStoreResult; +} diff --git a/lib/src/operation.rs 
b/lib/src/operation.rs new file mode 100644 index 000000000..96e998ed0 --- /dev/null +++ b/lib/src/operation.rs @@ -0,0 +1,162 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::op_store; +use crate::op_store::{OpStore, OperationId, ViewId}; +use crate::store::CommitId; +use std::cmp::Ordering; +use std::collections::HashSet; +use std::fmt::{Debug, Error, Formatter}; +use std::hash::{Hash, Hasher}; +use std::sync::Arc; + +#[derive(Clone)] +pub struct Operation { + op_store: Arc, + id: OperationId, + data: op_store::Operation, +} + +impl Debug for Operation { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_struct("Operation").field("id", &self.id).finish() + } +} + +impl PartialEq for Operation { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } +} + +impl Eq for Operation {} + +impl Ord for Operation { + fn cmp(&self, other: &Self) -> Ordering { + self.id.cmp(&other.id) + } +} + +impl PartialOrd for Operation { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.id.cmp(&other.id)) + } +} + +impl Hash for Operation { + fn hash(&self, state: &mut H) { + self.id.hash(state) + } +} + +impl Operation { + pub fn new(op_store: Arc, id: OperationId, data: op_store::Operation) -> Self { + Operation { op_store, id, data } + } + + pub fn op_store(&self) -> Arc { + self.op_store.clone() + } + + pub fn id(&self) -> &OperationId { + &self.id + } + + pub fn parents(&self) -> Vec 
{ + let mut parents = Vec::new(); + for parent_id in &self.data.parents { + let data = self.op_store.read_operation(parent_id).unwrap(); + parents.push(Operation::new( + self.op_store.clone(), + parent_id.clone(), + data, + )); + } + parents + } + + pub fn view(&self) -> View { + let data = self.op_store.read_view(&self.data.view_id).unwrap(); + View::new(self.op_store.clone(), self.data.view_id.clone(), data) + } + + pub fn store_operation(&self) -> &op_store::Operation { + &self.data + } +} + +#[derive(Clone)] +pub struct View { + op_store: Arc, + id: ViewId, + data: op_store::View, +} + +impl Debug for View { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_struct("View").field("id", &self.id).finish() + } +} + +impl PartialEq for View { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } +} + +impl Eq for View {} + +impl Ord for View { + fn cmp(&self, other: &Self) -> Ordering { + self.id.cmp(&other.id) + } +} + +impl PartialOrd for View { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.id.cmp(&other.id)) + } +} + +impl Hash for View { + fn hash(&self, state: &mut H) { + self.id.hash(state) + } +} + +impl View { + pub fn new(op_store: Arc, id: ViewId, data: op_store::View) -> Self { + View { op_store, id, data } + } + + pub fn op_store(&self) -> Arc { + self.op_store.clone() + } + + pub fn id(&self) -> &ViewId { + &self.id + } + + pub fn store_view(&self) -> &op_store::View { + &self.data + } + + pub fn take_store_view(self) -> op_store::View { + self.data + } + + pub fn heads(&self) -> &HashSet { + &self.data.head_ids + } +} diff --git a/lib/src/repo.rs b/lib/src/repo.rs new file mode 100644 index 000000000..55342c63f --- /dev/null +++ b/lib/src/repo.rs @@ -0,0 +1,308 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::{Debug, Formatter}; +use std::fs; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::PathBuf; +use std::sync::{Arc, Mutex, MutexGuard}; + +use thiserror::Error; + +use crate::commit_builder::{new_change_id, signature}; +use crate::evolution::{Evolution, ReadonlyEvolution}; +use crate::git_store::GitStore; +use crate::index::Index; +use crate::local_store::LocalStore; +use crate::operation::Operation; +use crate::settings::{RepoSettings, UserSettings}; +use crate::store; +use crate::store::{Store, StoreError}; +use crate::store_wrapper::StoreWrapper; +use crate::transaction::Transaction; +use crate::view::{ReadonlyView, View}; +use crate::working_copy::WorkingCopy; + +#[derive(Debug, Error, PartialEq, Eq)] +pub enum RepoError { + #[error("Object not found")] + NotFound, + #[error("Error: {0}")] + Other(String), +} + +impl From for RepoError { + fn from(err: StoreError) -> Self { + match err { + StoreError::NotFound => RepoError::NotFound, + StoreError::Other(description) => RepoError::Other(description), + } + } +} + +pub type RepoResult = Result; + +pub trait Repo: Sync { + fn store(&self) -> &Arc; + fn view(&self) -> &dyn View; + fn evolution(&self) -> &dyn Evolution; +} + +pub struct ReadonlyRepo { + repo_path: PathBuf, + wc_path: PathBuf, + store: Arc, + settings: RepoSettings, + index: Mutex>>>, + working_copy: Arc>, + view: ReadonlyView, + evolution: Option>, +} + +impl Debug for ReadonlyRepo { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + f.debug_struct("Repo") + .field("repo_path", 
&self.repo_path) + .field("wc_path", &self.wc_path) + .field("store", &self.store) + .finish() + } +} + +impl ReadonlyRepo { + pub fn init_local(settings: &UserSettings, wc_path: PathBuf) -> Arc { + let repo_path = wc_path.join(".jj"); + fs::create_dir(repo_path.clone()).unwrap(); + let store_path = repo_path.join("store"); + fs::create_dir(&store_path).unwrap(); + let store = Box::new(LocalStore::init(store_path)); + ReadonlyRepo::init(settings, repo_path, wc_path, store) + } + + pub fn init_git( + settings: &UserSettings, + wc_path: PathBuf, + git_store_path: PathBuf, + ) -> Arc { + let repo_path = wc_path.join(".jj"); + fs::create_dir(repo_path.clone()).unwrap(); + let store_path = repo_path.join("store"); + let git_store_path = fs::canonicalize(git_store_path).unwrap(); + let mut store_file = File::create(store_path).unwrap(); + store_file + .write_all((String::from("git: ") + git_store_path.to_str().unwrap()).as_bytes()) + .unwrap(); + let store = Box::new(GitStore::load(git_store_path)); + ReadonlyRepo::init(settings, repo_path, wc_path, store) + } + + fn init( + user_settings: &UserSettings, + repo_path: PathBuf, + wc_path: PathBuf, + store: Box, + ) -> Arc { + let repo_settings = user_settings.with_repo(&repo_path).unwrap(); + let store = StoreWrapper::new(store); + + fs::create_dir(repo_path.join("working_copy")).unwrap(); + let working_copy = WorkingCopy::init(store.clone(), repo_path.join("working_copy")); + + fs::create_dir(repo_path.join("view")).unwrap(); + let signature = signature(user_settings); + let checkout_commit = store::Commit { + parents: vec![], + predecessors: vec![], + root_tree: store.empty_tree_id().clone(), + change_id: new_change_id(), + description: "".to_string(), + author: signature.clone(), + committer: signature, + is_open: true, + is_pruned: false, + }; + let checkout_commit = store.write_commit(checkout_commit); + let view = ReadonlyView::init( + store.clone(), + repo_path.join("view"), + checkout_commit.id().clone(), + ); + + 
let repo = ReadonlyRepo { + repo_path: repo_path.clone(), + wc_path, + store, + settings: repo_settings, + index: Mutex::new(None), + working_copy: Arc::new(Mutex::new(working_copy)), + view, + evolution: None, + }; + let mut repo = Arc::new(repo); + let repo_ref: &ReadonlyRepo = repo.as_ref(); + let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) }; + + fs::create_dir(repo_path.join("index")).unwrap(); + Index::init(repo_path.join("index")); + + let evolution = ReadonlyEvolution::new(static_lifetime_repo); + + ReadonlyRepo::init_cycles(&mut repo, evolution); + repo.working_copy_locked() + .check_out(&repo, checkout_commit) + .expect("failed to check out root commit"); + repo + } + + pub fn load(user_settings: &UserSettings, wc_path: PathBuf) -> Arc { + let repo_path = wc_path.join(".jj"); + let store_path = repo_path.join("store"); + let store: Box; + if store_path.is_dir() { + store = Box::new(LocalStore::load(store_path)); + } else { + let mut store_file = File::open(store_path).unwrap(); + let mut buf = Vec::new(); + store_file.read_to_end(&mut buf).unwrap(); + let contents = String::from_utf8(buf).unwrap(); + assert!(contents.starts_with("git: ")); + let git_store_path_str = contents[5..].to_string(); + let git_store_path = PathBuf::from(git_store_path_str); + store = Box::new(GitStore::load(git_store_path)); + } + let store = StoreWrapper::new(store); + let repo_settings = user_settings.with_repo(&repo_path).unwrap(); + let working_copy = WorkingCopy::load(store.clone(), repo_path.join("working_copy")); + let view = ReadonlyView::load(store.clone(), repo_path.join("view")); + let repo = ReadonlyRepo { + repo_path, + wc_path, + store, + settings: repo_settings, + index: Mutex::new(None), + working_copy: Arc::new(Mutex::new(working_copy)), + view, + evolution: None, + }; + let mut repo = Arc::new(repo); + let repo_ref: &ReadonlyRepo = repo.as_ref(); + let static_lifetime_repo: &'static ReadonlyRepo = unsafe { 
std::mem::transmute(repo_ref) }; + let evolution = ReadonlyEvolution::new(static_lifetime_repo); + ReadonlyRepo::init_cycles(&mut repo, evolution); + repo + } + + fn init_cycles(mut repo: &mut Arc, evolution: ReadonlyEvolution<'static>) { + let mut repo_ref_mut = Arc::get_mut(&mut repo).unwrap(); + repo_ref_mut.evolution = Some(evolution); + } + + pub fn repo_path(&self) -> &PathBuf { + &self.repo_path + } + + pub fn working_copy_path(&self) -> &PathBuf { + &self.wc_path + } + + pub fn index<'r>(&'r self) -> Arc> { + let mut locked_index = self.index.lock().unwrap(); + if locked_index.is_none() { + let repo_ref: &ReadonlyRepo = self; + let op_id = self.view.base_op_head_id().clone(); + let static_lifetime_repo: &'static ReadonlyRepo = + unsafe { std::mem::transmute(repo_ref) }; + locked_index.replace(Arc::new(Index::load( + static_lifetime_repo, + self.repo_path.join("index"), + op_id, + ))); + } + let index: Arc> = locked_index.as_ref().unwrap().clone(); + // cast to lifetime of self + let index: Arc> = unsafe { std::mem::transmute(index) }; + index + } + + pub fn reindex(&mut self) -> Arc { + Index::reinit(self.repo_path.join("index")); + { + let mut locked_index = self.index.lock().unwrap(); + locked_index.take(); + } + self.index() + } + + pub fn working_copy(&self) -> &Arc> { + &self.working_copy + } + + pub fn working_copy_locked(&self) -> MutexGuard { + self.working_copy.as_ref().lock().unwrap() + } + + pub fn store(&self) -> &Arc { + &self.store + } + + pub fn settings(&self) -> &RepoSettings { + &self.settings + } + + pub fn start_transaction(&self, description: &str) -> Transaction { + Transaction::new( + &self, + &self.view, + &self.evolution.as_ref().unwrap(), + description, + ) + } + + pub fn reload(&mut self) { + self.view.reload(); + let repo_ref: &ReadonlyRepo = self; + let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) }; + { + let mut locked_index = self.index.lock().unwrap(); + locked_index.take(); + } + 
self.evolution = Some(ReadonlyEvolution::new(static_lifetime_repo)); + } + + pub fn reload_at(&mut self, operation: &Operation) { + self.view.reload_at(operation); + let repo_ref: &ReadonlyRepo = self; + let static_lifetime_repo: &'static ReadonlyRepo = unsafe { std::mem::transmute(repo_ref) }; + { + let mut locked_index = self.index.lock().unwrap(); + locked_index.take(); + } + self.evolution = Some(ReadonlyEvolution::new(static_lifetime_repo)); + } +} + +impl Repo for ReadonlyRepo { + fn store(&self) -> &Arc { + &self.store + } + + fn view(&self) -> &dyn View { + &self.view + } + + fn evolution(&self) -> &dyn Evolution { + self.evolution.as_ref().unwrap() + } +} diff --git a/lib/src/repo_path.rs b/lib/src/repo_path.rs new file mode 100644 index 000000000..d6b332e85 --- /dev/null +++ b/lib/src/repo_path.rs @@ -0,0 +1,518 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::{Debug, Error, Formatter}; + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct RepoPathComponent { + value: String, +} + +impl RepoPathComponent { + pub fn value(&self) -> &str { + &self.value + } +} + +impl From<&str> for RepoPathComponent { + fn from(value: &str) -> Self { + assert!(!value.contains('/')); + RepoPathComponent { + value: value.to_owned(), + } + } +} + +// Does not include a trailing slash +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct DirRepoPathComponent { + value: String, +} + +impl DirRepoPathComponent { + pub fn value(&self) -> &str { + &self.value + } +} + +impl From<&str> for DirRepoPathComponent { + fn from(value: &str) -> Self { + assert!(!value.contains('/')); + DirRepoPathComponent { + value: value.to_owned(), + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct FileRepoPathComponent { + value: String, +} + +impl FileRepoPathComponent { + pub fn value(&self) -> &str { + &self.value + } +} + +impl From<&str> for FileRepoPathComponent { + fn from(value: &str) -> Self { + assert!(!value.contains('/')); + assert!(!value.is_empty()); + FileRepoPathComponent { + value: value.to_owned(), + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct RepoPath { + dir: DirRepoPath, + basename: RepoPathComponent, +} + +impl Debug for RepoPath { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.write_fmt(format_args!("{:?}", &self.to_internal_string())) + } +} + +impl RepoPath { + pub fn root() -> Self { + RepoPath { + dir: DirRepoPath::root(), + basename: RepoPathComponent { + value: String::from(""), + }, + } + } + + pub fn new(dir: DirRepoPath, basename: RepoPathComponent) -> Self { + RepoPath { dir, basename } + } + + /// The full string form used internally, not for presenting to users (where + /// we may want to use the platform's separator). 
+ pub fn to_internal_string(&self) -> String { + self.dir.to_internal_string() + self.basename.value() + } + + pub fn to_file_repo_path(&self) -> FileRepoPath { + FileRepoPath { + dir: self.dir.clone(), + basename: FileRepoPathComponent { + value: self.basename.value.clone(), + }, + } + } + pub fn to_dir_repo_path(&self) -> DirRepoPath { + if self.is_root() { + DirRepoPath::root() + } else { + self.dir.join(&DirRepoPathComponent { + value: self.basename.value.clone(), + }) + } + } + + pub fn is_root(&self) -> bool { + self.dir.is_root() && self.basename.value.is_empty() + } + + pub fn dir(&self) -> Option<&DirRepoPath> { + if self.is_root() { + None + } else { + Some(&self.dir) + } + } + + pub fn split(&self) -> Option<(&DirRepoPath, &RepoPathComponent)> { + if self.is_root() { + None + } else { + Some((&self.dir, &self.basename)) + } + } +} + +impl From<&str> for RepoPath { + fn from(value: &str) -> Self { + assert!(!value.ends_with('/')); + match value.rfind('/') { + None => RepoPath { + dir: DirRepoPath::root(), + basename: RepoPathComponent::from(value), + }, + Some(i) => RepoPath { + dir: DirRepoPath::from(&value[..=i]), + basename: RepoPathComponent::from(&value[i + 1..]), + }, + } + } +} + +// Includes a trailing slash +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct DirRepoPath { + value: Vec, +} + +impl Debug for DirRepoPath { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.write_fmt(format_args!("{:?}", &self.to_internal_string())) + } +} + +impl DirRepoPath { + pub fn root() -> Self { + DirRepoPath { value: Vec::new() } + } + + pub fn is_root(&self) -> bool { + return self.components().is_empty(); + } + + /// The full string form used internally, not for presenting to users (where + /// we may want to use the platform's separator). 
+ pub fn to_internal_string(&self) -> String { + let mut result = String::new(); + for component in &self.value { + result.push_str(component.value()); + result.push('/'); + } + result + } + + pub fn contains_dir(&self, other: &DirRepoPath) -> bool { + other.value.starts_with(&self.value) + } + + pub fn contains_file(&self, other: &FileRepoPath) -> bool { + other.dir.value.starts_with(&self.value) + } + + // TODO: consider making this return a Option or similar, + // where the slice would borrow from this instance. + pub fn parent(&self) -> Option { + match self.value.len() { + 0 => None, + n => Some(DirRepoPath { + value: self.value[..n - 1].to_vec(), + }), + } + } + + pub fn split(&self) -> Option<(DirRepoPath, DirRepoPathComponent)> { + match self.value.len() { + 0 => None, + n => Some(( + DirRepoPath { + value: self.value[..n - 1].to_vec(), + }, + self.value[n - 1].clone(), + )), + } + } + + pub fn components(&self) -> &Vec { + &self.value + } +} + +impl From<&str> for DirRepoPath { + fn from(value: &str) -> Self { + assert!(value.is_empty() || value.ends_with('/')); + let mut parts: Vec<&str> = value.split('/').collect(); + // remove the trailing empty string + parts.pop(); + + DirRepoPath { + value: parts + .iter() + .map(|x| DirRepoPathComponent { + value: x.to_string(), + }) + .collect(), + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct FileRepoPath { + dir: DirRepoPath, + basename: FileRepoPathComponent, +} + +impl Debug for FileRepoPath { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.write_fmt(format_args!("{:?}", &self.to_internal_string())) + } +} + +impl FileRepoPath { + /// The full string form used internally, not for presenting to users (where + /// we may want to use the platform's separator). 
+ pub fn to_internal_string(&self) -> String { + self.dir.to_internal_string() + self.basename.value() + } + + pub fn dir(&self) -> &DirRepoPath { + &self.dir + } + + pub fn split(&self) -> (&DirRepoPath, &FileRepoPathComponent) { + (&self.dir, &self.basename) + } + + pub fn to_repo_path(&self) -> RepoPath { + RepoPath { + dir: self.dir.clone(), + basename: RepoPathComponent { + value: self.basename.value.clone(), + }, + } + } +} + +impl From<&str> for FileRepoPath { + fn from(value: &str) -> Self { + assert!(!value.ends_with('/')); + match value.rfind('/') { + None => FileRepoPath { + dir: DirRepoPath::root(), + basename: FileRepoPathComponent::from(value), + }, + Some(i) => FileRepoPath { + dir: DirRepoPath::from(&value[..=i]), + basename: FileRepoPathComponent::from(&value[i + 1..]), + }, + } + } +} + +pub trait RepoPathJoin { + type Result; + + fn join(&self, entry: &T) -> Self::Result; +} + +impl RepoPathJoin for DirRepoPath { + type Result = DirRepoPath; + + fn join(&self, entry: &DirRepoPathComponent) -> DirRepoPath { + let mut new_dir = self.value.clone(); + new_dir.push(entry.clone()); + DirRepoPath { value: new_dir } + } +} + +impl RepoPathJoin for DirRepoPath { + type Result = FileRepoPath; + + fn join(&self, entry: &FileRepoPathComponent) -> FileRepoPath { + FileRepoPath { + dir: self.clone(), + basename: entry.clone(), + } + } +} + +impl RepoPathJoin for DirRepoPath { + type Result = RepoPath; + + fn join(&self, entry: &RepoPathComponent) -> RepoPath { + RepoPath { + dir: self.clone(), + basename: entry.clone(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_root() { + assert_eq!(RepoPath::root().is_root(), true); + assert_eq!(RepoPath::from("").is_root(), true); + assert_eq!(RepoPath::from("foo").is_root(), false); + assert_eq!(DirRepoPath::root().is_root(), true); + assert_eq!(DirRepoPath::from("").is_root(), true); + assert_eq!(DirRepoPath::from("foo/").is_root(), false); + } + + #[test] + fn value() { + 
assert_eq!(RepoPath::root().to_internal_string(), ""); + assert_eq!(RepoPath::from("dir").to_internal_string(), "dir"); + assert_eq!(RepoPath::from("file").to_internal_string(), "file"); + assert_eq!(RepoPath::from("dir/file").to_internal_string(), "dir/file"); + assert_eq!(DirRepoPath::root().to_internal_string(), ""); + assert_eq!(DirRepoPath::from("dir/").to_internal_string(), "dir/"); + assert_eq!( + DirRepoPath::from("dir/subdir/").to_internal_string(), + "dir/subdir/" + ); + assert_eq!(FileRepoPath::from("file").to_internal_string(), "file"); + assert_eq!( + FileRepoPath::from("dir/file").to_internal_string(), + "dir/file" + ); + } + + #[test] + fn order() { + assert_eq!(DirRepoPath::root() < DirRepoPath::from("dir/"), true); + assert_eq!(DirRepoPath::from("dir/") < DirRepoPath::from("dirx/"), true); + // '#' < '/' + assert_eq!(DirRepoPath::from("dir/") < DirRepoPath::from("dir#/"), true); + assert_eq!( + DirRepoPath::from("dir/") < DirRepoPath::from("dir/sub/"), + true + ); + + assert_eq!( + FileRepoPath::from("abc") < FileRepoPath::from("dir/file"), + true + ); + assert_eq!( + FileRepoPath::from("dir") < FileRepoPath::from("dir/file"), + true + ); + assert_eq!( + FileRepoPath::from("dis") < FileRepoPath::from("dir/file"), + true + ); + assert_eq!( + FileRepoPath::from("xyz") < FileRepoPath::from("dir/file"), + true + ); + assert_eq!( + FileRepoPath::from("dir1/xyz") < FileRepoPath::from("dir2/abc"), + true + ); + } + + #[test] + fn join() { + let root = DirRepoPath::root(); + let dir_component = DirRepoPathComponent::from("dir"); + let subdir_component = DirRepoPathComponent::from("subdir"); + let file_component = FileRepoPathComponent::from("file"); + assert_eq!(root.join(&file_component), FileRepoPath::from("file")); + let dir = root.join(&dir_component); + assert_eq!(dir, DirRepoPath::from("dir/")); + assert_eq!(dir.join(&file_component), FileRepoPath::from("dir/file")); + let subdir = dir.join(&subdir_component); + assert_eq!(subdir, 
DirRepoPath::from("dir/subdir/")); + assert_eq!( + subdir.join(&file_component), + FileRepoPath::from("dir/subdir/file") + ); + } + + #[test] + fn parent() { + let root = DirRepoPath::root(); + let dir_component = DirRepoPathComponent::from("dir"); + let subdir_component = DirRepoPathComponent::from("subdir"); + + let dir = root.join(&dir_component); + let subdir = dir.join(&subdir_component); + + assert_eq!(root.parent(), None); + assert_eq!(dir.parent(), Some(root)); + assert_eq!(subdir.parent(), Some(dir)); + } + + #[test] + fn split_dir() { + let root = DirRepoPath::root(); + let dir_component = DirRepoPathComponent::from("dir"); + let subdir_component = DirRepoPathComponent::from("subdir"); + + let dir = root.join(&dir_component); + let subdir = dir.join(&subdir_component); + + assert_eq!(root.split(), None); + assert_eq!(dir.split(), Some((root, dir_component))); + assert_eq!(subdir.split(), Some((dir, subdir_component))); + } + + #[test] + fn split_file() { + let root = DirRepoPath::root(); + let dir_component = DirRepoPathComponent::from("dir"); + let file_component = FileRepoPathComponent::from("file"); + + let dir = root.join(&dir_component); + + assert_eq!( + root.join(&file_component).split(), + (&root, &file_component.clone()) + ); + assert_eq!(dir.join(&file_component).split(), (&dir, &file_component)); + } + + #[test] + fn dir() { + let root = DirRepoPath::root(); + let dir_component = DirRepoPathComponent::from("dir"); + let file_component = FileRepoPathComponent::from("file"); + + let dir = root.join(&dir_component); + + assert_eq!(root.join(&file_component).dir(), &root); + assert_eq!(dir.join(&file_component).dir(), &dir); + } + + #[test] + fn components() { + assert_eq!(DirRepoPath::root().components(), &vec![]); + assert_eq!( + DirRepoPath::from("dir/").components(), + &vec![DirRepoPathComponent::from("dir")] + ); + assert_eq!( + DirRepoPath::from("dir/subdir/").components(), + &vec![ + DirRepoPathComponent::from("dir"), + 
DirRepoPathComponent::from("subdir") + ] + ); + } + + #[test] + fn convert() { + assert_eq!(RepoPath::root().to_dir_repo_path(), DirRepoPath::root()); + assert_eq!( + RepoPath::from("dir").to_dir_repo_path(), + DirRepoPath::from("dir/") + ); + assert_eq!( + RepoPath::from("dir/subdir").to_dir_repo_path(), + DirRepoPath::from("dir/subdir/") + ); + assert_eq!( + RepoPath::from("file").to_file_repo_path(), + FileRepoPath::from("file") + ); + assert_eq!( + RepoPath::from("dir/file").to_file_repo_path(), + FileRepoPath::from("dir/file") + ); + } +} diff --git a/lib/src/rewrite.rs b/lib/src/rewrite.rs new file mode 100644 index 000000000..5146a1f11 --- /dev/null +++ b/lib/src/rewrite.rs @@ -0,0 +1,86 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::commit::Commit; +use crate::commit_builder::CommitBuilder; +use crate::dag_walk::common_ancestor; +use crate::repo_path::DirRepoPath; +use crate::settings::UserSettings; +use crate::store_wrapper::StoreWrapper; +use crate::transaction::Transaction; +use crate::tree::Tree; +use crate::trees::merge_trees; + +pub fn merge_commit_trees(store: &StoreWrapper, commits: &[Commit]) -> Tree { + if commits.is_empty() { + store + .get_tree(&DirRepoPath::root(), store.empty_tree_id()) + .unwrap() + } else { + let mut new_tree = commits[0].tree(); + for (i, other_commit) in commits.iter().enumerate().skip(1) { + let ancestor = common_ancestor(&commits[0..i], vec![other_commit]); + let new_tree_id = + merge_trees(&new_tree, &ancestor.tree(), &other_commit.tree()).unwrap(); + new_tree = store.get_tree(&DirRepoPath::root(), &new_tree_id).unwrap(); + } + new_tree + } +} + +pub fn rebase_commit( + settings: &UserSettings, + tx: &mut Transaction, + old_commit: &Commit, + new_parents: &[Commit], +) -> Commit { + let store = tx.store(); + let old_base_tree = merge_commit_trees(store, &old_commit.parents()); + let new_base_tree = merge_commit_trees(store, &new_parents); + // TODO: pass in labels for the merge parts + let new_tree_id = merge_trees(&new_base_tree, &old_base_tree, &old_commit.tree()).unwrap(); + let new_parent_ids = new_parents + .iter() + .map(|commit| commit.id().clone()) + .collect(); + CommitBuilder::for_rewrite_from(settings, store, &old_commit) + .set_parents(new_parent_ids) + .set_tree(new_tree_id) + .write_to_transaction(tx) +} + +pub fn back_out_commit( + settings: &UserSettings, + tx: &mut Transaction, + old_commit: &Commit, + new_parents: &[Commit], +) -> Commit { + let store = tx.store(); + let old_base_tree = merge_commit_trees(store, &old_commit.parents()); + let new_base_tree = merge_commit_trees(store, &new_parents); + // TODO: pass in labels for the merge parts + let new_tree_id = merge_trees(&new_base_tree, &old_commit.tree(), 
&old_base_tree).unwrap(); + let new_parent_ids = new_parents + .iter() + .map(|commit| commit.id().clone()) + .collect(); + // TODO: i18n the description based on repo language + CommitBuilder::for_new_commit(settings, store, new_tree_id) + .set_parents(new_parent_ids) + .set_description(format!( + "backout of commit {}", + hex::encode(&old_commit.id().0) + )) + .write_to_transaction(tx) +} diff --git a/lib/src/settings.rs b/lib/src/settings.rs new file mode 100644 index 000000000..4252e1780 --- /dev/null +++ b/lib/src/settings.rs @@ -0,0 +1,70 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::path::PathBuf; + +#[derive(Debug, Clone)] +pub struct UserSettings { + config: config::Config, +} + +#[derive(Debug, Clone)] +pub struct RepoSettings { + config: config::Config, +} + +impl UserSettings { + pub fn from_config(config: config::Config) -> Self { + UserSettings { config } + } + + pub fn for_user() -> Result { + let mut config = config::Config::new(); + + if let Some(home_dir) = dirs::home_dir() { + config.merge( + config::File::from(home_dir.join(".jjconfig")) + .required(false) + .format(config::FileFormat::Toml), + )?; + } + + Ok(UserSettings { config }) + } + + pub fn with_repo(&self, repo_path: &PathBuf) -> Result { + let mut config = self.config.clone(); + config.merge( + config::File::from(repo_path.join("config")) + .required(false) + .format(config::FileFormat::Toml), + )?; + + Ok(RepoSettings { config }) + } + + pub fn user_name(&self) -> String { + self.config.get_str("user.name").expect("no user.name set") + } + + pub fn user_email(&self) -> String { + self.config + .get_str("user.email") + .expect("no user.email set") + } + + pub fn config(&self) -> &config::Config { + &self.config + } +} diff --git a/lib/src/simple_op_store.rs b/lib/src/simple_op_store.rs new file mode 100644 index 000000000..2088fd8ce --- /dev/null +++ b/lib/src/simple_op_store.rs @@ -0,0 +1,208 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::Debug; +use std::fs; +use std::fs::File; +use std::io::ErrorKind; +use std::io::Write; +use std::path::PathBuf; + +use blake2::{Blake2b, Digest}; +use protobuf::{Message, ProtobufError}; +use tempfile::{NamedTempFile, PersistError}; + +use crate::op_store::{ + OpStore, OpStoreError, OpStoreResult, Operation, OperationId, OperationMetadata, View, ViewId, +}; +use crate::store::{CommitId, MillisSinceEpoch, Timestamp}; + +impl From for OpStoreError { + fn from(err: std::io::Error) -> Self { + OpStoreError::Other(err.to_string()) + } +} + +impl From for OpStoreError { + fn from(err: PersistError) -> Self { + OpStoreError::Other(err.to_string()) + } +} + +impl From for OpStoreError { + fn from(err: ProtobufError) -> Self { + OpStoreError::Other(err.to_string()) + } +} + +#[derive(Debug)] +pub struct SimpleOpStore { + path: PathBuf, +} + +impl SimpleOpStore { + pub fn init(store_path: PathBuf) -> Self { + fs::create_dir(store_path.join("views")).unwrap(); + fs::create_dir(store_path.join("operations")).unwrap(); + Self::load(store_path) + } + + pub fn load(store_path: PathBuf) -> Self { + SimpleOpStore { path: store_path } + } + + fn view_path(&self, id: &ViewId) -> PathBuf { + self.path.join("views").join(id.hex()) + } + + fn operation_path(&self, id: &OperationId) -> PathBuf { + self.path.join("operations").join(id.hex()) + } +} + +fn not_found_to_store_error(err: std::io::Error) -> OpStoreError { + if err.kind() == ErrorKind::NotFound { + OpStoreError::NotFound + } else { + OpStoreError::from(err) + } +} + +impl OpStore for SimpleOpStore { + fn read_view(&self, id: &ViewId) -> OpStoreResult { + let path = self.view_path(&id); + let mut file = File::open(path).map_err(not_found_to_store_error)?; + + let proto: protos::op_store::View = protobuf::parse_from_reader(&mut file)?; + Ok(view_from_proto(&proto)) + } + + fn write_view(&self, view: &View) -> OpStoreResult { + let temp_file = NamedTempFile::new_in(&self.path)?; + + let proto = 
view_to_proto(view); + let mut proto_bytes: Vec = Vec::new(); + proto.write_to_writer(&mut proto_bytes)?; + + temp_file.as_file().write_all(&proto_bytes)?; + + let id = ViewId(Blake2b::digest(&proto_bytes).to_vec()); + + temp_file.persist(self.view_path(&id))?; + Ok(id) + } + + fn read_operation(&self, id: &OperationId) -> OpStoreResult { + let path = self.operation_path(&id); + let mut file = File::open(path).map_err(not_found_to_store_error)?; + + let proto: protos::op_store::Operation = protobuf::parse_from_reader(&mut file)?; + Ok(operation_from_proto(&proto)) + } + + fn write_operation(&self, operation: &Operation) -> OpStoreResult { + let temp_file = NamedTempFile::new_in(&self.path)?; + + let proto = operation_to_proto(operation); + let mut proto_bytes: Vec = Vec::new(); + proto.write_to_writer(&mut proto_bytes)?; + + temp_file.as_file().write_all(&proto_bytes)?; + + let id = OperationId(Blake2b::digest(&proto_bytes).to_vec()); + + temp_file.persist(self.operation_path(&id))?; + Ok(id) + } +} + +fn timestamp_to_proto(timestamp: &Timestamp) -> protos::op_store::Timestamp { + let mut proto = protos::op_store::Timestamp::new(); + proto.set_millis_since_epoch(timestamp.timestamp.0); + proto.set_tz_offset(timestamp.tz_offset); + proto +} + +fn timestamp_from_proto(proto: &protos::op_store::Timestamp) -> Timestamp { + Timestamp { + timestamp: MillisSinceEpoch(proto.millis_since_epoch), + tz_offset: proto.tz_offset, + } +} + +fn operation_metadata_to_proto( + metadata: &OperationMetadata, +) -> protos::op_store::OperationMetadata { + let mut proto = protos::op_store::OperationMetadata::new(); + proto.set_start_time(timestamp_to_proto(&metadata.start_time)); + proto.set_end_time(timestamp_to_proto(&metadata.end_time)); + proto.set_description(metadata.description.clone()); + proto.set_hostname(metadata.hostname.clone()); + proto.set_username(metadata.username.clone()); + proto +} + +fn operation_metadata_from_proto(proto: &protos::op_store::OperationMetadata) -> 
OperationMetadata { + let start_time = timestamp_from_proto(proto.get_start_time()); + let end_time = timestamp_from_proto(proto.get_end_time()); + let description = proto.get_description().to_owned(); + let hostname = proto.get_hostname().to_owned(); + let username = proto.get_username().to_owned(); + OperationMetadata { + start_time, + end_time, + description, + hostname, + username, + } +} + +fn operation_to_proto(operation: &Operation) -> protos::op_store::Operation { + let mut proto = protos::op_store::Operation::new(); + proto.set_view_id(operation.view_id.0.clone()); + for parent in &operation.parents { + proto.parents.push(parent.0.clone()); + } + proto.set_metadata(operation_metadata_to_proto(&operation.metadata)); + proto +} + +fn operation_from_proto(proto: &protos::op_store::Operation) -> Operation { + let operation_id_from_proto = |parent: &Vec| OperationId(parent.clone()); + let parents = proto.parents.iter().map(operation_id_from_proto).collect(); + let view_id = ViewId(proto.view_id.to_vec()); + let metadata = operation_metadata_from_proto(proto.get_metadata()); + Operation { + view_id, + parents, + metadata, + } +} + +fn view_to_proto(view: &View) -> protos::op_store::View { + let mut proto = protos::op_store::View::new(); + proto.checkout = view.checkout.0.clone(); + for head_id in &view.head_ids { + proto.head_ids.push(head_id.0.clone()); + } + proto +} + +fn view_from_proto(proto: &protos::op_store::View) -> View { + let mut view = View::new(CommitId(proto.checkout.clone())); + for head_id_bytes in proto.head_ids.iter() { + view.head_ids.insert(CommitId(head_id_bytes.to_vec())); + } + view +} diff --git a/lib/src/store.rs b/lib/src/store.rs new file mode 100644 index 000000000..f06248dc7 --- /dev/null +++ b/lib/src/store.rs @@ -0,0 +1,354 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::BTreeMap; +use std::fmt::{Debug, Error, Formatter}; +use std::io::Read; +use std::result::Result; +use std::vec::Vec; + +use crate::repo_path::DirRepoPath; +use crate::repo_path::FileRepoPath; +use std::borrow::Borrow; +use thiserror::Error; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct CommitId(pub Vec); + +impl Debug for CommitId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("CommitId").field(&self.hex()).finish() + } +} + +impl CommitId { + pub fn from_hex(hex: &str) -> Self { + CommitId(hex::decode(hex).unwrap()) + } + + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct ChangeId(pub Vec); + +impl Debug for ChangeId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("ChangeId").field(&self.hex()).finish() + } +} + +impl ChangeId { + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct TreeId(pub Vec); + +impl Debug for TreeId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("TreeId").field(&self.hex()).finish() + } +} + +impl TreeId { + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct FileId(pub Vec); + +impl Debug for FileId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("FileId").field(&self.hex()).finish() + } +} + +impl FileId { + pub 
fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct SymlinkId(pub Vec); + +impl Debug for SymlinkId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("SymlinkId").field(&self.hex()).finish() + } +} + +impl SymlinkId { + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct ConflictId(pub Vec); + +impl Debug for ConflictId { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_tuple("ConflictId").field(&self.hex()).finish() + } +} + +impl ConflictId { + pub fn hex(&self) -> String { + hex::encode(&self.0) + } +} + +pub enum Phase { + Public, + Draft, +} + +#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)] +pub struct MillisSinceEpoch(pub u64); + +#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)] +pub struct Timestamp { + pub timestamp: MillisSinceEpoch, + // time zone offset in minutes + pub tz_offset: i32, +} + +impl Timestamp { + pub fn now() -> Self { + let now = chrono::offset::Local::now(); + Self { + timestamp: MillisSinceEpoch(now.timestamp_millis() as u64), + tz_offset: now.offset().local_minus_utc() / 60, + } + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Signature { + pub name: String, + pub email: String, + pub timestamp: Timestamp, +} + +#[derive(Debug, Clone)] +pub struct Commit { + pub parents: Vec, + pub predecessors: Vec, + pub root_tree: TreeId, + pub change_id: ChangeId, + pub description: String, + pub author: Signature, + pub committer: Signature, + pub is_open: bool, + pub is_pruned: bool, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConflictPart { + // TODO: Store e.g. CommitId here too? Labels (theirs/ours/base)? Would those still be + // useful e.g. after rebasing this conflict? 
+ pub value: TreeValue, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Conflict { + // A conflict is represented by a list of positive and negative states that need to be applied. + // In a simple 3-way merge of B and C with merge base A, the conflict will be { add: [B, C], + // remove: [A] }. Also note that a conflict of the form { add: [A], remove: [] } is the + // same as non-conflict A. + pub removes: Vec, + pub adds: Vec, +} + +impl Conflict { + // Returns (left,base,right) if this conflict is a 3-way conflict + pub fn to_three_way( + &self, + ) -> Option<( + Option, + Option, + Option, + )> { + if self.removes.len() == 1 && self.adds.len() == 2 { + // Regular (modify/modify) 3-way conflict + Some(( + Some(self.adds[0].clone()), + Some(self.removes[0].clone()), + Some(self.adds[1].clone()), + )) + } else if self.removes.is_empty() && self.adds.len() == 2 { + // Add/add conflict + Some((Some(self.adds[0].clone()), None, Some(self.adds[1].clone()))) + } else if self.removes.len() == 1 && self.adds.len() == 1 { + // Modify/delete conflict + Some(( + Some(self.adds[0].clone()), + Some(self.removes[0].clone()), + None, + )) + } else { + None + } + } +} + +impl Default for Conflict { + fn default() -> Self { + Conflict { + removes: Default::default(), + adds: Default::default(), + } + } +} + +#[derive(Debug, Error, PartialEq, Eq)] +pub enum StoreError { + #[error("Object not found")] + NotFound, + #[error("Error: {0}")] + Other(String), +} + +pub type StoreResult = Result; + +#[derive(Debug, PartialEq, Eq, Clone, Hash)] +pub enum TreeValue { + Normal { id: FileId, executable: bool }, + Symlink(SymlinkId), + Tree(TreeId), + GitSubmodule(CommitId), + Conflict(ConflictId), +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct TreeEntry<'a> { + name: &'a str, + value: &'a TreeValue, +} + +impl<'a> TreeEntry<'a> { + pub fn new(name: &'a str, value: &'a TreeValue) -> Self { + TreeEntry { name, value } + } + + pub fn name(&self) -> &'a str { + &self.name + } + 
+ pub fn value(&self) -> &'a TreeValue { + &self.value + } +} + +pub struct TreeEntriesIter<'a> { + iter: std::collections::btree_map::Iter<'a, String, TreeValue>, +} + +impl<'a> Iterator for TreeEntriesIter<'a> { + type Item = TreeEntry<'a>; + + fn next(&mut self) -> Option { + self.iter + .next() + .map(|(name, value)| TreeEntry { name, value }) + } +} + +#[derive(Debug, Clone)] +pub struct Tree { + entries: BTreeMap, +} + +impl Default for Tree { + fn default() -> Self { + Self { + entries: BTreeMap::new(), + } + } +} + +impl Tree { + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + pub fn entries(&self) -> TreeEntriesIter { + TreeEntriesIter { + iter: self.entries.iter(), + } + } + + pub fn set(&mut self, name: String, value: TreeValue) { + self.entries.insert(name, value); + } + + pub fn remove(&mut self, name: &N) + where + N: Borrow + ?Sized, + { + self.entries.remove(name.borrow()); + } + + pub fn entry(&self, name: &N) -> Option + where + N: Borrow + ?Sized, + { + self.entries + .get_key_value(name.borrow()) + .map(|(name, value)| TreeEntry { name, value }) + } + + pub fn value(&self, name: &N) -> Option<&TreeValue> + where + N: Borrow + ?Sized, + { + self.entries.get(name.borrow()) + } +} + +pub trait Store: Send + Sync + Debug { + fn hash_length(&self) -> usize; + + fn read_file(&self, path: &FileRepoPath, id: &FileId) -> StoreResult>; + + fn write_file(&self, path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult; + + fn read_symlink(&self, path: &FileRepoPath, id: &SymlinkId) -> StoreResult; + + fn write_symlink(&self, path: &FileRepoPath, target: &str) -> StoreResult; + + fn empty_tree_id(&self) -> &TreeId; + + fn read_tree(&self, path: &DirRepoPath, id: &TreeId) -> StoreResult; + + fn write_tree(&self, path: &DirRepoPath, contents: &Tree) -> StoreResult; + + fn read_commit(&self, id: &CommitId) -> StoreResult; + + fn write_commit(&self, contents: &Commit) -> StoreResult; + + // TODO: Pass in the paths here too even though they 
are unused, just like for + // files and trees? + fn read_conflict(&self, id: &ConflictId) -> StoreResult; + + fn write_conflict(&self, contents: &Conflict) -> StoreResult; +} diff --git a/lib/src/store_wrapper.rs b/lib/src/store_wrapper.rs new file mode 100644 index 000000000..ddf071f2f --- /dev/null +++ b/lib/src/store_wrapper.rs @@ -0,0 +1,199 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; +use std::sync::{Arc, RwLock, Weak}; + +use crate::commit::Commit; +use crate::repo_path::{DirRepoPath, FileRepoPath}; +use crate::store; +use crate::store::{ + ChangeId, CommitId, Conflict, ConflictId, FileId, MillisSinceEpoch, Signature, Store, + StoreResult, SymlinkId, Timestamp, TreeId, +}; +use crate::tree::Tree; +use crate::tree_builder::TreeBuilder; +use std::io::Read; + +/// Wraps the low-level store and makes it return more convenient types. Also +/// adds the root commit and adds caching. +/// TODO: Come up with a better name, possibly by renaming the current Store +/// trait to something else. 
+#[derive(Debug)] +pub struct StoreWrapper { + weak_self: Option>, + store: Box, + root_commit_id: CommitId, + commit_cache: RwLock>>, + tree_cache: RwLock>>, +} + +impl StoreWrapper { + pub fn new(store: Box) -> Arc { + let root_commit_id = CommitId(vec![0; store.hash_length()]); + let mut wrapper = Arc::new(StoreWrapper { + weak_self: None, + store, + root_commit_id, + commit_cache: Default::default(), + tree_cache: Default::default(), + }); + let weak_self = Arc::downgrade(&wrapper); + let mut ref_mut = unsafe { Arc::get_mut_unchecked(&mut wrapper) }; + ref_mut.weak_self = Some(weak_self); + wrapper + } + + pub fn hash_length(&self) -> usize { + self.store.hash_length() + } + + pub fn empty_tree_id(&self) -> &TreeId { + self.store.empty_tree_id() + } + + pub fn root_commit_id(&self) -> &CommitId { + &self.root_commit_id + } + + pub fn root_commit(&self) -> Commit { + self.get_commit(&self.root_commit_id).unwrap() + } + + pub fn get_commit(&self, id: &CommitId) -> StoreResult { + let data = self.get_store_commit(id)?; + Ok(Commit::new( + self.weak_self.as_ref().unwrap().upgrade().unwrap(), + id.clone(), + data, + )) + } + + fn make_root_commit(&self) -> store::Commit { + let timestamp = Timestamp { + timestamp: MillisSinceEpoch(0), + tz_offset: 0, + }; + let signature = Signature { + name: String::new(), + email: String::new(), + timestamp, + }; + let change_id = ChangeId(vec![0; 16]); + store::Commit { + parents: vec![], + predecessors: vec![], + root_tree: self.store.empty_tree_id().clone(), + change_id, + description: String::new(), + author: signature.clone(), + committer: signature, + is_open: false, + is_pruned: false, + } + } + + fn get_store_commit(&self, id: &CommitId) -> StoreResult> { + { + let read_locked_cached = self.commit_cache.read().unwrap(); + if let Some(data) = read_locked_cached.get(id).cloned() { + return Ok(data); + } + } + let commit = if id == self.root_commit_id() { + self.make_root_commit() + } else { + self.store.read_commit(id)? 
+ }; + let data = Arc::new(commit); + let mut write_locked_cache = self.commit_cache.write().unwrap(); + write_locked_cache.insert(id.clone(), data.clone()); + Ok(data) + } + + pub fn write_commit(&self, commit: store::Commit) -> Commit { + let commit_id = self.store.write_commit(&commit).unwrap(); + let data = Arc::new(commit); + { + let mut write_locked_cache = self.commit_cache.write().unwrap(); + write_locked_cache.insert(commit_id.clone(), data.clone()); + } + let commit = Commit::new( + self.weak_self.as_ref().unwrap().upgrade().unwrap(), + commit_id, + data, + ); + commit + } + + pub fn get_tree(&self, dir: &DirRepoPath, id: &TreeId) -> StoreResult { + let data = self.get_store_tree(dir, id)?; + Ok(Tree::new( + self.weak_self.as_ref().unwrap().upgrade().unwrap(), + dir.clone(), + id.clone(), + data, + )) + } + + fn get_store_tree(&self, dir: &DirRepoPath, id: &TreeId) -> StoreResult> { + let key = (dir.clone(), id.clone()); + { + let read_locked_cache = self.tree_cache.read().unwrap(); + if let Some(data) = read_locked_cache.get(&key).cloned() { + return Ok(data); + } + } + let data = Arc::new(self.store.read_tree(dir, id)?); + let mut write_locked_cache = self.tree_cache.write().unwrap(); + write_locked_cache.insert(key, data.clone()); + Ok(data) + } + + pub fn write_tree(&self, path: &DirRepoPath, contents: &store::Tree) -> StoreResult { + // TODO: This should also do caching like write_commit does. 
+ self.store.write_tree(path, contents) + } + + pub fn read_file(&self, path: &FileRepoPath, id: &FileId) -> StoreResult> { + self.store.read_file(path, id) + } + + pub fn write_file(&self, path: &FileRepoPath, contents: &mut dyn Read) -> StoreResult { + self.store.write_file(path, contents) + } + + pub fn read_symlink(&self, path: &FileRepoPath, id: &SymlinkId) -> StoreResult { + self.store.read_symlink(path, id) + } + + pub fn write_symlink(&self, path: &FileRepoPath, contents: &str) -> StoreResult { + self.store.write_symlink(path, contents) + } + + pub fn read_conflict(&self, id: &ConflictId) -> StoreResult { + self.store.read_conflict(id) + } + + pub fn write_conflict(&self, contents: &Conflict) -> StoreResult { + self.store.write_conflict(contents) + } + + pub fn tree_builder(&self, base_tree_id: TreeId) -> TreeBuilder { + TreeBuilder::new( + self.weak_self.as_ref().unwrap().upgrade().unwrap(), + base_tree_id, + ) + } +} diff --git a/lib/src/testutils.rs b/lib/src/testutils.rs new file mode 100644 index 000000000..49b448339 --- /dev/null +++ b/lib/src/testutils.rs @@ -0,0 +1,123 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fs; +use std::fs::OpenOptions; +use std::io::Write; +use std::sync::Arc; + +use tempfile::TempDir; + +use crate::commit_builder::CommitBuilder; +use crate::repo::ReadonlyRepo; +use crate::repo_path::{DirRepoPath, FileRepoPath}; +use crate::settings::UserSettings; +use crate::store::{FileId, TreeId, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use crate::tree::Tree; +use crate::tree_builder::TreeBuilder; + +pub fn user_settings() -> UserSettings { + let mut config = config::Config::new(); + config.set("user.name", "Test User").unwrap(); + config.set("user.email", "test.user@example.com").unwrap(); + UserSettings::from_config(config) +} + +pub fn init_repo(settings: &UserSettings, use_git: bool) -> (TempDir, Arc) { + let temp_dir = tempfile::tempdir().unwrap(); + + let wc_path = temp_dir.path().join("repo"); + fs::create_dir(&wc_path).unwrap(); + + let repo = if use_git { + let git_path = temp_dir.path().join("git-repo"); + git2::Repository::init(&git_path).unwrap(); + ReadonlyRepo::init_git(&settings, wc_path, git_path) + } else { + ReadonlyRepo::init_local(&settings, wc_path) + }; + + (temp_dir, repo) +} + +pub fn write_file(store: &StoreWrapper, path: &FileRepoPath, contents: &str) -> FileId { + store.write_file(path, &mut contents.as_bytes()).unwrap() +} + +pub fn write_normal_file(tree_builder: &mut TreeBuilder, path: &FileRepoPath, contents: &str) { + let id = write_file(tree_builder.repo(), path, contents); + tree_builder.set( + path.to_repo_path(), + TreeValue::Normal { + id, + executable: false, + }, + ); +} + +pub fn write_executable_file(tree_builder: &mut TreeBuilder, path: &FileRepoPath, contents: &str) { + let id = write_file(tree_builder.repo(), path, contents); + tree_builder.set( + path.to_repo_path(), + TreeValue::Normal { + id, + executable: true, + }, + ); +} + +pub fn write_symlink(tree_builder: &mut TreeBuilder, path: &FileRepoPath, target: &str) { + let id = tree_builder.repo().write_symlink(path, target).unwrap(); + 
tree_builder.set(path.to_repo_path(), TreeValue::Symlink(id)); +} + +pub fn create_tree(repo: &ReadonlyRepo, path_contents: &[(&FileRepoPath, &str)]) -> Tree { + let store = repo.store(); + let mut tree_builder = store.tree_builder(store.empty_tree_id().clone()); + for (path, contents) in path_contents { + write_normal_file(&mut tree_builder, path, contents); + } + let id = tree_builder.write_tree(); + store.get_tree(&DirRepoPath::root(), &id).unwrap() +} + +#[must_use] +pub fn create_random_tree(repo: &ReadonlyRepo) -> TreeId { + let mut tree_builder = repo + .store() + .tree_builder(repo.store().empty_tree_id().clone()); + let number = rand::random::(); + let path = FileRepoPath::from(format!("file{}", number).as_str()); + write_normal_file(&mut tree_builder, &path, "contents"); + tree_builder.write_tree() +} + +#[must_use] +pub fn create_random_commit(settings: &UserSettings, repo: &ReadonlyRepo) -> CommitBuilder { + let tree_id = create_random_tree(repo); + let number = rand::random::(); + CommitBuilder::for_new_commit(settings, repo.store(), tree_id) + .set_description(format!("random commit {}", number)) +} + +pub fn write_working_copy_file(repo: &ReadonlyRepo, path: &FileRepoPath, contents: &str) { + let mut file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(repo.working_copy_path().join(path.to_internal_string())) + .unwrap(); + file.write_all(contents.as_bytes()).unwrap(); +} diff --git a/lib/src/transaction.rs b/lib/src/transaction.rs new file mode 100644 index 000000000..3de8c6686 --- /dev/null +++ b/lib/src/transaction.rs @@ -0,0 +1,226 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::commit::Commit; +use crate::commit_builder::CommitBuilder; +use crate::conflicts; +use crate::evolution::{Evolution, MutableEvolution, ReadonlyEvolution}; +use crate::op_store; +use crate::operation::Operation; +use crate::repo::{ReadonlyRepo, Repo}; +use crate::settings::UserSettings; +use crate::store; +use crate::store::{CommitId, Timestamp, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use crate::view::{MutableView, ReadonlyView, View}; +use std::io::Cursor; +use std::ops::Deref; +use std::sync::Arc; + +pub struct Transaction<'r> { + repo: Option>>, + description: String, + start_time: Timestamp, + closed: bool, +} + +pub struct MutableRepo<'r> { + repo: &'r ReadonlyRepo, + view: Option, + evolution: Option>, +} + +impl<'r> Transaction<'r> { + pub fn new( + repo: &'r ReadonlyRepo, + view: &ReadonlyView, + evolution: &ReadonlyEvolution<'r>, + description: &str, + ) -> Transaction<'r> { + let mut_view = view.start_modification(); + let internal = Arc::new(MutableRepo { + repo, + view: Some(mut_view), + evolution: None, + }); + let repo_ref: &MutableRepo = internal.as_ref(); + let static_lifetime_repo: &'static MutableRepo = unsafe { std::mem::transmute(repo_ref) }; + let mut tx = Transaction { + repo: Some(internal), + description: description.to_owned(), + start_time: Timestamp::now(), + closed: false, + }; + let mut_evolution: MutableEvolution<'_, '_> = + evolution.start_modification(static_lifetime_repo); + let static_lifetime_mut_evolution: MutableEvolution<'static, 'static> = + unsafe { 
std::mem::transmute(mut_evolution) }; + Arc::get_mut(tx.repo.as_mut().unwrap()).unwrap().evolution = + Some(static_lifetime_mut_evolution); + tx + } + + pub fn base_repo(&self) -> &'r ReadonlyRepo { + self.repo.as_ref().unwrap().repo + } + + pub fn store(&self) -> &Arc { + self.repo.as_ref().unwrap().repo.store() + } + + pub fn as_repo<'a: 'r>(&'a self) -> &(impl Repo + 'a) { + self.repo.as_ref().unwrap().deref() + } + + pub fn as_repo_mut(&mut self) -> &mut MutableRepo<'r> { + Arc::get_mut(self.repo.as_mut().unwrap()).unwrap() + } + + pub fn write_commit(&mut self, commit: store::Commit) -> Commit { + let commit = self + .repo + .as_ref() + .unwrap() + .repo + .store() + .write_commit(commit); + self.add_head(&commit); + commit + } + + pub fn check_out(&mut self, settings: &UserSettings, commit: &Commit) -> Commit { + let current_checkout_id = self.as_repo().view().checkout().clone(); + let current_checkout = self.store().get_commit(¤t_checkout_id).unwrap(); + assert!(current_checkout.is_open(), "current checkout is closed"); + if current_checkout.is_empty() + && !(current_checkout.is_pruned() + || self.as_repo().evolution().is_obsolete(¤t_checkout_id)) + { + // Prune the checkout we're leaving if it's empty. + // TODO: Also prune it if the only changes are conflicts that got materialized. + CommitBuilder::for_rewrite_from(settings, self.store(), ¤t_checkout) + .set_pruned(true) + .write_to_transaction(self); + } + let store = self.store(); + // Create a new tree with any conflicts resolved. 
+ let mut tree_builder = store.tree_builder(commit.tree().id().clone()); + for (path, conflict_id) in commit.tree().conflicts() { + let conflict = store.read_conflict(&conflict_id).unwrap(); + let mut buf = vec![]; + conflicts::materialize_conflict(store, &path, &conflict, &mut buf); + let file_id = store + .write_file(&path.to_file_repo_path(), &mut Cursor::new(&buf)) + .unwrap(); + tree_builder.set( + path, + TreeValue::Normal { + id: file_id, + executable: false, + }, + ); + } + let tree_id = tree_builder.write_tree(); + let open_commit; + if !commit.is_open() { + // If the commit is closed, create a new open commit on top + open_commit = CommitBuilder::for_open_commit( + settings, + self.store(), + commit.id().clone(), + tree_id, + ) + .write_to_transaction(self); + } else if &tree_id != commit.tree().id() { + // If the commit is open but had conflicts, create a successor with the + // conflicts materialized. + open_commit = CommitBuilder::for_rewrite_from(settings, self.store(), commit) + .set_tree(tree_id) + .write_to_transaction(self); + } else { + // Otherwise the commit was open and didn't have any conflicts, so just use + // that commit as is. 
+ open_commit = commit.clone(); + } + let id = open_commit.id().clone(); + let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap(); + mut_repo.view.as_mut().unwrap().set_checkout(id); + open_commit + } + + pub fn set_checkout(&mut self, id: CommitId) { + let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap(); + mut_repo.view.as_mut().unwrap().set_checkout(id); + } + + pub fn add_head(&mut self, head: &Commit) { + let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap(); + mut_repo.view.as_mut().unwrap().add_head(head); + mut_repo.evolution.as_mut().unwrap().invalidate(); + } + + pub fn remove_head(&mut self, head: &Commit) { + let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap(); + mut_repo.view.as_mut().unwrap().remove_head(head); + mut_repo.evolution.as_mut().unwrap().invalidate(); + } + + pub fn set_view(&mut self, data: op_store::View) { + let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap(); + mut_repo.view.as_mut().unwrap().set_view(data); + mut_repo.evolution.as_mut().unwrap().invalidate(); + } + + pub fn commit(mut self) -> Operation { + let mut_repo = Arc::get_mut(self.repo.as_mut().unwrap()).unwrap(); + mut_repo.evolution = None; + let mut internal = Arc::try_unwrap(self.repo.take().unwrap()).ok().unwrap(); + let view = internal.view.take().unwrap(); + let operation = view.save(self.description.clone(), self.start_time.clone()); + self.closed = true; + operation + } + + pub fn discard(mut self) { + self.closed = true; + } +} + +impl<'r> Drop for Transaction<'r> { + fn drop(&mut self) { + if !std::thread::panicking() { + assert!(self.closed); + } + } +} + +impl<'r> Repo for MutableRepo<'r> { + fn store(&self) -> &Arc { + self.repo.store() + } + + fn view(&self) -> &dyn View { + self.view.as_ref().unwrap() + } + + fn evolution(&self) -> &dyn Evolution { + self.evolution.as_ref().unwrap() + } +} + +impl MutableRepo<'_> { + pub fn evolution_mut(&mut self) -> &MutableEvolution { + 
self.evolution.as_mut().unwrap() + } +} diff --git a/lib/src/tree.rs b/lib/src/tree.rs new file mode 100644 index 000000000..35e03acc4 --- /dev/null +++ b/lib/src/tree.rs @@ -0,0 +1,199 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::borrow::Borrow; +use std::fmt::{Debug, Error, Formatter}; +use std::sync::Arc; + +use crate::matchers::AlwaysMatcher; +use crate::repo_path::{DirRepoPath, DirRepoPathComponent, FileRepoPath, RepoPath, RepoPathJoin}; +use crate::store; +use crate::store::{ConflictId, TreeEntriesIter, TreeEntry, TreeId, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use crate::trees::{recursive_tree_diff, walk_entries, TreeValueDiff}; + +#[derive(Clone)] +pub struct Tree { + store: Arc, + dir: DirRepoPath, + id: TreeId, + data: Arc, +} + +impl Debug for Tree { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + f.debug_struct("Tree") + .field("dir", &self.dir) + .field("id", &self.id) + .finish() + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct DiffSummary { + pub modified: Vec, + pub added: Vec, + pub removed: Vec, +} + +impl Tree { + pub fn new( + store: Arc, + dir: DirRepoPath, + id: TreeId, + data: Arc, + ) -> Self { + Tree { + store, + dir, + id, + data, + } + } + + pub fn null(store: Arc, dir: DirRepoPath) -> Self { + Tree { + store, + dir, + id: TreeId(vec![]), + data: Arc::new(store::Tree::default()), + } + } + + pub fn store(&self) -> &Arc { + &self.store + } + 
+ pub fn dir(&self) -> &DirRepoPath { + &self.dir + } + + pub fn id(&self) -> &TreeId { + &self.id + } + + pub fn data(&self) -> &store::Tree { + &self.data + } + + pub fn entries(&self) -> TreeEntriesIter { + self.data.entries() + } + + pub fn entry(&self, basename: &N) -> Option + where + N: Borrow + ?Sized, + { + self.data.entry(basename) + } + + pub fn value(&self, basename: &N) -> Option<&TreeValue> + where + N: Borrow + ?Sized, + { + self.data.value(basename) + } + + pub fn path_value(&self, path: &RepoPath) -> Option { + assert_eq!(self.dir(), &DirRepoPath::root()); + match path.split() { + Some((dir, basename)) => self + .sub_tree_recursive(dir.components()) + .and_then(|tree| tree.data.value(basename.value()).cloned()), + None => Some(TreeValue::Tree(self.id.clone())), + } + } + + pub fn sub_tree(&self, name: &DirRepoPathComponent) -> Option { + self.data + .value(name.value()) + .and_then(|sub_tree| match sub_tree { + TreeValue::Tree(sub_tree_id) => { + let subdir = self.dir.join(name); + Some(self.store.get_tree(&subdir, sub_tree_id).unwrap()) + } + _ => None, + }) + } + + pub fn known_sub_tree(&self, name: &DirRepoPathComponent, id: &TreeId) -> Tree { + let subdir = self.dir.join(name); + self.store.get_tree(&subdir, id).unwrap() + } + + fn sub_tree_recursive(&self, components: &[DirRepoPathComponent]) -> Option { + if components.is_empty() { + // TODO: It would be nice to be able to return a reference here, but + // then we would have to figure out how to share Tree instances + // across threads. 
+ Some(Tree { + store: self.store.clone(), + dir: self.dir.clone(), + id: self.id.clone(), + data: self.data.clone(), + }) + } else { + match self.data.entry(components[0].value()) { + None => None, + Some(entry) => match entry.value() { + TreeValue::Tree(sub_tree_id) => { + let sub_tree = self + .known_sub_tree(&DirRepoPathComponent::from(entry.name()), sub_tree_id); + sub_tree.sub_tree_recursive(&components[1..]) + } + _ => None, + }, + } + } + } + + pub fn diff(&self, other: &Tree, callback: &mut impl FnMut(&FileRepoPath, TreeValueDiff)) { + recursive_tree_diff(self.clone(), other.clone(), &AlwaysMatcher {}, callback); + } + + pub fn diff_summary(&self, other: &Tree) -> DiffSummary { + let mut modified = vec![]; + let mut added = vec![]; + let mut removed = vec![]; + self.diff(other, &mut |file, diff| match diff { + TreeValueDiff::Modified(_, _) => modified.push(file.clone()), + TreeValueDiff::Added(_) => added.push(file.clone()), + TreeValueDiff::Removed(_) => removed.push(file.clone()), + }); + modified.sort(); + added.sort(); + removed.sort(); + DiffSummary { + modified, + added, + removed, + } + } + + pub fn has_conflict(&self) -> bool { + !self.conflicts().is_empty() + } + + pub fn conflicts(&self) -> Vec<(RepoPath, ConflictId)> { + let mut conflicts = vec![]; + walk_entries(&self, &mut |name, value| -> Result<(), ()> { + if let TreeValue::Conflict(id) = value { + conflicts.push((name.clone(), id.clone())); + } + Ok(()) + }) + .unwrap(); + conflicts + } +} diff --git a/lib/src/tree_builder.rs b/lib/src/tree_builder.rs new file mode 100644 index 000000000..20953918c --- /dev/null +++ b/lib/src/tree_builder.rs @@ -0,0 +1,148 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::{BTreeMap, HashSet}; + +use crate::repo_path::{DirRepoPath, RepoPath, RepoPathJoin}; +use crate::store; +use crate::store::{TreeId, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use crate::tree::Tree; +use std::sync::Arc; + +#[derive(Debug)] +enum Override { + Tombstone, + Replace(TreeValue), +} + +#[derive(Debug)] +pub struct TreeBuilder { + store: Arc, + base_tree_id: TreeId, + overrides: BTreeMap, +} + +impl TreeBuilder { + pub fn new(store: Arc, base_tree_id: TreeId) -> TreeBuilder { + let overrides = BTreeMap::new(); + TreeBuilder { + store, + base_tree_id, + overrides, + } + } + + pub fn repo(&self) -> &StoreWrapper { + self.store.as_ref() + } + + pub fn set(&mut self, path: RepoPath, value: TreeValue) { + self.overrides.insert(path, Override::Replace(value)); + } + + pub fn remove(&mut self, path: RepoPath) { + self.overrides.insert(path, Override::Tombstone); + } + + pub fn write_tree(mut self) -> TreeId { + let mut trees_to_write = self.get_base_trees(); + if trees_to_write.is_empty() { + return self.base_tree_id; + } + + // Update entries in parent trees for file overrides + for (path, file_override) in self.overrides { + if let Some((dir, basename)) = path.split() { + let tree = trees_to_write.get_mut(dir).unwrap(); + match file_override { + Override::Replace(value) => { + tree.set(basename.value().to_string(), value); + } + Override::Tombstone => { + tree.remove(basename.value()); + } + } + } + } + + // Write trees level by level, starting with trees without children. 
+ let store = self.store.as_ref(); + loop { + let mut dirs_to_write: HashSet = + trees_to_write.keys().cloned().into_iter().collect(); + + for dir in trees_to_write.keys() { + if let Some(parent) = dir.parent() { + dirs_to_write.remove(&parent); + } + } + + for dir in dirs_to_write { + let tree = trees_to_write.remove(&dir).unwrap(); + + if let Some((parent, basename)) = dir.split() { + let parent_tree = trees_to_write.get_mut(&parent).unwrap(); + if tree.is_empty() { + parent_tree.remove(basename.value()); + } else { + let tree_id = store.write_tree(&dir, &tree).unwrap(); + parent_tree.set(basename.value().to_string(), TreeValue::Tree(tree_id)); + } + } else { + // We're writing the root tree. Write it even if empty. Return its id. + return store.write_tree(&dir, &tree).unwrap(); + } + } + } + } + + fn get_base_trees(&mut self) -> BTreeMap { + let mut tree_cache = BTreeMap::new(); + let mut base_trees = BTreeMap::new(); + let store = self.store.clone(); + + let mut populate_trees = |dir: &DirRepoPath| { + let mut current_dir = DirRepoPath::root(); + + if !tree_cache.contains_key(&current_dir) { + let tree = store.get_tree(&current_dir, &self.base_tree_id).unwrap(); + let store_tree = tree.data().clone(); + tree_cache.insert(current_dir.clone(), tree); + base_trees.insert(current_dir.clone(), store_tree); + } + + for component in dir.components() { + let next_dir = current_dir.join(component); + let current_tree = tree_cache.get(&current_dir).unwrap(); + if !tree_cache.contains_key(&next_dir) { + let tree = current_tree + .sub_tree(component) + .unwrap_or_else(|| Tree::null(self.store.clone(), next_dir.clone())); + let store_tree = tree.data().clone(); + tree_cache.insert(next_dir.clone(), tree); + base_trees.insert(next_dir.clone(), store_tree); + } + current_dir = next_dir; + } + }; + for path in self.overrides.keys() { + if let Some(parent) = path.dir() { + populate_trees(&parent); + } + } + + base_trees + } +} diff --git a/lib/src/trees.rs b/lib/src/trees.rs new file mode 100644 
index 000000000..54ac1b688 --- /dev/null +++ b/lib/src/trees.rs @@ -0,0 +1,496 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::files; +use crate::files::MergeResult; +use crate::matchers::Matcher; +use crate::repo_path::{ + DirRepoPath, DirRepoPathComponent, FileRepoPath, FileRepoPathComponent, RepoPath, + RepoPathComponent, RepoPathJoin, +}; +use crate::store::{Conflict, ConflictPart, StoreError, TreeId, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use crate::tree::Tree; +use std::cmp::Ordering; + +pub fn walk_entries( + tree: &Tree, + callback: &mut impl FnMut(&RepoPath, &TreeValue) -> Result<(), E>, +) -> Result<(), E> { + for entry in tree.entries() { + let path = RepoPath::new(tree.dir().clone(), RepoPathComponent::from(entry.name())); + match entry.value() { + TreeValue::Tree(id) => { + let subtree = tree.known_sub_tree(&DirRepoPathComponent::from(entry.name()), id); + walk_entries(&subtree, callback)?; + } + other => { + callback(&path, other)?; + } + }; + } + Ok(()) +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Diff { + Modified(T, T), + Added(T), + Removed(T), +} + +pub type TreeValueDiff<'a> = Diff<&'a TreeValue>; + +fn diff_entries<'a, E>( + tree1: &'a Tree, + tree2: &'a Tree, + callback: &mut impl FnMut(&'a str, TreeValueDiff<'a>) -> Result<(), E>, +) -> Result<(), E> { + let mut it1 = tree1.entries(); + let mut it2 = tree2.entries(); + let mut entry1 = it1.next(); + let mut 
entry2 = it2.next(); + loop { + let name: &'a str; + let mut value_before: Option<&'a TreeValue> = None; + let mut value_after: Option<&'a TreeValue> = None; + match (&entry1, &entry2) { + (Some(before), Some(after)) => { + match before.name().cmp(after.name()) { + Ordering::Less => { + // entry removed + name = before.name(); + value_before = Some(before.value()); + } + Ordering::Greater => { + // entry added + name = after.name(); + value_after = Some(after.value()); + } + Ordering::Equal => { + // entry modified + name = before.name(); + value_before = Some(before.value()); + value_after = Some(after.value()); + } + } + } + (Some(before), None) => { + // second iterator exhausted + name = before.name(); + value_before = Some(before.value()); + } + (None, Some(after)) => { + // first iterator exhausted + name = after.name(); + value_after = Some(after.value()); + } + (None, None) => { + // both iterators exhausted + break; + } + } + + match (value_before, value_after) { + (Some(before), Some(after)) => { + if before != after { + callback(name, TreeValueDiff::Modified(before, after))?; + } + entry1 = it1.next(); + entry2 = it2.next(); + } + (Some(before), None) => { + callback(name, TreeValueDiff::Removed(before))?; + entry1 = it1.next(); + } + (None, Some(after)) => { + callback(name, TreeValueDiff::Added(after))?; + entry2 = it2.next(); + } + (None, None) => { + panic!("should have been handled above"); + } + } + } + Ok(()) +} + +pub fn recursive_tree_diff( + root1: Tree, + root2: Tree, + matcher: &M, + callback: &mut impl FnMut(&FileRepoPath, TreeValueDiff), +) where + M: Matcher, +{ + internal_recursive_tree_diff(vec![(DirRepoPath::root(), root1, root2)], matcher, callback) +} + +fn internal_recursive_tree_diff( + work: Vec<(DirRepoPath, Tree, Tree)>, + _matcher: &M, + callback: &mut impl FnMut(&FileRepoPath, TreeValueDiff), +) where + M: Matcher, +{ + let mut new_work = Vec::new(); + // Diffs for which to invoke the callback after having visited subtrees. 
This is + // used for making sure that when a directory gets replaced by a file, we + // call the callback for the addition of the file after we call the callback + // for removing files in the directory. + let mut late_file_diffs = Vec::new(); + for (dir, tree1, tree2) in &work { + diff_entries(tree1, tree2, &mut |name, + diff: TreeValueDiff| + -> Result<(), ()> { + let file_path = dir.join(&FileRepoPathComponent::from(name)); + let subdir = DirRepoPathComponent::from(name); + let subdir_path = dir.join(&subdir); + // TODO: simplify this mess + match diff { + TreeValueDiff::Modified(TreeValue::Tree(id_before), TreeValue::Tree(id_after)) => { + new_work.push(( + subdir_path, + tree1.known_sub_tree(&subdir, &id_before), + tree2.known_sub_tree(&subdir, &id_after), + )); + } + TreeValueDiff::Modified(TreeValue::Tree(id_before), file_after) => { + new_work.push(( + subdir_path.clone(), + tree1.known_sub_tree(&subdir, &id_before), + Tree::null(tree2.store().clone(), subdir_path), + )); + late_file_diffs.push((file_path, TreeValueDiff::Added(file_after))); + } + TreeValueDiff::Modified(file_before, TreeValue::Tree(id_after)) => { + new_work.push(( + subdir_path.clone(), + Tree::null(tree1.store().clone(), subdir_path), + tree2.known_sub_tree(&subdir, &id_after), + )); + callback(&file_path, TreeValueDiff::Removed(file_before)); + } + TreeValueDiff::Modified(_, _) => { + callback(&file_path, diff); + } + TreeValueDiff::Added(TreeValue::Tree(id_after)) => { + new_work.push(( + subdir_path.clone(), + Tree::null(tree1.store().clone(), subdir_path), + tree2.known_sub_tree(&subdir, &id_after), + )); + } + TreeValueDiff::Added(_) => { + callback(&file_path, diff); + } + TreeValueDiff::Removed(TreeValue::Tree(id_before)) => { + new_work.push(( + subdir_path.clone(), + tree1.known_sub_tree(&subdir, &id_before), + Tree::null(tree2.store().clone(), subdir_path), + )); + } + TreeValueDiff::Removed(_) => { + callback(&file_path, diff); + } + }; + Ok(()) + }) + .unwrap(); // safe 
because the callback always returns Ok + } + if !new_work.is_empty() { + internal_recursive_tree_diff(new_work, _matcher, callback) + } + for (file_path, diff) in late_file_diffs { + callback(&file_path, diff); + } +} + +pub fn merge_trees( + side1_tree: &Tree, + base_tree: &Tree, + side2_tree: &Tree, +) -> Result { + let store = base_tree.store().as_ref(); + let dir = base_tree.dir(); + assert_eq!(side1_tree.dir(), dir); + assert_eq!(side2_tree.dir(), dir); + + if base_tree.id() == side1_tree.id() { + return Ok(side2_tree.id().clone()); + } + if base_tree.id() == side2_tree.id() || side1_tree.id() == side2_tree.id() { + return Ok(side1_tree.id().clone()); + } + + // Start with a tree identical to side 1 and modify based on changes from base + // to side 2. + let mut new_tree = side1_tree.data().clone(); + diff_entries(base_tree, side2_tree, &mut |basename, + diff| + -> Result<(), StoreError> { + let maybe_side1 = side1_tree.value(basename); + let (maybe_base, maybe_side2) = match diff { + TreeValueDiff::Modified(base, side2) => (Some(base), Some(side2)), + TreeValueDiff::Added(side2) => (None, Some(side2)), + TreeValueDiff::Removed(base) => (Some(base), None), + }; + if maybe_side1 == maybe_base { + // side 1 is unchanged: use the value from side 2 + match maybe_side2 { + None => new_tree.remove(basename), + Some(side2) => new_tree.set(basename.to_owned(), side2.clone()), + }; + } else if maybe_side1 == maybe_side2 { + // Both sides changed in the same way: new_tree already has the + // value + } else { + // The two sides changed in different ways + let new_value = + merge_tree_value(store, dir, basename, maybe_base, maybe_side1, maybe_side2)?; + match new_value { + None => new_tree.remove(basename), + Some(value) => new_tree.set(basename.to_owned(), value), + } + } + Ok(()) + })?; + store.write_tree(dir, &new_tree) +} + +fn merge_tree_value( + store: &StoreWrapper, + dir: &DirRepoPath, + basename: &str, + maybe_base: Option<&TreeValue>, + maybe_side1: 
Option<&TreeValue>, + maybe_side2: Option<&TreeValue>, +) -> Result, StoreError> { + // Resolve non-trivial conflicts: + // * resolve tree conflicts by recursing + // * try to resolve file conflicts by merging the file contents + // * leave other conflicts (e.g. file/dir conflicts, remove/modify conflicts) + // unresolved + Ok(match (maybe_base, maybe_side1, maybe_side2) { + ( + Some(TreeValue::Tree(base)), + Some(TreeValue::Tree(side1)), + Some(TreeValue::Tree(side2)), + ) => { + let subdir = dir.join(&DirRepoPathComponent::from(basename)); + let merged_tree_id = merge_trees( + &store.get_tree(&subdir, &side1).unwrap(), + &store.get_tree(&subdir, &base).unwrap(), + &store.get_tree(&subdir, &side2).unwrap(), + )?; + if &merged_tree_id == store.empty_tree_id() { + None + } else { + Some(TreeValue::Tree(merged_tree_id)) + } + } + _ => { + let maybe_merged = match (maybe_base, maybe_side1, maybe_side2) { + ( + Some(TreeValue::Normal { + id: base_id, + executable: base_executable, + }), + Some(TreeValue::Normal { + id: side1_id, + executable: side1_executable, + }), + Some(TreeValue::Normal { + id: side2_id, + executable: side2_executable, + }), + ) => { + let executable = if base_executable == side1_executable { + *side2_executable + } else if base_executable == side2_executable { + *side1_executable + } else { + assert_eq!(side1_executable, side2_executable); + *side1_executable + }; + + let filename = dir.join(&FileRepoPathComponent::from(basename)); + let mut base_content = vec![]; + store + .read_file(&filename, &base_id)? + .read_to_end(&mut base_content)?; + let mut side1_content = vec![]; + store + .read_file(&filename, &side1_id)? + .read_to_end(&mut side1_content)?; + let mut side2_content = vec![]; + store + .read_file(&filename, &side2_id)? 
+ .read_to_end(&mut side2_content)?; + + let merge_result = files::merge(&base_content, &side1_content, &side2_content); + match merge_result { + MergeResult::Resolved(merged_content) => { + let id = store.write_file(&filename, &mut merged_content.as_slice())?; + Some(TreeValue::Normal { id, executable }) + } + MergeResult::Conflict(_) => None, + } + } + _ => None, + }; + match maybe_merged { + Some(merged) => Some(merged), + None => { + let mut conflict = Conflict::default(); + if let Some(base) = maybe_base { + conflict.removes.push(ConflictPart { + value: base.clone(), + }); + } + if let Some(side1) = maybe_side1 { + conflict.adds.push(ConflictPart { + value: side1.clone(), + }); + } + if let Some(side2) = maybe_side2 { + conflict.adds.push(ConflictPart { + value: side2.clone(), + }); + } + simplify_conflict(store, &conflict)? + } + } + } + }) +} + +fn conflict_part_to_conflict( + store: &StoreWrapper, + part: &ConflictPart, +) -> Result { + match &part.value { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id)?; + Ok(conflict) + } + other => Ok(Conflict { + removes: vec![], + adds: vec![ConflictPart { + value: other.clone(), + }], + }), + } +} + +fn simplify_conflict( + store: &StoreWrapper, + conflict: &Conflict, +) -> Result, StoreError> { + // Important cases to simplify: + // + // D + // | + // B C + // |/ + // A + // + // 1. rebase C to B, then back to A => there should be no conflict + // 2. rebase C to B, then to D => the conflict should not mention B + // 3. rebase B to C and D to B', then resolve the conflict in B' and rebase D' + // on top => the conflict should be between B'', B, and D; it should not + // mention the conflict in B' + + // Case 1 above: + // After first rebase, the conflict is {+B-A+C}. After rebasing back, + // the unsimplified conflict is {+A-B+{+B-A+C}}. Since the + // inner conflict is positive, we can simply move it into the outer conflict. 
We + // thus get {+A-B+B-A+C}, which we can then simplify to just C (because {+C} == + // C). + // + // Case 2 above: + // After first rebase, the conflict is {+B-A+C}. After rebasing to D, + // the unsimplified conflict is {+D-C+{+B-A+C}}. As in the + // previous case, the inner conflict can be moved into the outer one. We then + // get {+D-C+B-A+C}. That can be simplified to + // {+D+B-A}, which is the desired conflict. + // + // Case 3 above: + // TODO: describe this case + + // First expand any diffs with nested conflicts. + let mut new_removes = vec![]; + let mut new_adds = vec![]; + for part in &conflict.adds { + match part.value { + TreeValue::Conflict(_) => { + let conflict = conflict_part_to_conflict(&store, part)?; + new_removes.extend_from_slice(&conflict.removes); + new_adds.extend_from_slice(&conflict.adds); + } + _ => { + new_adds.push(part.clone()); + } + } + } + for part in &conflict.removes { + match part.value { + TreeValue::Conflict(_) => { + let conflict = conflict_part_to_conflict(&store, part)?; + new_removes.extend_from_slice(&conflict.adds); + new_adds.extend_from_slice(&conflict.removes); + } + _ => { + new_removes.push(part.clone()); + } + } + } + + // Remove pairs of entries that match in the removes and adds. + let mut add_index = 0; + while add_index < new_adds.len() { + let add = &new_adds[add_index]; + add_index += 1; + for (remove_index, remove) in new_removes.iter().enumerate() { + if remove.value == add.value { + new_removes.remove(remove_index); + add_index -= 1; + new_adds.remove(add_index); + break; + } + } + } + + // TODO: We should probably remove duplicate entries here too. So if we have + // {+A+A}, that would become just {+A}. Similarly {+B-A+B} would be just + // {+B-A}. + + if new_adds.is_empty() { + // If there are no values to add, then the path doesn't exist (so return None to + // indicate that). 
+ return Ok(None); + } + + if new_removes.is_empty() && new_adds.len() == 1 { + // A single add means that the current state is that state. + return Ok(Some(new_adds[0].value.clone())); + } + + let conflict_id = store.write_conflict(&Conflict { + adds: new_adds, + removes: new_removes, + })?; + Ok(Some(TreeValue::Conflict(conflict_id))) +} diff --git a/lib/src/view.rs b/lib/src/view.rs new file mode 100644 index 000000000..21bdbde15 --- /dev/null +++ b/lib/src/view.rs @@ -0,0 +1,416 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::cmp::min; +use std::collections::HashSet; +use std::path::PathBuf; +use std::sync::Arc; + +use thiserror::Error; + +use crate::commit::Commit; +use crate::dag_walk; +use crate::lock::FileLock; +use crate::op_store; +use crate::op_store::{OpStore, OpStoreResult, OperationId, OperationMetadata}; +use crate::operation::Operation; +use crate::simple_op_store::SimpleOpStore; +use crate::store::{CommitId, Timestamp}; +use crate::store_wrapper::StoreWrapper; + +pub trait View { + fn checkout(&self) -> &CommitId; + fn heads<'a>(&'a self) -> Box + 'a>; + fn op_store(&self) -> Arc; + fn base_op_head_id(&self) -> &OperationId; + + fn get_operation(&self, id: &OperationId) -> OpStoreResult { + let data = self.op_store().read_operation(id)?; + Ok(Operation::new(self.op_store().clone(), id.clone(), data)) + } + + fn base_op_head(&self) -> Operation { + self.get_operation(self.base_op_head_id()).unwrap() + } +} + +pub struct ReadonlyView { + store: Arc, + path: PathBuf, + op_store: Arc, + op_id: OperationId, + data: op_store::View, +} + +pub struct MutableView { + store: Arc, + path: PathBuf, + op_store: Arc, + base_op_head_id: OperationId, + data: op_store::View, +} + +fn heads_of_set( + store: &StoreWrapper, + commit_ids: impl Iterator, +) -> HashSet { + let mut visited = HashSet::new(); + let mut work = vec![]; + let mut oldest = std::u64::MAX; + let mut heads: HashSet = commit_ids.collect(); + for commit_id in &heads { + let commit = store.get_commit(commit_id).unwrap(); + oldest = min(oldest, commit.committer().timestamp.timestamp.0); + work.push(commit); + } + // Assume clock skew less than a month: + // TODO: use generation numbers here + let threshold = oldest.saturating_sub(1000 * 3600 * 24 * 30); + while !work.is_empty() { + let commit = work.pop().unwrap(); + if visited.contains(commit.id()) { + continue; + } + visited.insert(commit.id().clone()); + + for parent in commit.parents() { + if parent.committer().timestamp.timestamp.0 < threshold { + continue; + 
} + heads.remove(parent.id()); + work.push(parent); + } + } + heads +} + +#[derive(Debug, Error, PartialEq, Eq)] +pub enum OpHeadResolutionError { + #[error("Operation log has no heads")] + NoHeads, +} + +fn add_op_head(op_heads_dir: &PathBuf, id: &OperationId) { + std::fs::write(op_heads_dir.join(id.hex()), "").unwrap(); +} + +fn remove_op_head(op_heads_dir: &PathBuf, id: &OperationId) { + // It's fine if the old head was not found. It probably means + // that we're on a distributed file system where the locking + // doesn't work. We'll probably end up with two current + // heads. We'll detect that next time we load the view. + std::fs::remove_file(op_heads_dir.join(id.hex())).ok(); +} + +fn get_op_heads(op_heads_dir: &PathBuf) -> Vec { + let mut op_heads = vec![]; + for op_head_entry in std::fs::read_dir(op_heads_dir).unwrap() { + let op_head_file_name = op_head_entry.unwrap().file_name(); + let op_head_file_name = op_head_file_name.to_str().unwrap(); + if let Ok(op_head) = hex::decode(op_head_file_name) { + op_heads.push(OperationId(op_head)); + } + } + op_heads +} + +pub fn merge_views( + store: &StoreWrapper, + left: &op_store::View, + base: &op_store::View, + right: &op_store::View, +) -> op_store::View { + let mut result = left.clone(); + if right.checkout == base.checkout || right.checkout == left.checkout { + // Keep the left side + } else if left.checkout == base.checkout { + result.checkout = right.checkout.clone(); + } else { + // TODO: Return an error here. Or should we just pick one of the sides + // and emit a warning? 
+ } + + for removed_head in base.head_ids.difference(&right.head_ids) { + result.head_ids.remove(removed_head); + } + for added_head in right.head_ids.difference(&base.head_ids) { + result.head_ids.insert(added_head.clone()); + } + result.head_ids = heads_of_set(store, result.head_ids.into_iter()); + // TODO: Should it be considered a conflict if a commit-head is removed on one + // side while a child or successor is created on another side? Maybe a + // warning? + + result +} + +// TODO: Introduce context objects (like commit::Commit) so we won't have to +// pass around OperationId and Operation separately like we do here. +fn get_single_op_head( + store: &StoreWrapper, + op_store: &dyn OpStore, + op_heads_dir: &PathBuf, +) -> Result<(OperationId, op_store::Operation, op_store::View), OpHeadResolutionError> { + let mut op_heads = get_op_heads(&op_heads_dir); + + if op_heads.is_empty() { + return Err(OpHeadResolutionError::NoHeads); + } + + if op_heads.len() == 1 { + let operation_id = op_heads.pop().unwrap(); + let operation = op_store.read_operation(&operation_id).unwrap(); + let view = op_store.read_view(&operation.view_id).unwrap(); + return Ok((operation_id, operation, view)); + } + + // There are multiple heads. We take a lock, then check if there are still + // multiple heads (it's likely that another process was in the process of + // deleting on of them). If there are still multiple heads, we attempt to + // merge all the views into one. We then write that view and a corresponding + // operation to the op-store. + // Note that the locking isn't necessary for correctness; we take the lock + // only to avoid other concurrent processes from doing the same work (and + // producing another set of divergent heads). 
+ let _lock = FileLock::lock(op_heads_dir.join("lock")); + let op_heads = get_op_heads(&op_heads_dir); + + if op_heads.is_empty() { + return Err(OpHeadResolutionError::NoHeads); + } + + if op_heads.len() == 1 { + let op_head_id = op_heads[0].clone(); + let op_head = op_store.read_operation(&op_head_id).unwrap(); + // Return early so we don't write a merge operation with a single parent + let view = op_store.read_view(&op_head.view_id).unwrap(); + return Ok((op_head_id, op_head, view)); + } + + let (merge_operation_id, merge_operation, merged_view) = + merge_op_heads(store, op_store, &op_heads)?; + add_op_head(&op_heads_dir, &merge_operation_id); + for old_op_head_id in op_heads { + // The merged one will be in the input to the merge if it's a "fast-forward" + // merge. + if old_op_head_id != merge_operation_id { + remove_op_head(&op_heads_dir, &old_op_head_id); + } + } + Ok((merge_operation_id, merge_operation, merged_view)) +} + +fn merge_op_heads( + store: &StoreWrapper, + op_store: &dyn OpStore, + op_heads: &[OperationId], +) -> Result<(OperationId, op_store::Operation, op_store::View), OpHeadResolutionError> { + let neighbors_fn = |op_id: &OperationId| op_store.read_operation(op_id).unwrap().parents; + // Remove ancestors so we don't create merge operation with an operation and its + // ancestor + let op_heads = dag_walk::unreachable( + op_heads.iter().cloned(), + &neighbors_fn, + &|op_id: &OperationId| op_id.clone(), + ); + let mut op_heads: Vec<_> = op_heads.into_iter().collect(); + op_heads.sort_by_key(|op_id| op_id.0.clone()); + let first_op_head = op_store.read_operation(&op_heads[0]).unwrap(); + let mut merged_view = op_store.read_view(&first_op_head.view_id).unwrap(); + + // Return without creating a merge operation + if op_heads.len() == 1 { + return Ok((op_heads[0].clone(), first_op_head, merged_view)); + } + + for (i, other_op_head_id) in op_heads.iter().enumerate().skip(1) { + let ancestor_op_id = dag_walk::closest_common_node( + 
op_heads[0..i].to_vec(), + vec![other_op_head_id.clone()], + &neighbors_fn, + &|op_id: &OperationId| op_id.clone(), + ) + .unwrap(); + let ancestor_op = op_store.read_operation(&ancestor_op_id).unwrap(); + let ancestor_view = op_store.read_view(&ancestor_op.view_id).unwrap(); + let other_op = op_store.read_operation(other_op_head_id).unwrap(); + let other_view = op_store.read_view(&other_op.view_id).unwrap(); + merged_view = merge_views(store, &merged_view, &ancestor_view, &other_view); + } + let merged_view_id = op_store.write_view(&merged_view).unwrap(); + let operation_metadata = OperationMetadata::new("resolve concurrent operations".to_string()); + let merge_operation = op_store::Operation { + view_id: merged_view_id, + parents: op_heads, + metadata: operation_metadata, + }; + let merge_operation_id = op_store.write_operation(&merge_operation).unwrap(); + Ok((merge_operation_id, merge_operation, merged_view)) +} + +impl View for ReadonlyView { + fn checkout(&self) -> &CommitId { + &self.data.checkout + } + + fn heads<'a>(&'a self) -> Box + 'a> { + Box::new(self.data.head_ids.iter()) + } + + fn op_store(&self) -> Arc { + self.op_store.clone() + } + + fn base_op_head_id(&self) -> &OperationId { + &self.op_id + } +} + +impl ReadonlyView { + pub fn init(store: Arc, path: PathBuf, checkout: CommitId) -> Self { + std::fs::create_dir(path.join("op_store")).unwrap(); + + let op_store = Arc::new(SimpleOpStore::init(path.join("op_store"))); + let mut root_view = op_store::View::new(checkout.clone()); + root_view.head_ids.insert(checkout); + let root_view_id = op_store.write_view(&root_view).unwrap(); + let operation_metadata = OperationMetadata::new("initialize repo".to_string()); + let init_operation = op_store::Operation { + view_id: root_view_id, + parents: vec![], + metadata: operation_metadata, + }; + let init_operation_id = op_store.write_operation(&init_operation).unwrap(); + + let op_heads_dir = path.join("op_heads"); + 
std::fs::create_dir(&op_heads_dir).unwrap(); + add_op_head(&op_heads_dir, &init_operation_id); + + ReadonlyView { + store, + path, + op_store, + op_id: init_operation_id, + data: root_view, + } + } + + pub fn load(store: Arc, path: PathBuf) -> Self { + let op_store = Arc::new(SimpleOpStore::load(path.join("op_store"))); + let op_heads_dir = path.join("op_heads"); + let (op_id, _operation, view) = + get_single_op_head(&store, op_store.as_ref(), &op_heads_dir).unwrap(); + ReadonlyView { + store, + path, + op_store, + op_id, + data: view, + } + } + + pub fn reload(&mut self) -> OperationId { + let op_heads_dir = self.path.join("op_heads"); + let (op_id, _operation, view) = + get_single_op_head(&self.store, self.op_store.as_ref(), &op_heads_dir).unwrap(); + self.op_id = op_id; + self.data = view; + self.op_id.clone() + } + + pub fn reload_at(&mut self, operation: &Operation) { + self.op_id = operation.id().clone(); + self.data = operation.view().take_store_view(); + } + + pub fn start_modification(&self) -> MutableView { + // TODO: Avoid the cloning of the sets here. 
+ MutableView { + store: self.store.clone(), + path: self.path.clone(), + op_store: self.op_store.clone(), + base_op_head_id: self.op_id.clone(), + data: self.data.clone(), + } + } +} + +impl View for MutableView { + fn checkout(&self) -> &CommitId { + &self.data.checkout + } + + fn heads<'a>(&'a self) -> Box + 'a> { + Box::new(self.data.head_ids.iter()) + } + + fn op_store(&self) -> Arc { + self.op_store.clone() + } + + fn base_op_head_id(&self) -> &OperationId { + &self.base_op_head_id + } +} + +impl MutableView { + pub fn set_checkout(&mut self, id: CommitId) { + self.data.checkout = id; + } + + pub fn add_head(&mut self, head: &Commit) { + self.data.head_ids.insert(head.id().clone()); + for parent in head.parents() { + self.data.head_ids.remove(parent.id()); + } + } + + pub fn remove_head(&mut self, head: &Commit) { + self.data.head_ids.remove(head.id()); + for parent in head.parents() { + self.data.head_ids.insert(parent.id().clone()); + } + } + + pub fn set_view(&mut self, data: op_store::View) { + self.data = data; + } + + pub fn save(mut self, description: String, operation_start_time: Timestamp) -> Operation { + let op_heads_dir = self.path.join("op_heads"); + + // First write the current view whether or not there have been any concurrent + // operations. We'll later create a merge operation if necessary. + self.data.head_ids = heads_of_set(&self.store, self.heads().cloned()); + let view_id = self.op_store.write_view(&self.data).unwrap(); + let mut operation_metadata = OperationMetadata::new(description); + operation_metadata.start_time = operation_start_time; + let operation = op_store::Operation { + view_id, + parents: vec![self.base_op_head_id.clone()], + metadata: operation_metadata, + }; + let old_op_head_id = self.base_op_head_id.clone(); + let new_op_head_id = self.op_store.write_operation(&operation).unwrap(); + + // Update .jj/view/op_heads/. 
+ { + let _op_heads_lock = FileLock::lock(op_heads_dir.join("lock")); + add_op_head(&op_heads_dir, &new_op_head_id); + remove_op_head(&op_heads_dir, &old_op_head_id); + } + + Operation::new(self.op_store, new_op_head_id, operation) + } +} diff --git a/lib/src/working_copy.rs b/lib/src/working_copy.rs new file mode 100644 index 000000000..bceb31f32 --- /dev/null +++ b/lib/src/working_copy.rs @@ -0,0 +1,667 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::cell::{RefCell, RefMut}; +use std::collections::{BTreeMap, HashSet}; +use std::convert::TryInto; +use std::fs; +use std::fs::{File, OpenOptions}; +#[cfg(not(windows))] +use std::os::unix::fs::symlink; +#[cfg(not(windows))] +use std::os::unix::fs::PermissionsExt; +#[cfg(windows)] +use std::os::windows::fs::symlink_file; +use std::path::{Path, PathBuf}; +use std::time::UNIX_EPOCH; + +use protobuf::Message; +use tempfile::NamedTempFile; +use thiserror::Error; + +use crate::commit::Commit; +use crate::commit_builder::CommitBuilder; +use crate::lock::FileLock; +use crate::repo::ReadonlyRepo; +use crate::repo_path::{ + DirRepoPath, DirRepoPathComponent, FileRepoPath, FileRepoPathComponent, RepoPathJoin, +}; +use crate::settings::UserSettings; +use crate::store::{CommitId, FileId, MillisSinceEpoch, StoreError, SymlinkId, TreeId, TreeValue}; +use crate::store_wrapper::StoreWrapper; +use crate::trees::TreeValueDiff; +use std::sync::Arc; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum FileType { + Normal, + Executable, + Symlink, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct FileState { + pub file_type: FileType, + pub mtime: MillisSinceEpoch, + pub size: u64, + /* TODO: What else do we need here? Git stores a lot of fields. + * TODO: Could possibly handle case-insensitive file systems keeping an + * Option with the actual path here. 
*/ +} + +impl FileState { + fn null() -> FileState { + FileState { + file_type: FileType::Normal, + mtime: MillisSinceEpoch(0), + size: 0, + } + } +} + +struct TreeState { + store: Arc, + path: PathBuf, + tree_id: TreeId, + file_states: BTreeMap, + read_time: MillisSinceEpoch, +} + +fn file_state_from_proto(proto: &protos::working_copy::FileState) -> FileState { + let file_type = match proto.file_type { + protos::working_copy::FileType::Normal => FileType::Normal, + protos::working_copy::FileType::Symlink => FileType::Symlink, + protos::working_copy::FileType::Executable => FileType::Executable, + }; + FileState { + file_type, + mtime: MillisSinceEpoch(proto.mtime_millis_since_epoch), + size: proto.size, + } +} + +fn file_state_to_proto(file_state: &FileState) -> protos::working_copy::FileState { + let mut proto = protos::working_copy::FileState::new(); + let file_type = match &file_state.file_type { + FileType::Normal => protos::working_copy::FileType::Normal, + FileType::Symlink => protos::working_copy::FileType::Symlink, + FileType::Executable => protos::working_copy::FileType::Executable, + }; + proto.file_type = file_type; + proto.mtime_millis_since_epoch = file_state.mtime.0; + proto.size = file_state.size; + proto +} + +fn file_states_from_proto( + proto: &protos::working_copy::TreeState, +) -> BTreeMap { + let mut file_states = BTreeMap::new(); + for (path_str, proto_file_state) in &proto.file_states { + let path = FileRepoPath::from(path_str.as_str()); + file_states.insert(path, file_state_from_proto(&proto_file_state)); + } + file_states +} + +fn create_parent_dirs(disk_path: &PathBuf) { + fs::create_dir_all(disk_path.parent().unwrap()) + .unwrap_or_else(|_| panic!("failed to create parent directories for {:?}", &disk_path)); +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct CheckoutStats { + pub updated_files: u32, + pub added_files: u32, + pub removed_files: u32, +} + +#[derive(Debug, Error, PartialEq, Eq)] +pub enum CheckoutError { + 
#[error("Update target not found")] + TargetNotFound, + // The current checkout was deleted, maybe by an overly aggressive GC that happened while + // the current process was running. + #[error("Current checkout not found")] + SourceNotFound, + // Another process checked out a commit while the current process was running (after the + // working copy was read by the current process). + #[error("Concurrent checkout")] + ConcurrentCheckout, + #[error("Internal error: {0:?}")] + InternalStoreError(StoreError), +} + +impl TreeState { + pub fn current_tree_id(&self) -> &TreeId { + &self.tree_id + } + + pub fn file_states(&self) -> &BTreeMap { + &self.file_states + } + + pub fn init(store: Arc, path: PathBuf) -> TreeState { + let mut wc = TreeState::empty(store, path); + wc.save(); + wc + } + + fn empty(store: Arc, path: PathBuf) -> TreeState { + let tree_id = store.empty_tree_id().clone(); + TreeState { + store, + path, + tree_id, + file_states: BTreeMap::new(), + read_time: MillisSinceEpoch(0), + } + } + + pub fn load(store: Arc, path: PathBuf) -> TreeState { + let maybe_file = File::open(path.join("tree_state")); + let file = match maybe_file { + Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => { + return TreeState::init(store, path); + } + result => result.unwrap(), + }; + + let mut wc = TreeState::empty(store, path); + wc.read(file); + wc + } + + fn update_read_time(&mut self) { + let own_file_state = self + .file_state(&self.path.join("tree_state")) + .unwrap_or_else(FileState::null); + self.read_time = own_file_state.mtime; + } + + fn read(&mut self, mut file: File) { + self.update_read_time(); + let proto: protos::working_copy::TreeState = + protobuf::parse_from_reader(&mut file).unwrap(); + self.tree_id = TreeId(proto.tree_id.clone()); + self.file_states = file_states_from_proto(&proto); + } + + fn save(&mut self) { + let mut proto = protos::working_copy::TreeState::new(); + proto.tree_id = self.tree_id.0.clone(); + for (file, file_state) in 
&self.file_states { + proto + .file_states + .insert(file.to_internal_string(), file_state_to_proto(file_state)); + } + + let mut temp_file = NamedTempFile::new_in(&self.path).unwrap(); + // update read time while we still have the file open for writes, so we know + // there is no unknown data in it + self.update_read_time(); + proto.write_to_writer(temp_file.as_file_mut()).unwrap(); + temp_file.persist(self.path.join("tree_state")).unwrap(); + } + + fn file_state(&self, path: &PathBuf) -> Option { + let metadata = path.symlink_metadata().ok()?; + let time = metadata.modified().unwrap(); + let since_epoch = time.duration_since(UNIX_EPOCH).unwrap(); + let mtime = MillisSinceEpoch(since_epoch.as_millis().try_into().unwrap()); + let size = metadata.len(); + let metadata_file_type = metadata.file_type(); + let file_type = if metadata_file_type.is_dir() { + panic!("expected file, not directory: {:?}", path); + } else if metadata_file_type.is_symlink() { + FileType::Symlink + } else { + let mode = metadata.permissions().mode(); + if mode & 0o111 != 0 { + FileType::Executable + } else { + FileType::Normal + } + }; + Some(FileState { + file_type, + mtime, + size, + }) + } + + fn write_file_to_store(&self, path: &FileRepoPath, disk_path: &PathBuf) -> FileId { + let file = File::open(disk_path).unwrap(); + self.store.write_file(path, &mut Box::new(file)).unwrap() + } + + fn write_symlink_to_store(&self, path: &FileRepoPath, disk_path: &PathBuf) -> SymlinkId { + let target = disk_path.read_link().unwrap(); + let str_target = target.to_str().unwrap(); + self.store.write_symlink(path, str_target).unwrap() + } + + // Look for changes to the working copy. If there are any changes, create + // a new tree from it and return it, and also update the dirstate on disk. 
+ // TODO: respect ignores + pub fn write_tree(&mut self, working_copy_path: PathBuf) -> &TreeId { + let mut work = vec![(DirRepoPath::root(), working_copy_path)]; + let mut tree_builder = self.store.tree_builder(self.tree_id.clone()); + let mut deleted_files: HashSet<&FileRepoPath> = self.file_states.keys().collect(); + let mut modified_files = BTreeMap::new(); + while !work.is_empty() { + let (dir, disk_dir) = work.pop().unwrap(); + for maybe_entry in disk_dir.read_dir().unwrap() { + let entry = maybe_entry.unwrap(); + let file_type = entry.file_type().unwrap(); + let file_name = entry.file_name(); + let name = file_name.to_str().unwrap(); + if name == ".jj" { + continue; + } + if file_type.is_dir() { + let subdir = dir.join(&DirRepoPathComponent::from(name)); + let disk_subdir = disk_dir.join(file_name); + work.push((subdir, disk_subdir)); + } else { + let file = dir.join(&FileRepoPathComponent::from(name)); + deleted_files.remove(&file); + let new_file_state = self.file_state(&entry.path()).unwrap(); + let clean = match self.file_states.get(&file) { + None => false, // untracked + Some(current_entry) => { + current_entry == &new_file_state && current_entry.mtime < self.read_time + } + }; + if !clean { + let disk_file = disk_dir.join(file_name); + let file_value = match new_file_state.file_type { + FileType::Normal | FileType::Executable => { + let id = self.write_file_to_store(&file, &disk_file); + TreeValue::Normal { + id, + executable: new_file_state.file_type == FileType::Executable, + } + } + FileType::Symlink => { + let id = self.write_symlink_to_store(&file, &disk_file); + TreeValue::Symlink(id) + } + }; + tree_builder.set(file.to_repo_path(), file_value); + modified_files.insert(file, new_file_state); + } + } + } + } + + let deleted_files: Vec = deleted_files.iter().cloned().cloned().collect(); + + for file in &deleted_files { + self.file_states.remove(file); + tree_builder.remove(file.to_repo_path()); + } + for (file, file_state) in modified_files { + 
self.file_states.insert(file, file_state); + } + self.tree_id = tree_builder.write_tree(); + self.save(); + &self.tree_id + } + + fn write_file( + &self, + disk_path: &PathBuf, + path: &FileRepoPath, + id: &FileId, + executable: bool, + ) -> FileState { + create_parent_dirs(disk_path); + let mut file = OpenOptions::new() + .write(true) + .create_new(true) + .truncate(true) + .open(disk_path) + .unwrap_or_else(|_| panic!("failed to open {:?} for write", &disk_path)); + let mut contents = self.store.read_file(path, id).unwrap(); + std::io::copy(&mut contents, &mut file).unwrap(); + self.set_executable(disk_path, executable); + // Read the file state while we still have the write lock. That way there is no + // race with other processes modifying it. We know that the file exists, + // and we know that the stat information is accurate. (The mtime is set + // at write time and won't change when we close the file.) + self.file_state(&disk_path).unwrap() + } + + fn write_symlink(&self, disk_path: &PathBuf, path: &FileRepoPath, id: &SymlinkId) -> FileState { + create_parent_dirs(disk_path); + #[cfg(windows)] + { + unimplemented!(); + } + #[cfg(not(windows))] + { + let target = self.store.read_symlink(path, id).unwrap(); + let target = PathBuf::from(&target); + symlink(target, disk_path).unwrap(); + } + self.file_state(&disk_path).unwrap() + } + + fn set_executable(&self, disk_path: &PathBuf, executable: bool) { + let mode = if executable { 0o755 } else { 0o644 }; + fs::set_permissions(disk_path, fs::Permissions::from_mode(mode)).unwrap(); + } + + pub fn check_out( + &mut self, + tree_id: TreeId, + working_copy_path: &Path, + ) -> Result { + let old_tree = self + .store + .get_tree(&DirRepoPath::root(), &self.tree_id) + .map_err(|err| match err { + StoreError::NotFound => CheckoutError::SourceNotFound, + other => CheckoutError::InternalStoreError(other), + })?; + let new_tree = self + .store + .get_tree(&DirRepoPath::root(), &tree_id) + .map_err(|err| match err { + 
StoreError::NotFound => CheckoutError::TargetNotFound, + other => CheckoutError::InternalStoreError(other), + })?; + + let mut stats = CheckoutStats { + updated_files: 0, + added_files: 0, + removed_files: 0, + }; + + old_tree.diff(&new_tree, &mut |path, diff| { + let disk_path = working_copy_path.join(PathBuf::from(path.to_internal_string())); + + // TODO: Check that the file has not changed before overwriting/removing it. + match diff { + TreeValueDiff::Removed(_before) => { + fs::remove_file(&disk_path).ok(); + let mut parent_dir = disk_path.parent().unwrap(); + loop { + if fs::remove_dir(&parent_dir).is_err() { + break; + } + parent_dir = parent_dir.parent().unwrap(); + } + self.file_states.remove(&path); + stats.removed_files += 1; + } + TreeValueDiff::Added(after) => { + let file_state = match after { + TreeValue::Normal { id, executable } => { + self.write_file(&disk_path, path, id, *executable) + } + TreeValue::Symlink(id) => self.write_symlink(&disk_path, path, id), + TreeValue::GitSubmodule(_id) => { + println!("ignoring git submodule at {:?}", path); + return; + } + TreeValue::Tree(_id) => { + panic!("unexpected tree entry in diff at {:?}", path); + } + TreeValue::Conflict(_id) => { + panic!( + "conflicts cannot be represented in the working copy: {:?}", + path + ); + } + }; + self.file_states.insert(path.clone(), file_state); + stats.added_files += 1; + } + TreeValueDiff::Modified(before, after) => { + fs::remove_file(&disk_path).ok(); + let file_state = match (before, after) { + ( + TreeValue::Normal { + id: old_id, + executable: old_executable, + }, + TreeValue::Normal { id, executable }, + ) if id == old_id => { + // Optimization for when only the executable bit changed + assert_ne!(executable, old_executable); + self.set_executable(&disk_path, *executable); + let mut file_state = self.file_states.get(&path).unwrap().clone(); + file_state.file_type = if *executable { + FileType::Executable + } else { + FileType::Normal + }; + file_state + } + (_, 
TreeValue::Normal { id, executable }) => { + self.write_file(&disk_path, path, id, *executable) + } + (_, TreeValue::Symlink(id)) => self.write_symlink(&disk_path, path, id), + (_, TreeValue::GitSubmodule(_id)) => { + println!("ignoring git submodule at {:?}", path); + self.file_states.remove(path); + return; + } + (_, TreeValue::Tree(_id)) => { + panic!("unexpected tree entry in diff at {:?}", path); + } + (_, TreeValue::Conflict(_id)) => { + panic!( + "conflicts cannot be represented in the working copy: {:?}", + path + ); + } + }; + + self.file_states.insert(path.clone(), file_state); + stats.updated_files += 1; + } + } + }); + self.tree_id = tree_id; + self.save(); + Ok(stats) + } +} + +pub struct WorkingCopy { + store: Arc, + path: PathBuf, + commit_id: RefCell>, + tree_state: RefCell>, + // cached commit + commit: RefCell>, +} + +impl WorkingCopy { + pub fn init(store: Arc, path: PathBuf) -> WorkingCopy { + // Leave the commit_id empty so a subsequent call to check out the root revision + // will have an effect. 
+ let proto = protos::working_copy::Checkout::new(); + let mut file = OpenOptions::new() + .create_new(true) + .write(true) + .open(path.join("checkout")) + .unwrap(); + proto.write_to_writer(&mut file).unwrap(); + WorkingCopy { + store, + path, + commit_id: RefCell::new(None), + tree_state: RefCell::new(None), + commit: RefCell::new(None), + } + } + + pub fn load(store: Arc, path: PathBuf) -> WorkingCopy { + WorkingCopy { + store, + path, + commit_id: RefCell::new(None), + tree_state: RefCell::new(None), + commit: RefCell::new(None), + } + } + + fn write_proto(&self, proto: protos::working_copy::Checkout) { + let mut temp_file = NamedTempFile::new_in(&self.path).unwrap(); + proto.write_to_writer(temp_file.as_file_mut()).unwrap(); + temp_file.persist(self.path.join("checkout")).unwrap(); + } + + fn read_proto(&self) -> protos::working_copy::Checkout { + let mut file = File::open(self.path.join("checkout")).unwrap(); + protobuf::parse_from_reader(&mut file).unwrap() + } + + /// The id of the commit that's currently checked out in the working copy. + /// Note that the View is the source of truth for which commit *should* + /// be checked out. That should be kept up to date within a Transaction. + /// The WorkingCopy is only updated at the end. + pub fn current_commit_id(&self) -> CommitId { + if self.commit_id.borrow().is_none() { + let proto = self.read_proto(); + let commit_id = CommitId(proto.commit_id); + self.commit_id.replace(Some(commit_id)); + } + + self.commit_id.borrow().as_ref().unwrap().clone() + } + + /// The commit that's currently checked out in the working copy. Note that + /// the View is the source of truth for which commit *should* be checked + /// out. That should be kept up to date within a Transaction. The + /// WorkingCopy is only updated at the end. 
+ pub fn current_commit(&self) -> Commit { + let commit_id = self.current_commit_id(); + let stale = match self.commit.borrow().as_ref() { + None => true, + Some(value) => value.id() != &commit_id, + }; + if stale { + self.commit + .replace(Some(self.store.get_commit(&commit_id).unwrap())); + } + self.commit.borrow().as_ref().unwrap().clone() + } + + fn tree_state(&self) -> RefMut> { + if self.tree_state.borrow().is_none() { + self.tree_state + .replace(Some(TreeState::load(self.store.clone(), self.path.clone()))); + } + self.tree_state.borrow_mut() + } + + pub fn current_tree_id(&self) -> TreeId { + self.tree_state() + .as_ref() + .unwrap() + .current_tree_id() + .clone() + } + + pub fn file_states(&self) -> BTreeMap { + self.tree_state().as_ref().unwrap().file_states().clone() + } + + fn save(&self) { + let mut proto = protos::working_copy::Checkout::new(); + proto.commit_id = self.current_commit_id().0; + self.write_proto(proto); + } + + pub fn check_out( + &self, + repo: &ReadonlyRepo, + commit: Commit, + ) -> Result { + assert!(commit.is_open()); + let lock_path = self.path.join("working_copy.lock"); + let _lock = FileLock::lock(lock_path); + + // TODO: Write a "pending_checkout" file with the old and new TreeIds so we can + // continue an interrupted checkout if we find such a file. Write + // access to that file can also serve as lock so only one process + // at once can do a checkout. + + // Check if the current checkout has changed on disk after we read it. It's safe + // to check out another commit regardless, but it's probably not what + // the caller wanted, so we let them know. + // + // We could safely add a version of this function without the check if we see a + // need for it. 
+ let current_proto = self.read_proto(); + if let Some(commit_id_at_read_time) = self.commit_id.borrow().as_ref() { + if current_proto.commit_id != commit_id_at_read_time.0 { + return Err(CheckoutError::ConcurrentCheckout); + } + } + + let stats = self + .tree_state() + .as_mut() + .unwrap() + .check_out(commit.tree().id().clone(), repo.working_copy_path())?; + + self.commit_id.replace(Some(commit.id().clone())); + self.commit.replace(Some(commit)); + + self.save(); + // TODO: Clear the "pending_checkout" file here. + Ok(stats) + } + + pub fn commit(&self, settings: &UserSettings, repo: &mut ReadonlyRepo) -> Commit { + let lock_path = self.path.join("working_copy.lock"); + let _lock = FileLock::lock(lock_path); + + // Check if the current checkout has changed on disk after we read it. It's fine + // if it has, but we'll want our new commit to be a successor of the one + // just created in that case, so we need to reset our state to have the new + // commit id. + let current_proto = self.read_proto(); + self.commit_id + .replace(Some(CommitId(current_proto.commit_id))); + let current_commit = self.current_commit(); + + let new_tree_id = self + .tree_state() + .as_mut() + .unwrap() + .write_tree(repo.working_copy_path().clone()) + .clone(); + if &new_tree_id != current_commit.tree().id() { + let mut tx = repo.start_transaction("commit working copy"); + let commit = CommitBuilder::for_rewrite_from(settings, repo.store(), ¤t_commit) + .set_tree(new_tree_id) + .write_to_transaction(&mut tx); + tx.set_checkout(commit.id().clone()); + let operation = tx.commit(); + repo.reload_at(&operation); + + self.commit_id.replace(Some(commit.id().clone())); + self.commit.replace(Some(commit)); + self.save(); + } + self.commit.borrow().as_ref().unwrap().clone() + } +} diff --git a/lib/tests/test_bad_locking.rs b/lib/tests/test_bad_locking.rs new file mode 100644 index 000000000..beefddfdb --- /dev/null +++ b/lib/tests/test_bad_locking.rs @@ -0,0 +1,179 @@ +// Copyright 2020 Google 
LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashSet; +use std::path::PathBuf; + +use tempfile::TempDir; + +use jj_lib::repo::{ReadonlyRepo, Repo}; +use jj_lib::testutils; +use std::sync::Arc; +use test_case::test_case; + +fn copy_directory(src: &PathBuf, dst: &PathBuf) { + std::fs::create_dir(dst).ok(); + for entry in std::fs::read_dir(src).unwrap() { + let child_src = entry.unwrap().path(); + let base_name = child_src.file_name().unwrap(); + let child_dst = dst.join(base_name); + if child_src.is_dir() { + copy_directory(&child_src, &child_dst) + } else { + std::fs::copy(&child_src, &child_dst).unwrap(); + } + } +} + +fn merge_directories(left: &PathBuf, base: &PathBuf, right: &PathBuf, output: &PathBuf) { + std::fs::create_dir(output).ok(); + let mut sub_dirs = vec![]; + // Walk the left side and copy to the output + for entry in std::fs::read_dir(left).unwrap() { + let path = entry.unwrap().path(); + let base_name = path.file_name().unwrap(); + let child_left = left.join(base_name); + let child_output = output.join(base_name); + if child_left.is_dir() { + sub_dirs.push(base_name.to_os_string()); + } else { + std::fs::copy(&child_left, &child_output).unwrap(); + } + } + // Walk the base and find files removed in the right side, then remove them in + // the output + for entry in std::fs::read_dir(base).unwrap() { + let path = entry.unwrap().path(); + let base_name = path.file_name().unwrap(); + let child_base = 
base.join(base_name); + let child_right = right.join(base_name); + let child_output = output.join(base_name); + if child_base.is_dir() { + sub_dirs.push(base_name.to_os_string()); + } else if !child_right.exists() { + std::fs::remove_file(child_output).ok(); + } + } + // Walk the right side and find files added in the right side, then add them in + // the output + for entry in std::fs::read_dir(right).unwrap() { + let path = entry.unwrap().path(); + let base_name = path.file_name().unwrap(); + let child_base = base.join(base_name); + let child_right = right.join(base_name); + let child_output = output.join(base_name); + if child_right.is_dir() { + sub_dirs.push(base_name.to_os_string()); + } else if !child_base.exists() { + // This overwrites the left side if that's been written. That's fine, since the + // point of the test is that it should be okay for either side to win. + std::fs::copy(&child_right, &child_output).unwrap(); + } + } + // Do the merge in subdirectories + for base_name in sub_dirs { + let child_base = base.join(&base_name); + let child_right = right.join(&base_name); + let child_left = left.join(&base_name); + let child_output = output.join(&base_name); + merge_directories(&child_left, &child_base, &child_right, &child_output); + } +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_bad_locking_children(use_git: bool) { + // Test that two new commits created on separate machines are both visible (not + // lost due to lack of locking) + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let initial = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![repo.store().root_commit_id().clone()]) + .write_to_new_transaction(&repo, "test"); + + // Simulate a write of a commit that happens on one machine + let machine1_path = TempDir::new().unwrap().into_path(); + copy_directory(repo.working_copy_path(), &machine1_path); + let machine1_repo = 
ReadonlyRepo::load(&settings, machine1_path); + let child1 = testutils::create_random_commit(&settings, &machine1_repo) + .set_parents(vec![initial.id().clone()]) + .write_to_new_transaction(&machine1_repo, "test"); + + // Simulate a write of a commit that happens on another machine + let machine2_path = TempDir::new().unwrap().into_path(); + copy_directory(repo.working_copy_path(), &machine2_path); + let machine2_repo = ReadonlyRepo::load(&settings, machine2_path); + let child2 = testutils::create_random_commit(&settings, &machine2_repo) + .set_parents(vec![initial.id().clone()]) + .write_to_new_transaction(&machine2_repo, "test"); + + // Simulate that the distributed file system now has received the changes from + // both machines + let merged_path = TempDir::new().unwrap().into_path(); + merge_directories( + machine1_repo.working_copy_path(), + repo.working_copy_path(), + machine2_repo.working_copy_path(), + &merged_path, + ); + let merged_repo = ReadonlyRepo::load(&settings, merged_path); + let heads: HashSet<_> = merged_repo.view().heads().cloned().collect(); + assert!(heads.contains(child1.id())); + assert!(heads.contains(child2.id())); + let op_head_id = merged_repo.view().base_op_head_id().clone(); + let op_head = merged_repo + .view() + .op_store() + .read_operation(&op_head_id) + .unwrap(); + assert_eq!(op_head.parents.len(), 2); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_bad_locking_interrupted(use_git: bool) { + // Test that an interrupted update of the op-heads resulting in on op-head + // that's a descendant of the other is resolved without creating a new + // operation. 
+ let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let initial = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![repo.store().root_commit_id().clone()]) + .write_to_new_transaction(&repo, "test"); + Arc::get_mut(&mut repo).unwrap().reload(); + + // Simulate a crash that resulted in the old op-head left in place. We simulate + // it somewhat hackily by copying the view/op_heads/ directory before the + // operation and then copying that back afterwards, leaving the existing + // op-head(s) in place. + let op_heads_dir = repo.repo_path().join("view").join("op_heads"); + let backup_path = TempDir::new().unwrap().into_path(); + copy_directory(&op_heads_dir, &backup_path); + let mut tx = repo.start_transaction("test"); + testutils::create_random_commit(&settings, &repo) + .set_parents(vec![initial.id().clone()]) + .write_to_transaction(&mut tx); + let op_head_id = tx.commit().id().clone(); + + copy_directory(&backup_path, &op_heads_dir); + // Reload the repo and check that only the new head is present. + let reloaded_repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + assert_eq!(reloaded_repo.view().base_op_head_id(), &op_head_id); + // Reload once more to make sure that the view/op_heads/ directory was updated + // correctly. + let reloaded_repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + assert_eq!(reloaded_repo.view().base_op_head_id(), &op_head_id); +} diff --git a/lib/tests/test_commit_builder.rs b/lib/tests/test_commit_builder.rs new file mode 100644 index 000000000..a064bb829 --- /dev/null +++ b/lib/tests/test_commit_builder.rs @@ -0,0 +1,135 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::repo_path::FileRepoPath; +use jj_lib::settings::UserSettings; +use jj_lib::testutils; +use jj_lib::tree::DiffSummary; +use test_case::test_case; + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_initial(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let root_file_path = FileRepoPath::from("file"); + let dir_file_path = FileRepoPath::from("dir/file"); + let tree = testutils::create_tree( + &repo, + &[ + (&root_file_path, "file contents"), + (&dir_file_path, "dir/file contents"), + ], + ); + + let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone()) + .set_parents(vec![store.root_commit_id().clone()]) + .write_to_new_transaction(&repo, "test"); + + assert_eq!(commit.parents(), vec![store.root_commit()]); + assert_eq!(commit.predecessors(), vec![]); + assert_eq!(commit.is_open(), false); + assert_eq!(commit.description(), ""); + assert_eq!(commit.author().name, settings.user_name()); + assert_eq!(commit.author().email, settings.user_email()); + assert_eq!(commit.committer().name, settings.user_name()); + assert_eq!(commit.committer().email, settings.user_email()); + assert_eq!( + store.root_commit().tree().diff_summary(&commit.tree()), + DiffSummary { + modified: vec![], + added: vec![root_file_path, dir_file_path], + removed: vec![] + } + ); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn 
test_rewrite(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let root_file_path = FileRepoPath::from("file"); + let dir_file_path = FileRepoPath::from("dir/file"); + let initial_tree = testutils::create_tree( + &repo, + &[ + (&root_file_path, "file contents"), + (&dir_file_path, "dir/file contents"), + ], + ); + + let initial_commit = CommitBuilder::for_new_commit(&settings, store, initial_tree.id().clone()) + .set_parents(vec![store.root_commit_id().clone()]) + .write_to_new_transaction(&repo, "test"); + + let rewritten_tree = testutils::create_tree( + &repo, + &[ + (&root_file_path, "file contents"), + (&dir_file_path, "updated dir/file contents"), + ], + ); + + let mut config = config::Config::new(); + config.set("user.name", "Rewrite User").unwrap(); + config + .set("user.email", "rewrite.user@example.com") + .unwrap(); + let rewrite_settings = UserSettings::from_config(config); + let rewritten_commit = + CommitBuilder::for_rewrite_from(&rewrite_settings, store, &initial_commit) + .set_tree(rewritten_tree.id().clone()) + .write_to_new_transaction(&repo, "test"); + assert_eq!(rewritten_commit.parents(), vec![store.root_commit()]); + assert_eq!( + rewritten_commit.predecessors(), + vec![initial_commit.clone()] + ); + assert_eq!(rewritten_commit.is_open(), false); + assert_eq!(rewritten_commit.author().name, settings.user_name()); + assert_eq!(rewritten_commit.author().email, settings.user_email()); + assert_eq!( + rewritten_commit.committer().name, + rewrite_settings.user_name() + ); + assert_eq!( + rewritten_commit.committer().email, + rewrite_settings.user_email() + ); + assert_eq!( + store + .root_commit() + .tree() + .diff_summary(&rewritten_commit.tree()), + DiffSummary { + modified: vec![], + added: vec![root_file_path, dir_file_path.clone()], + removed: vec![] + } + ); + assert_eq!( + 
initial_commit.tree().diff_summary(&rewritten_commit.tree()), + DiffSummary { + modified: vec![dir_file_path], + added: vec![], + removed: vec![] + } + ); +} diff --git a/lib/tests/test_commit_concurrent.rs b/lib/tests/test_commit_concurrent.rs new file mode 100644 index 000000000..a78480769 --- /dev/null +++ b/lib/tests/test_commit_concurrent.rs @@ -0,0 +1,101 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::thread; + +use jj_lib::dag_walk; +use jj_lib::repo::{ReadonlyRepo, Repo}; +use jj_lib::testutils; +use std::sync::Arc; +use test_case::test_case; + +fn verify_view(repo: &ReadonlyRepo) { + let view = repo.view(); + let op_store = view.op_store(); + let op_head_id = view.base_op_head_id().clone(); + let mut num_ops = 0; + + // Count non-merge commits + for op_id in dag_walk::bfs( + vec![op_head_id], + Box::new(|op_id| op_id.clone()), + Box::new(|op_id| op_store.read_operation(&op_id).unwrap().parents), + ) { + if op_store.read_operation(&op_id).unwrap().parents.len() <= 1 { + num_ops += 1; + } + } + // One operation for initializing the repo (containing the root id and the + // initial working copy commit). + assert_eq!(num_ops, 101); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_commit_parallel(use_git: bool) { + // This loads a Repo instance and creates and commits many concurrent + // transactions from it. It then reloads the repo. 
That should merge all the + // operations and all commits should be visible. + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut threads = vec![]; + for _ in 0..100 { + let settings = settings.clone(); + let repo = repo.clone(); + let handle = thread::spawn(move || { + testutils::create_random_commit(&settings, &repo) + .write_to_new_transaction(&repo, "test"); + }); + threads.push(handle); + } + for thread in threads { + thread.join().ok().unwrap(); + } + Arc::get_mut(&mut repo).unwrap().reload(); + // One commit per thread plus the commit from the initial checkout on top of the + // root commit + assert_eq!(repo.view().heads().count(), 101); + + verify_view(&repo); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_commit_parallel_instances(use_git: bool) { + // Like the test above but creates a new repo instance for every thread, which + // makes it behave very similar to separate processes. 
+ let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let mut threads = vec![]; + for _ in 0..100 { + let settings = settings.clone(); + let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + let handle = thread::spawn(move || { + testutils::create_random_commit(&settings, &repo) + .write_to_new_transaction(&repo, "test"); + }); + threads.push(handle); + } + for thread in threads { + thread.join().ok().unwrap(); + } + // One commit per thread plus the commit from the initial checkout on top of the + // root commit + let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + assert_eq!(repo.view().heads().count(), 101); + + verify_view(&repo); +} diff --git a/lib/tests/test_diff_summary.rs b/lib/tests/test_diff_summary.rs new file mode 100644 index 000000000..05bf03c20 --- /dev/null +++ b/lib/tests/test_diff_summary.rs @@ -0,0 +1,152 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use jj_lib::repo_path::FileRepoPath; +use jj_lib::testutils; +use jj_lib::tree::DiffSummary; +use test_case::test_case; + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_types(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let clean_path = FileRepoPath::from("clean"); + let modified_path = FileRepoPath::from("modified"); + let added_path = FileRepoPath::from("added"); + let removed_path = FileRepoPath::from("removed"); + + let tree1 = testutils::create_tree( + &repo, + &[ + (&clean_path, "clean"), + (&modified_path, "contents before"), + (&removed_path, "removed contents"), + ], + ); + + let tree2 = testutils::create_tree( + &repo, + &[ + (&clean_path, "clean"), + (&modified_path, "contents after"), + (&added_path, "added contents"), + ], + ); + + assert_eq!( + tree1.diff_summary(&tree2), + DiffSummary { + modified: vec![modified_path], + added: vec![added_path], + removed: vec![removed_path] + } + ); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_tree_file_transition(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let dir_file_path = FileRepoPath::from("dir/file"); + let dir_path = FileRepoPath::from("dir"); + + let tree1 = testutils::create_tree(&repo, &[(&dir_file_path, "contents")]); + let tree2 = testutils::create_tree(&repo, &[(&dir_path, "contents")]); + + assert_eq!( + tree1.diff_summary(&tree2), + DiffSummary { + modified: vec![], + added: vec![dir_path.clone()], + removed: vec![dir_file_path.clone()] + } + ); + assert_eq!( + tree2.diff_summary(&tree1), + DiffSummary { + modified: vec![], + added: vec![dir_file_path], + removed: vec![dir_path] + } + ); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_sorting(use_git: bool) { + let settings = testutils::user_settings(); + let 
(_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let a_path = FileRepoPath::from("a"); + let b_path = FileRepoPath::from("b"); + let f_a_path = FileRepoPath::from("f/a"); + let f_b_path = FileRepoPath::from("f/b"); + let f_f_a_path = FileRepoPath::from("f/f/a"); + let f_f_b_path = FileRepoPath::from("f/f/b"); + let n_path = FileRepoPath::from("n"); + let s_b_path = FileRepoPath::from("s/b"); + let z_path = FileRepoPath::from("z"); + + let tree1 = testutils::create_tree( + &repo, + &[ + (&a_path, "before"), + (&f_a_path, "before"), + (&f_f_a_path, "before"), + ], + ); + + let tree2 = testutils::create_tree( + &repo, + &[ + (&a_path, "after"), + (&b_path, "after"), + (&f_a_path, "after"), + (&f_b_path, "after"), + (&f_f_a_path, "after"), + (&f_f_b_path, "after"), + (&n_path, "after"), + (&s_b_path, "after"), + (&z_path, "after"), + ], + ); + + assert_eq!( + tree1.diff_summary(&tree2), + DiffSummary { + modified: vec![a_path.clone(), f_a_path.clone(), f_f_a_path.clone()], + added: vec![ + b_path.clone(), + n_path.clone(), + z_path.clone(), + f_b_path.clone(), + f_f_b_path.clone(), + s_b_path.clone(), + ], + removed: vec![] + } + ); + assert_eq!( + tree2.diff_summary(&tree1), + DiffSummary { + modified: vec![a_path, f_a_path, f_f_a_path], + added: vec![], + removed: vec![b_path, n_path, z_path, f_b_path, f_f_b_path, s_b_path,] + } + ); +} diff --git a/lib/tests/test_evolution.rs b/lib/tests/test_evolution.rs new file mode 100644 index 000000000..4ca8abe17 --- /dev/null +++ b/lib/tests/test_evolution.rs @@ -0,0 +1,630 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj_lib::commit::Commit; +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::evolution::evolve; +use jj_lib::evolution::EvolveListener; +use jj_lib::repo::{ReadonlyRepo, Repo}; +use jj_lib::repo_path::FileRepoPath; +use jj_lib::settings::UserSettings; +use jj_lib::testutils; +use test_case::test_case; + +#[must_use] +fn child_commit(settings: &UserSettings, repo: &ReadonlyRepo, commit: &Commit) -> CommitBuilder { + testutils::create_random_commit(&settings, repo).set_parents(vec![commit.id().clone()]) +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_obsolete_and_orphan(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // A commit without successors should not be obsolete and not an orphan. + let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + assert!(!tx.as_repo().evolution().is_obsolete(original.id())); + assert!(!tx.as_repo().evolution().is_orphan(original.id())); + + // A commit with a successor with a different change_id should not be obsolete. 
+ let child = child_commit(&settings, &repo, &original).write_to_transaction(&mut tx); + let grandchild = child_commit(&settings, &repo, &child).write_to_transaction(&mut tx); + let cherry_picked = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert!(!tx.as_repo().evolution().is_obsolete(original.id())); + assert!(!tx.as_repo().evolution().is_orphan(original.id())); + assert!(!tx.as_repo().evolution().is_obsolete(child.id())); + assert!(!tx.as_repo().evolution().is_orphan(child.id())); + + // A commit with a successor with the same change_id should be obsolete. + let rewritten = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + assert!(tx.as_repo().evolution().is_obsolete(original.id())); + assert!(!tx.as_repo().evolution().is_obsolete(child.id())); + assert!(tx.as_repo().evolution().is_orphan(child.id())); + assert!(tx.as_repo().evolution().is_orphan(grandchild.id())); + assert!(!tx.as_repo().evolution().is_obsolete(cherry_picked.id())); + assert!(!tx.as_repo().evolution().is_orphan(cherry_picked.id())); + assert!(!tx.as_repo().evolution().is_obsolete(rewritten.id())); + assert!(!tx.as_repo().evolution().is_orphan(rewritten.id())); + + // It should no longer be obsolete if we remove the successor. 
+ tx.remove_head(&rewritten); + assert!(!tx.as_repo().evolution().is_obsolete(original.id())); + assert!(!tx.as_repo().evolution().is_orphan(child.id())); + assert!(!tx.as_repo().evolution().is_orphan(grandchild.id())); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_divergent(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // A single commit should not be divergent + let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + assert!(!tx.as_repo().evolution().is_obsolete(original.id())); + + // Successors with different change id are not divergent + let cherry_picked1 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let cherry_picked2 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert!(!tx.as_repo().evolution().is_divergent(original.change_id())); + assert!(!tx + .as_repo() + .evolution() + .is_divergent(cherry_picked1.change_id())); + assert!(!tx + .as_repo() + .evolution() + .is_divergent(cherry_picked2.change_id())); + + // Commits with the same change id are divergent, including the original commit + // (it's the change that is divergent) + let rewritten1 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten2 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + assert!(tx.as_repo().evolution().is_divergent(original.change_id())); + assert!(tx + .as_repo() + 
.evolution() + .is_divergent(rewritten1.change_id())); + assert!(tx + .as_repo() + .evolution() + .is_divergent(rewritten2.change_id())); + tx.discard(); +} + +// TODO: Create a #[repo_test] proc macro that injects the `settings` and `repo` +// variables into the test function +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_rewritten(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // After a simple rewrite, the new parent is the successor. + let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![rewritten.id().clone()].into_iter().collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_cherry_picked(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // A successor with a different change id has no effect. 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _cherry_picked = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![original.id().clone()].into_iter().collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_is_pruned(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit's successor is pruned, the new parent is the parent of the + // pruned commit. + let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _rewritten = child_commit(&settings, &repo, &new_parent) + .set_pruned(true) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![new_parent.id().clone()].into_iter().collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_divergent(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit has multiple successors, then they will all be returned. 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten1 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten2 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten3 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![ + rewritten1.id().clone(), + rewritten2.id().clone(), + rewritten3.id().clone() + ] + .into_iter() + .collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_divergent_one_not_pruned(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit has multiple successors, then they will all be returned, even if + // all but one are pruned (the parents of the pruned commits, not the pruned + // commits themselves). 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten1 = child_commit(&settings, &repo, &root_commit) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let parent2 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _rewritten2 = child_commit(&settings, &repo, &parent2) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .set_pruned(true) + .write_to_transaction(&mut tx); + let parent3 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _rewritten3 = child_commit(&settings, &repo, &parent3) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .set_pruned(true) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![ + rewritten1.id().clone(), + parent2.id().clone(), + parent3.id().clone() + ] + .into_iter() + .collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_divergent_all_pruned(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit has multiple successors, then they will all be returned, even if + // they are all pruned (the parents of the pruned commits, not the pruned + // commits themselves). 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let parent1 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _rewritten1 = child_commit(&settings, &repo, &parent1) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .set_pruned(true) + .write_to_transaction(&mut tx); + let parent2 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _rewritten2 = child_commit(&settings, &repo, &parent2) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .set_pruned(true) + .write_to_transaction(&mut tx); + let parent3 = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let _rewritten3 = child_commit(&settings, &repo, &parent3) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .set_pruned(true) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![ + parent1.id().clone(), + parent2.id().clone(), + parent3.id().clone() + ] + .into_iter() + .collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_split(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit was split, the new parent is the tip-most rewritten + // commit. Here we let the middle commit inherit the change id, but it shouldn't + // matter which one inherits it. 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten1 = child_commit(&settings, &repo, &new_parent) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let rewritten2 = child_commit(&settings, &repo, &rewritten1) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten3 = child_commit(&settings, &repo, &rewritten2) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![rewritten3.id().clone()].into_iter().collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_split_pruned_descendant(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit was split and the tip-most successor became pruned, + // we use that descendant's parent. 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten1 = child_commit(&settings, &repo, &new_parent) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten2 = child_commit(&settings, &repo, &rewritten1) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let rewritten3 = child_commit(&settings, &repo, &rewritten2) + .set_pruned(true) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let _rewritten4 = child_commit(&settings, &repo, &rewritten3) + .set_pruned(true) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![rewritten2.id().clone()].into_iter().collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_split_forked(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit was split and the successors were split up across topological + // branches, we return only the descendants from the branch with the same + // change id (we can't tell a split from two unrelated rewrites and cherry-picks + // anyway). 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten1 = child_commit(&settings, &repo, &new_parent) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten2 = child_commit(&settings, &repo, &rewritten1) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let rewritten3 = child_commit(&settings, &repo, &rewritten1) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let _rewritten4 = child_commit(&settings, &repo, &original) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![rewritten2.id().clone(), rewritten3.id().clone()] + .into_iter() + .collect() + ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_new_parent_split_forked_pruned(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + + // If a commit was split and the successors were split up across topological + // branches and some commits were pruned, we won't return a parent of the pruned + // commit if the parent is an ancestor of another commit we'd return. 
+ let original = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let new_parent = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let rewritten1 = child_commit(&settings, &repo, &new_parent) + .set_predecessors(vec![original.id().clone()]) + .set_change_id(original.change_id().clone()) + .write_to_transaction(&mut tx); + let rewritten2 = child_commit(&settings, &repo, &rewritten1) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let rewritten3 = child_commit(&settings, &repo, &rewritten2) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + let _rewritten4 = child_commit(&settings, &repo, &rewritten1) + .set_pruned(true) + .set_predecessors(vec![original.id().clone()]) + .write_to_transaction(&mut tx); + assert_eq!( + tx.as_repo().evolution().new_parent(original.id()), + vec![rewritten3.id().clone()].into_iter().collect() + ); + tx.discard(); +} + +struct RecordingEvolveListener { + evolved_orphans: Vec<(Commit, Commit)>, + evolved_divergents: Vec<(Vec<Commit>, Commit)>, +} + +impl Default for RecordingEvolveListener { + fn default() -> Self { + RecordingEvolveListener { + evolved_orphans: Default::default(), + evolved_divergents: Default::default(), + } + } +} + +impl EvolveListener for RecordingEvolveListener { + fn orphan_evolved(&mut self, orphan: &Commit, new_commit: &Commit) { + self.evolved_orphans + .push((orphan.clone(), new_commit.clone())); + } + + fn orphan_target_ambiguous(&mut self, _orphan: &Commit) { + // TODO: Record this too and add tests + panic!("unexpected call to orphan_target_ambiguous"); + } + + fn divergent_resolved(&mut self, sources: &[Commit], resolved: &Commit) { + self.evolved_divergents + .push((sources.iter().cloned().collect(), resolved.clone())); + } + + fn divergent_no_common_predecessor(&mut self, _commit1: &Commit, _commit2: &Commit) { + // TODO: Record this too and add tests + panic!("unexpected call to 
divergent_no_common_predecessor"); + } +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_evolve_orphan(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + + let mut tx = repo.start_transaction("test"); + let initial = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let child = child_commit(&settings, &repo, &initial).write_to_transaction(&mut tx); + let grandchild = child_commit(&settings, &repo, &child).write_to_transaction(&mut tx); + + let rewritten = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial) + .set_description("rewritten".to_string()) + .write_to_transaction(&mut tx); + + let mut listener = RecordingEvolveListener::default(); + evolve(&settings, &mut tx, &mut listener); + assert_eq!(listener.evolved_divergents.len(), 0); + assert_eq!(listener.evolved_orphans.len(), 2); + assert_eq!(&listener.evolved_orphans[0].0, &child); + assert_eq!(&listener.evolved_orphans[0].1.parents(), &vec![rewritten]); + assert_eq!(&listener.evolved_orphans[1].0, &grandchild); + // TODO: the grandchild currently doesn't get rebased onto the rewritten child + // assert_eq!( + // &listener.evolved_orphans[1].1.parents(), + // &vec![listener.evolved_orphans[0].1.clone()] + // ); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_evolve_pruned_orphan(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let root_commit = repo.store().root_commit(); + + let mut tx = repo.start_transaction("test"); + let initial = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + // Create a pruned child and a non-pruned child to show that the pruned one does + // not get evolved (the non-pruned one is there to show that the setup is not 
+ // broken). + let child = child_commit(&settings, &repo, &initial).write_to_transaction(&mut tx); + let _pruned_child = child_commit(&settings, &repo, &initial) + .set_pruned(true) + .write_to_transaction(&mut tx); + let _rewritten = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial) + .set_description("rewritten".to_string()) + .write_to_transaction(&mut tx); + + let mut listener = RecordingEvolveListener::default(); + evolve(&settings, &mut tx, &mut listener); + assert_eq!(listener.evolved_divergents.len(), 0); + assert_eq!(listener.evolved_orphans.len(), 1); + assert_eq!(listener.evolved_orphans[0].0.id(), child.id()); + + tx.discard(); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_evolve_divergent(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + let root_commit = store.root_commit(); + + let mut tx = repo.start_transaction("test"); + + // Set up a repo like this: + // + // x 6 add files X and Z (divergent commit 2) + // o 5 add file A, contents C + // | x 4 add files X and Y (divergent commit 1) + // | o 3 add file A, contents B + // |/ + // | x 2 add file X (source of divergence) + // | o 1 add file A, contents A + // |/ + // o root + // + // Resolving the divergence should result in a new commit on top of 5 (because + // commit 6 has a later commit time than commit 4). It should have files C, + // X, Y, Z. 
+ + let path_a = FileRepoPath::from("A"); + let path_x = FileRepoPath::from("X"); + let path_y = FileRepoPath::from("Y"); + let path_z = FileRepoPath::from("Z"); + let tree1 = testutils::create_tree(&repo, &[(&path_a, "A")]); + let tree2 = testutils::create_tree(&repo, &[(&path_a, "A"), (&path_x, "X")]); + let tree3 = testutils::create_tree(&repo, &[(&path_a, "B")]); + let tree4 = testutils::create_tree(&repo, &[(&path_a, "B"), (&path_x, "X"), (&path_y, "Y")]); + let tree5 = testutils::create_tree(&repo, &[(&path_a, "C")]); + let tree6 = testutils::create_tree(&repo, &[(&path_a, "C"), (&path_x, "X"), (&path_z, "Z")]); + + let commit1 = CommitBuilder::for_new_commit(&settings, repo.store(), tree1.id().clone()) + .set_parents(vec![root_commit.id().clone()]) + .set_description("add file A, contents A".to_string()) + .write_to_transaction(&mut tx); + let commit3 = CommitBuilder::for_new_commit(&settings, repo.store(), tree3.id().clone()) + .set_parents(vec![root_commit.id().clone()]) + .set_description("add file A, contents B".to_string()) + .write_to_transaction(&mut tx); + let commit5 = CommitBuilder::for_new_commit(&settings, repo.store(), tree5.id().clone()) + .set_parents(vec![root_commit.id().clone()]) + .set_description("add file A, contents C".to_string()) + .write_to_transaction(&mut tx); + let commit2 = CommitBuilder::for_new_commit(&settings, repo.store(), tree2.id().clone()) + .set_parents(vec![commit1.id().clone()]) + .set_description("add file X".to_string()) + .write_to_transaction(&mut tx); + let commit4 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &commit2) + .set_parents(vec![commit3.id().clone()]) + .set_tree(tree4.id().clone()) + .set_description("add files X and Y".to_string()) + .write_to_transaction(&mut tx); + let mut later_time = commit4.committer().clone(); + later_time.timestamp.timestamp.0 += 1; + let commit6 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &commit2) + .set_parents(vec![commit5.id().clone()]) + 
.set_tree(tree6.id().clone()) + .set_description("add files X and Z".to_string()) + .set_committer(later_time) + .write_to_transaction(&mut tx); + + let mut listener = RecordingEvolveListener::default(); + evolve(&settings, &mut tx, &mut listener); + assert_eq!(listener.evolved_orphans.len(), 0); + assert_eq!(listener.evolved_divergents.len(), 1); + assert_eq!( + listener.evolved_divergents[0].0, + &[commit6.clone(), commit4.clone()] + ); + let resolved = listener.evolved_divergents[0].1.clone(); + assert_eq!(resolved.predecessors(), &[commit6.clone(), commit4.clone()]); + + let tree = resolved.tree(); + let entries: Vec<_> = tree.entries().collect(); + assert_eq!(entries.len(), 4); + assert_eq!(tree.value("A").unwrap(), tree5.value("A").unwrap()); + assert_eq!(tree.value("X").unwrap(), tree2.value("X").unwrap()); + assert_eq!(tree.value("Y").unwrap(), tree4.value("Y").unwrap()); + assert_eq!(tree.value("Z").unwrap(), tree6.value("Z").unwrap()); + + tx.discard(); +} diff --git a/lib/tests/test_index.rs b/lib/tests/test_index.rs new file mode 100644 index 000000000..359713c7a --- /dev/null +++ b/lib/tests/test_index.rs @@ -0,0 +1,390 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use jj_lib::commit::Commit; +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::index::CompositeIndex; +use jj_lib::repo::ReadonlyRepo; +use jj_lib::settings::UserSettings; +use jj_lib::store::CommitId; +use jj_lib::testutils; +use std::sync::Arc; +use test_case::test_case; + +#[must_use] +fn child_commit(settings: &UserSettings, repo: &ReadonlyRepo, commit: &Commit) -> CommitBuilder { + testutils::create_random_commit(&settings, repo).set_parents(vec![commit.id().clone()]) +} + +// Helper just to reduce line wrapping +fn generation_number(index: &CompositeIndex, commit_id: &CommitId) -> u32 { + index.entry_by_id(commit_id).unwrap().generation_number() +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_index_commits_empty_repo(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit + assert_eq!(index.num_commits(), 2); + + // Check the generation numbers of the root and the working copy + assert_eq!(generation_number(&index, repo.store().root_commit_id()), 0); + assert_eq!( + generation_number(&index, &repo.working_copy_locked().current_commit_id()), + 1 + ); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_index_commits_standard_cases(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + // o H + // o | G + // o | F + // |\| + // | o E + // | o D + // | o C + // o | B + // |/ + // o A + // | o working copy + // |/ + // o root + + let root_commit = repo.store().root_commit(); + let wc_commit = repo.working_copy_locked().current_commit(); + let mut tx = repo.start_transaction("test"); + let commit_a = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let commit_b = 
child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx); + let commit_c = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx); + let commit_d = child_commit(&settings, &repo, &commit_c).write_to_transaction(&mut tx); + let commit_e = child_commit(&settings, &repo, &commit_d).write_to_transaction(&mut tx); + let commit_f = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![commit_b.id().clone(), commit_e.id().clone()]) + .write_to_transaction(&mut tx); + let commit_g = child_commit(&settings, &repo, &commit_f).write_to_transaction(&mut tx); + let commit_h = child_commit(&settings, &repo, &commit_e).write_to_transaction(&mut tx); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus + // 8 more + assert_eq!(index.num_commits(), 2 + 8); + + let stats = index.stats(); + assert_eq!(stats.num_commits, 2 + 8); + assert_eq!(stats.num_merges, 1); + assert_eq!(stats.max_generation_number, 6); + + assert_eq!(generation_number(&index, root_commit.id()), 0); + assert_eq!(generation_number(&index, wc_commit.id()), 1); + assert_eq!(generation_number(&index, commit_a.id()), 1); + assert_eq!(generation_number(&index, commit_b.id()), 2); + assert_eq!(generation_number(&index, commit_c.id()), 2); + assert_eq!(generation_number(&index, commit_d.id()), 3); + assert_eq!(generation_number(&index, commit_e.id()), 4); + assert_eq!(generation_number(&index, commit_f.id()), 5); + assert_eq!(generation_number(&index, commit_g.id()), 6); + assert_eq!(generation_number(&index, commit_h.id()), 5); + + assert!(index.is_ancestor(root_commit.id(), commit_a.id())); + assert!(!index.is_ancestor(commit_a.id(), root_commit.id())); + + assert!(index.is_ancestor(root_commit.id(), commit_b.id())); + assert!(!index.is_ancestor(commit_b.id(), root_commit.id())); + + 
assert!(!index.is_ancestor(commit_b.id(), commit_c.id())); + + assert!(index.is_ancestor(commit_a.id(), commit_b.id())); + assert!(index.is_ancestor(commit_a.id(), commit_e.id())); + assert!(index.is_ancestor(commit_a.id(), commit_f.id())); + assert!(index.is_ancestor(commit_a.id(), commit_g.id())); + assert!(index.is_ancestor(commit_a.id(), commit_h.id())); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_index_commits_criss_cross(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let num_generations = 50; + let root_commit = repo.store().root_commit(); + + // Create a long chain of criss-crossed merges. If they were traversed without + // keeping track of visited nodes, it would be 2^50 visits, so if this test + // finishes in reasonable time, we know that we don't do a naive traversal. + let mut tx = repo.start_transaction("test"); + let mut left_commits = + vec![child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx)]; + let mut right_commits = + vec![child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx)]; + for gen in 1..num_generations { + let new_left = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![ + left_commits[gen - 1].id().clone(), + right_commits[gen - 1].id().clone(), + ]) + .write_to_transaction(&mut tx); + let new_right = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![ + left_commits[gen - 1].id().clone(), + right_commits[gen - 1].id().clone(), + ]) + .write_to_transaction(&mut tx); + left_commits.push(new_left); + right_commits.push(new_right); + } + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus 2 for each + // generation + assert_eq!(index.num_commits(), 2 + 2 * (num_generations as
u32)); + + let stats = index.stats(); + assert_eq!(stats.num_commits, 2 + 2 * (num_generations as u32)); + // The first generations are not merges + assert_eq!(stats.num_merges, 2 * (num_generations as u32 - 1)); + assert_eq!(stats.max_generation_number, num_generations as u32); + + // Check generation numbers + for gen in 0..num_generations { + assert_eq!( + generation_number(&index, left_commits[gen].id()), + (gen as u32) + 1 + ); + assert_eq!( + generation_number(&index, right_commits[gen].id()), + (gen as u32) + 1 + ); + } + + // The left and right commits of the same generation should not be ancestors of + // each other + for gen in 0..num_generations { + assert!(!index.is_ancestor(left_commits[gen].id(), right_commits[gen].id())); + assert!(!index.is_ancestor(right_commits[gen].id(), left_commits[gen].id())); + } + + // Both sides of earlier generations should be ancestors. Check a few different + // earlier generations. + for gen in 1..num_generations { + for ancestor_side in &[&left_commits, &right_commits] { + for descendant_side in &[&left_commits, &right_commits] { + assert!(index.is_ancestor(ancestor_side[0].id(), descendant_side[gen].id())); + assert!(index.is_ancestor(ancestor_side[gen - 1].id(), descendant_side[gen].id())); + assert!(index.is_ancestor(ancestor_side[gen / 2].id(), descendant_side[gen].id())); + } + } + } + + assert_eq!( + index + .walk_revs(&[left_commits[num_generations - 1].id().clone()], &[]) + .count(), + 2 * num_generations + ); + assert_eq!( + index + .walk_revs(&[right_commits[num_generations - 1].id().clone()], &[]) + .count(), + 2 * num_generations + ); + assert_eq!( + index + .walk_revs( + &[left_commits[num_generations - 1].id().clone()], + &[left_commits[num_generations - 2].id().clone()] + ) + .count(), + 2 + ); + assert_eq!( + index + .walk_revs( + &[right_commits[num_generations - 1].id().clone()], + &[right_commits[num_generations - 2].id().clone()] + ) + .count(), + 2 + ); +} + +#[test_case(false ; "local store")] 
+#[test_case(true ; "git store")] +fn test_index_commits_previous_operations(use_git: bool) { + // Test that commits visible only in previous operations are indexed. + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + // Remove commit B and C in one operation and make sure they're still + // visible in the index after that operation. + // o C + // o B + // o A + // | o working copy + // |/ + // o root + + let root_commit = repo.store().root_commit(); + let mut tx = repo.start_transaction("test"); + let commit_a = child_commit(&settings, &repo, &root_commit).write_to_transaction(&mut tx); + let commit_b = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx); + let commit_c = child_commit(&settings, &repo, &commit_b).write_to_transaction(&mut tx); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + tx.remove_head(&commit_c); + tx.remove_head(&commit_b); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + // Delete index from disk + let index_operations_dir = repo + .working_copy_path() + .join(".jj") + .join("index") + .join("operations"); + assert!(index_operations_dir.is_dir()); + std::fs::remove_dir_all(&index_operations_dir).unwrap(); + std::fs::create_dir(&index_operations_dir).unwrap(); + + let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus + // 3 more + assert_eq!(index.num_commits(), 2 + 3); + + let stats = index.stats(); + assert_eq!(stats.num_commits, 2 + 3); + assert_eq!(stats.num_merges, 0); + assert_eq!(stats.max_generation_number, 3); + + assert_eq!(generation_number(&index, commit_a.id()), 1); + assert_eq!(generation_number(&index, commit_b.id()), 2); + assert_eq!(generation_number(&index, commit_c.id()), 3); +} + 
+#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_index_commits_incremental(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + // Create A in one operation, then B and C in another. Check that the index is + // valid after. + // o C + // o B + // o A + // | o working copy + // |/ + // o root + + let root_commit = repo.store().root_commit(); + let commit_a = + child_commit(&settings, &repo, &root_commit).write_to_new_transaction(&repo, "test"); + Arc::get_mut(&mut repo).unwrap().reload(); + + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus + // 1 more + assert_eq!(index.num_commits(), 2 + 1); + + let mut tx = repo.start_transaction("test"); + let commit_b = child_commit(&settings, &repo, &commit_a).write_to_transaction(&mut tx); + let commit_c = child_commit(&settings, &repo, &commit_b).write_to_transaction(&mut tx); + tx.commit(); + + let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus + // 3 more + assert_eq!(index.num_commits(), 2 + 3); + + let stats = index.stats(); + assert_eq!(stats.num_commits, 2 + 3); + assert_eq!(stats.num_merges, 0); + assert_eq!(stats.max_generation_number, 3); + assert_eq!(stats.levels.len(), 2); + assert_eq!(stats.levels[0].num_commits, 2); + assert_eq!(stats.levels[1].num_commits, 3); + + assert_eq!(generation_number(&index, root_commit.id()), 0); + assert_eq!(generation_number(&index, commit_a.id()), 1); + assert_eq!(generation_number(&index, commit_b.id()), 2); + assert_eq!(generation_number(&index, commit_c.id()), 3); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_index_commits_incremental_empty_transaction(use_git: 
bool) { + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + // Create A in one operation, then just an empty transaction. Check that the + // index is valid after. + // o A + // | o working copy + // |/ + // o root + + let root_commit = repo.store().root_commit(); + let commit_a = + child_commit(&settings, &repo, &root_commit).write_to_new_transaction(&repo, "test"); + Arc::get_mut(&mut repo).unwrap().reload(); + + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus + // 1 more + assert_eq!(index.num_commits(), 2 + 1); + + repo.start_transaction("test").commit(); + + let repo = ReadonlyRepo::load(&settings, repo.working_copy_path().clone()); + let index = repo.index().index_file(); + let index = index.as_composite(); + // There should be the root commit and the working copy commit, plus + // 1 more + assert_eq!(index.num_commits(), 2 + 1); + + let stats = index.stats(); + assert_eq!(stats.num_commits, 2 + 1); + assert_eq!(stats.num_merges, 0); + assert_eq!(stats.max_generation_number, 1); + assert_eq!(stats.levels.len(), 2); + assert_eq!(stats.levels[0].num_commits, 0); + assert_eq!(stats.levels[1].num_commits, 3); + + assert_eq!(generation_number(&index, root_commit.id()), 0); + assert_eq!(generation_number(&index, commit_a.id()), 1); +} diff --git a/lib/tests/test_merge_trees.rs b/lib/tests/test_merge_trees.rs new file mode 100644 index 000000000..66270d8aa --- /dev/null +++ b/lib/tests/test_merge_trees.rs @@ -0,0 +1,473 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj_lib::repo_path::{DirRepoPath, FileRepoPath, RepoPath}; +use jj_lib::store::{ConflictPart, TreeValue}; +use jj_lib::testutils; +use jj_lib::tree::Tree; +use jj_lib::trees; +use test_case::test_case; + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_same_type(use_git: bool) { + // Tests all possible cases where the entry type is unchanged, specifically + // using only normal files in all trees (no symlinks, no trees, etc.). + + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + // The file name encodes the state in the base and in each side ("_" means + // missing) + let files = vec![ + "__a", // side 2 added + "_a_", // side 1 added + "_aa", // both sides added, same content + "_ab", // both sides added, different content + "a__", // both sides removed + "a_a", // side 1 removed + "a_b", // side 1 removed, side 2 modified + "aa_", // side 2 removed + "aaa", // no changes + "aab", // side 2 modified + "ab_", // side 1 modified, side 2 removed + "aba", // side 1 modified + "abb", // both sides modified, same content + "abc", // both sides modified, different content + ]; + + let write_tree = |index: usize| -> Tree { + let mut tree_builder = store.tree_builder(store.empty_tree_id().clone()); + for path in &files { + let contents = &path[index..index + 1]; + if contents != "_" { + testutils::write_normal_file( + &mut tree_builder, + &FileRepoPath::from(*path), + contents, + ); + } + } + let tree_id = 
tree_builder.write_tree(); + store.get_tree(&DirRepoPath::root(), &tree_id).unwrap() + }; + + let base_tree = write_tree(0); + let side1_tree = write_tree(1); + let side2_tree = write_tree(2); + + // Created the merged tree + let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap(); + let merged_tree = store + .get_tree(&DirRepoPath::root(), &merged_tree_id) + .unwrap(); + + // Check that we have exactly the paths we expect in the merged tree + let names: Vec<&str> = merged_tree + .entries() + .map(|entry| entry.name().as_ref()) + .collect(); + assert_eq!( + names, + vec!["__a", "_a_", "_aa", "_ab", "a_b", "aaa", "aab", "ab_", "aba", "abb", "abc",] + ); + + // Check that the simple, non-conflicting cases were resolved correctly + assert_eq!(merged_tree.value("__a"), side2_tree.value("__a")); + assert_eq!(merged_tree.value("_a_"), side1_tree.value("_a_")); + assert_eq!(merged_tree.value("_aa"), side1_tree.value("_aa")); + assert_eq!(merged_tree.value("aaa"), side1_tree.value("aaa")); + assert_eq!(merged_tree.value("aab"), side2_tree.value("aab")); + assert_eq!(merged_tree.value("aba"), side1_tree.value("aba")); + assert_eq!(merged_tree.value("abb"), side1_tree.value("abb")); + + // Check the conflicting cases + match merged_tree.value("_ab").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.adds, + vec![ + ConflictPart { + value: side1_tree.value("_ab").cloned().unwrap() + }, + ConflictPart { + value: side2_tree.value("_ab").cloned().unwrap() + } + ] + ); + assert!(conflict.removes.is_empty()); + } + _ => panic!("unexpected value"), + }; + match merged_tree.value("a_b").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.removes, + vec![ConflictPart { + value: base_tree.value("a_b").cloned().unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ConflictPart { + value: 
side2_tree.value("a_b").cloned().unwrap() + }] + ); + } + _ => panic!("unexpected value"), + }; + match merged_tree.value("ab_").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.removes, + vec![ConflictPart { + value: base_tree.value("ab_").cloned().unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ConflictPart { + value: side1_tree.value("ab_").cloned().unwrap() + }] + ); + } + _ => panic!("unexpected value"), + }; + match merged_tree.value("abc").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.removes, + vec![ConflictPart { + value: base_tree.value("abc").cloned().unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ + ConflictPart { + value: side1_tree.value("abc").cloned().unwrap() + }, + ConflictPart { + value: side2_tree.value("abc").cloned().unwrap() + } + ] + ); + } + _ => panic!("unexpected value"), + }; +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_subtrees(use_git: bool) { + // Tests that subtrees are merged. 
+ + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let write_tree = |paths: Vec<&str>| -> Tree { + let mut tree_builder = store.tree_builder(store.empty_tree_id().clone()); + for path in paths { + testutils::write_normal_file( + &mut tree_builder, + &FileRepoPath::from(path), + &format!("contents of {:?}", path), + ); + } + let tree_id = tree_builder.write_tree(); + store.get_tree(&DirRepoPath::root(), &tree_id).unwrap() + }; + + let base_tree = write_tree(vec!["f1", "d1/f1", "d1/d1/f1", "d1/d1/d1/f1"]); + let side1_tree = write_tree(vec![ + "f1", + "f2", + "d1/f1", + "d1/f2", + "d1/d1/f1", + "d1/d1/d1/f1", + ]); + let side2_tree = write_tree(vec![ + "f1", + "d1/f1", + "d1/d1/f1", + "d1/d1/d1/f1", + "d1/d1/d1/f2", + ]); + + let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap(); + let merged_tree = store + .get_tree(&DirRepoPath::root(), &merged_tree_id) + .unwrap(); + let entries: Vec<_> = merged_tree.entries().collect(); + + let expected_tree = write_tree(vec![ + "f1", + "f2", + "d1/f1", + "d1/f2", + "d1/d1/f1", + "d1/d1/d1/f1", + "d1/d1/d1/f2", + ]); + let expected_entries: Vec<_> = expected_tree.entries().collect(); + assert_eq!(entries, expected_entries); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_subtree_becomes_empty(use_git: bool) { + // Tests that subtrees that become empty are removed from the parent tree. 
+ + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let write_tree = |paths: Vec<&str>| -> Tree { + let mut tree_builder = store.tree_builder(store.empty_tree_id().clone()); + for path in paths { + testutils::write_normal_file( + &mut tree_builder, + &FileRepoPath::from(path), + &format!("contents of {:?}", path), + ); + } + let tree_id = tree_builder.write_tree(); + store.get_tree(&DirRepoPath::root(), &tree_id).unwrap() + }; + + let base_tree = write_tree(vec!["f1", "d1/f1", "d1/d1/d1/f1", "d1/d1/d1/f2"]); + let side1_tree = write_tree(vec!["f1", "d1/f1", "d1/d1/d1/f1"]); + let side2_tree = write_tree(vec!["d1/d1/d1/f2"]); + + let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap(); + let merged_tree = store + .get_tree(&DirRepoPath::root(), &merged_tree_id) + .unwrap(); + assert_eq!(merged_tree.id(), store.empty_tree_id()); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_types(use_git: bool) { + // Tests conflicts between different types. This is mostly to test that the + // conflicts survive the roundtrip to the store. 
+ + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let mut base_tree_builder = store.tree_builder(store.empty_tree_id().clone()); + let mut side1_tree_builder = store.tree_builder(store.empty_tree_id().clone()); + let mut side2_tree_builder = store.tree_builder(store.empty_tree_id().clone()); + testutils::write_normal_file( + &mut base_tree_builder, + &FileRepoPath::from("normal_executable_symlink"), + "contents", + ); + testutils::write_executable_file( + &mut side1_tree_builder, + &FileRepoPath::from("normal_executable_symlink"), + "contents", + ); + testutils::write_symlink( + &mut side2_tree_builder, + &FileRepoPath::from("normal_executable_symlink"), + "contents", + ); + let tree_id = store.empty_tree_id().clone(); + base_tree_builder.set( + RepoPath::from("tree_normal_symlink"), + TreeValue::Tree(tree_id), + ); + testutils::write_normal_file( + &mut side1_tree_builder, + &FileRepoPath::from("tree_normal_symlink"), + "contents", + ); + testutils::write_symlink( + &mut side2_tree_builder, + &FileRepoPath::from("tree_normal_symlink"), + "contents", + ); + let base_tree_id = base_tree_builder.write_tree(); + let base_tree = store.get_tree(&DirRepoPath::root(), &base_tree_id).unwrap(); + let side1_tree_id = side1_tree_builder.write_tree(); + let side1_tree = store + .get_tree(&DirRepoPath::root(), &side1_tree_id) + .unwrap(); + let side2_tree_id = side2_tree_builder.write_tree(); + let side2_tree = store + .get_tree(&DirRepoPath::root(), &side2_tree_id) + .unwrap(); + + // Created the merged tree + let merged_tree_id = trees::merge_trees(&side1_tree, &base_tree, &side2_tree).unwrap(); + let merged_tree = store + .get_tree(&DirRepoPath::root(), &merged_tree_id) + .unwrap(); + + // Check the conflicting cases + match merged_tree.value("normal_executable_symlink").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(&id).unwrap(); + assert_eq!( + 
conflict.removes, + vec![ConflictPart { + value: base_tree + .value("normal_executable_symlink") + .cloned() + .unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ + ConflictPart { + value: side1_tree + .value("normal_executable_symlink") + .cloned() + .unwrap() + }, + ConflictPart { + value: side2_tree + .value("normal_executable_symlink") + .cloned() + .unwrap() + }, + ] + ); + } + _ => panic!("unexpected value"), + }; + match merged_tree.value("tree_normal_symlink").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.removes, + vec![ConflictPart { + value: base_tree.value("tree_normal_symlink").cloned().unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ + ConflictPart { + value: side1_tree.value("tree_normal_symlink").cloned().unwrap() + }, + ConflictPart { + value: side2_tree.value("tree_normal_symlink").cloned().unwrap() + }, + ] + ); + } + _ => panic!("unexpected value"), + }; +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_simplify_conflict(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let write_tree = |contents: &str| -> Tree { + testutils::create_tree(&repo, &[(&FileRepoPath::from("file"), contents)]) + }; + + let base_tree = write_tree("base contents"); + let branch_tree = write_tree("branch contents"); + let upstream1_tree = write_tree("upstream1 contents"); + let upstream2_tree = write_tree("upstream2 contents"); + + let merge_trees = |base: &Tree, side1: &Tree, side2: &Tree| -> Tree { + let tree_id = trees::merge_trees(&side1, &base, &side2).unwrap(); + store.get_tree(&DirRepoPath::root(), &tree_id).unwrap() + }; + + // Rebase the branch tree to the first upstream tree + let rebased1_tree = merge_trees(&base_tree, &branch_tree, &upstream1_tree); + // Make sure we have a conflict (testing the test setup) + match 
rebased1_tree.value("file").unwrap() { + TreeValue::Conflict(_) => { + // expected + } + _ => panic!("unexpected value"), + }; + + // Rebase the rebased tree back to the base. The conflict should be gone. Try + // both directions. + let rebased_back_tree = merge_trees(&upstream1_tree, &rebased1_tree, &base_tree); + assert_eq!(rebased_back_tree.value("file"), branch_tree.value("file")); + let rebased_back_tree = merge_trees(&upstream1_tree, &base_tree, &rebased1_tree); + assert_eq!(rebased_back_tree.value("file"), branch_tree.value("file")); + + // Rebase the rebased tree further upstream. The conflict should be simplified + // to not mention the contents from the first rebase. + let further_rebased_tree = merge_trees(&upstream1_tree, &rebased1_tree, &upstream2_tree); + match further_rebased_tree.value("file").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.removes, + vec![ConflictPart { + value: base_tree.value("file").cloned().unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ + ConflictPart { + value: branch_tree.value("file").cloned().unwrap() + }, + ConflictPart { + value: upstream2_tree.value("file").cloned().unwrap() + }, + ] + ); + } + _ => panic!("unexpected value"), + }; + let further_rebased_tree = merge_trees(&upstream1_tree, &upstream2_tree, &rebased1_tree); + match further_rebased_tree.value("file").unwrap() { + TreeValue::Conflict(id) => { + let conflict = store.read_conflict(id).unwrap(); + assert_eq!( + conflict.removes, + vec![ConflictPart { + value: base_tree.value("file").cloned().unwrap() + }] + ); + assert_eq!( + conflict.adds, + vec![ + ConflictPart { + value: upstream2_tree.value("file").cloned().unwrap() + }, + ConflictPart { + value: branch_tree.value("file").cloned().unwrap() + }, + ] + ); + } + _ => panic!("unexpected value"), + }; +} diff --git a/lib/tests/test_operations.rs b/lib/tests/test_operations.rs new file mode 100644 index 000000000..3f754e635 --- 
/dev/null +++ b/lib/tests/test_operations.rs @@ -0,0 +1,171 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::repo::Repo; +use jj_lib::store::CommitId; +use jj_lib::testutils; +use std::collections::HashSet; +use std::path::Path; +use std::sync::Arc; +use test_case::test_case; + +fn list_dir(dir: &Path) -> Vec<String> { + std::fs::read_dir(dir) + .unwrap() + .map(|entry| entry.unwrap().file_name().to_str().unwrap().to_owned()) + .collect() +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_consecutive_operations(use_git: bool) { + // Test that consecutive operations result in a single op-head on disk after + // each operation + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let op_heads_dir = repo.repo_path().join("view").join("op_heads"); + let op_head_id0 = repo.view().base_op_head_id().clone(); + assert_eq!( + list_dir(&op_heads_dir), + vec![repo.view().base_op_head_id().hex()] + ); + + let mut tx1 = repo.start_transaction("transaction 1"); + testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx1); + let op_head_id1 = tx1.commit().id().clone(); + assert_ne!(op_head_id1, op_head_id0); + assert_eq!(list_dir(&op_heads_dir), vec![op_head_id1.hex()]); + + Arc::get_mut(&mut repo).unwrap().reload(); + let mut tx2 = repo.start_transaction("transaction 
2"); + testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx2); + let op_head_id2 = tx2.commit().id().clone(); + assert_ne!(op_head_id2, op_head_id0); + assert_ne!(op_head_id2, op_head_id1); + assert_eq!(list_dir(&op_heads_dir), vec![op_head_id2.hex()]); + + // Reloading the repo makes no difference (there are no conflicting operations + // to resolve). + Arc::get_mut(&mut repo).unwrap().reload(); + assert_eq!(list_dir(&op_heads_dir), vec![op_head_id2.hex()]); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_concurrent_operations(use_git: bool) { + // Test that consecutive operations result in multiple op-heads on disk until + // the repo has been reloaded (which currently happens right away). + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let op_heads_dir = repo.repo_path().join("view").join("op_heads"); + let op_head_id0 = repo.view().base_op_head_id().clone(); + assert_eq!( + list_dir(&op_heads_dir), + vec![repo.view().base_op_head_id().hex()] + ); + + let mut tx1 = repo.start_transaction("transaction 1"); + testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx1); + let op_head_id1 = tx1.commit().id().clone(); + assert_ne!(op_head_id1, op_head_id0); + assert_eq!(list_dir(&op_heads_dir), vec![op_head_id1.hex()]); + + // After both transactions have committed, we should have two op-heads on disk, + // since they were run in parallel. 
+ let mut tx2 = repo.start_transaction("transaction 2"); + testutils::create_random_commit(&settings, &repo).write_to_transaction(&mut tx2); + let op_head_id2 = tx2.commit().id().clone(); + assert_ne!(op_head_id2, op_head_id0); + assert_ne!(op_head_id2, op_head_id1); + let mut actual_heads_on_disk = list_dir(&op_heads_dir); + actual_heads_on_disk.sort(); + let mut expected_heads_on_disk = vec![op_head_id1.hex(), op_head_id2.hex()]; + expected_heads_on_disk.sort(); + assert_eq!(actual_heads_on_disk, expected_heads_on_disk); + + // Reloading the repo causes the operations to be merged + Arc::get_mut(&mut repo).unwrap().reload(); + let merged_op_head_id = repo.view().base_op_head_id().clone(); + assert_ne!(merged_op_head_id, op_head_id0); + assert_ne!(merged_op_head_id, op_head_id1); + assert_ne!(merged_op_head_id, op_head_id2); + assert_eq!(list_dir(&op_heads_dir), vec![merged_op_head_id.hex()]); +} + +fn assert_heads(repo: &impl Repo, expected: Vec<&CommitId>) { + let actual: HashSet<_> = { + let locked_heads = repo.view(); + locked_heads.heads().cloned().collect() + }; + let expected = expected.iter().cloned().cloned().collect(); + assert_eq!(actual, expected); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_isolation(use_git: bool) { + // Test that two concurrent transactions don't see each other's changes. 
+ let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let wc_id = repo.working_copy_locked().current_commit_id(); + let initial = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![repo.store().root_commit_id().clone()]) + .write_to_new_transaction(&repo, "test"); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx1 = repo.start_transaction("transaction 1"); + let mut tx2 = repo.start_transaction("transaction 2"); + + assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]); + assert_heads(tx1.as_repo(), vec![&wc_id, initial.id()]); + assert_heads(tx2.as_repo(), vec![&wc_id, initial.id()]); + assert!(!repo.evolution().is_obsolete(initial.id())); + assert!(!tx1.as_repo().evolution().is_obsolete(initial.id())); + assert!(!tx2.as_repo().evolution().is_obsolete(initial.id())); + + let rewrite1 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial) + .set_description("rewrite1".to_string()) + .write_to_transaction(&mut tx1); + let rewrite2 = CommitBuilder::for_rewrite_from(&settings, repo.store(), &initial) + .set_description("rewrite2".to_string()) + .write_to_transaction(&mut tx2); + + // Neither transaction has committed yet, so each transaction sees its own + // commit. + assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]); + assert_heads(tx1.as_repo(), vec![&wc_id, initial.id(), rewrite1.id()]); + assert_heads(tx2.as_repo(), vec![&wc_id, initial.id(), rewrite2.id()]); + assert!(!repo.evolution().is_obsolete(initial.id())); + assert!(tx1.as_repo().evolution().is_obsolete(initial.id())); + assert!(tx2.as_repo().evolution().is_obsolete(initial.id())); + + // The base repo and tx2 don't see the commits from tx1. + tx1.commit(); + assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]); + assert_heads(tx2.as_repo(), vec![&wc_id, initial.id(), rewrite2.id()]); + + // The base repo still doesn't see the commits after both transactions commit. 
+ tx2.commit(); + assert_heads(repo.as_ref(), vec![&wc_id, initial.id()]); + // After reload, the base repo sees both rewrites. + Arc::get_mut(&mut repo).unwrap().reload(); + assert_heads( + repo.as_ref(), + vec![&wc_id, initial.id(), rewrite1.id(), rewrite2.id()], + ); +} diff --git a/lib/tests/test_transaction.rs b/lib/tests/test_transaction.rs new file mode 100644 index 000000000..db81f2d30 --- /dev/null +++ b/lib/tests/test_transaction.rs @@ -0,0 +1,303 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::repo::Repo; +use jj_lib::repo_path::FileRepoPath; +use jj_lib::store::{Conflict, ConflictId, ConflictPart, TreeValue}; +use jj_lib::store_wrapper::StoreWrapper; +use jj_lib::testutils; +use std::sync::Arc; +use test_case::test_case; + +// TODO Many of the tests here are not run with Git because they end up creating +// two commits with the same contents. 
+ +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_open(use_git: bool) { + // Test that Transaction::check_out() uses the requested commit if it's open + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut tx = repo.start_transaction("test"); + let requested_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .write_to_transaction(&mut tx); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let actual_checkout = tx.check_out(&settings, &requested_checkout); + assert_eq!(actual_checkout.id(), requested_checkout.id()); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + assert_eq!(repo.view().checkout(), actual_checkout.id()); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_closed(use_git: bool) { + // Test that Transaction::check_out() creates a child if the requested commit is + // closed + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut tx = repo.start_transaction("test"); + let requested_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(false) + .write_to_transaction(&mut tx); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let actual_checkout = tx.check_out(&settings, &requested_checkout); + assert_eq!(actual_checkout.tree().id(), requested_checkout.tree().id()); + assert_eq!(actual_checkout.parents().len(), 1); + assert_eq!(actual_checkout.parents()[0].id(), requested_checkout.id()); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + assert_eq!(repo.view().checkout(), actual_checkout.id()); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_open_with_conflict(use_git: bool) { + // 
Test that Transaction::check_out() creates a successor if the requested + // commit is open and has conflicts + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let file_path = FileRepoPath::from("file"); + let conflict_id = write_conflict(store, &file_path); + let mut tree_builder = repo + .store() + .tree_builder(repo.store().empty_tree_id().clone()); + tree_builder.set(file_path.to_repo_path(), TreeValue::Conflict(conflict_id)); + let tree_id = tree_builder.write_tree(); + + let mut tx = repo.start_transaction("test"); + let requested_checkout = CommitBuilder::for_new_commit(&settings, store, tree_id) + .set_open(true) + .write_to_transaction(&mut tx); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let actual_checkout = tx.check_out(&settings, &requested_checkout); + let file_value = actual_checkout.tree().path_value(&file_path.to_repo_path()); + match file_value { + Some(TreeValue::Normal { + id: _, + executable: false, + }) => {} + _ => panic!("unexpected tree value: {:?}", file_value), + } + assert_eq!(actual_checkout.predecessors().len(), 1); + assert_eq!( + actual_checkout.predecessors()[0].id(), + requested_checkout.id() + ); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + assert_eq!(repo.view().checkout(), actual_checkout.id()); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_closed_with_conflict(use_git: bool) { + // Test that Transaction::check_out() creates a child if the requested commit is + // closed and has conflicts + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let file_path = FileRepoPath::from("file"); + let conflict_id = write_conflict(store, &file_path); + let mut tree_builder = repo + .store() + 
.tree_builder(repo.store().empty_tree_id().clone()); + tree_builder.set(file_path.to_repo_path(), TreeValue::Conflict(conflict_id)); + let tree_id = tree_builder.write_tree(); + + let mut tx = repo.start_transaction("test"); + let requested_checkout = CommitBuilder::for_new_commit(&settings, store, tree_id) + .set_open(false) + .write_to_transaction(&mut tx); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let actual_checkout = tx.check_out(&settings, &requested_checkout); + let file_value = actual_checkout.tree().path_value(&file_path.to_repo_path()); + match file_value { + Some(TreeValue::Normal { + id: _, + executable: false, + }) => {} + _ => panic!("unexpected tree value: {:?}", file_value), + } + assert_eq!(actual_checkout.parents().len(), 1); + assert_eq!(actual_checkout.parents()[0].id(), requested_checkout.id()); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + assert_eq!(repo.view().checkout(), actual_checkout.id()); +} + +fn write_conflict(store: &Arc, file_path: &FileRepoPath) -> ConflictId { + let file_id1 = testutils::write_file(store, &file_path, "a\n"); + let file_id2 = testutils::write_file(store, &file_path, "b\n"); + let file_id3 = testutils::write_file(store, &file_path, "c\n"); + let conflict = Conflict { + removes: vec![ConflictPart { + value: TreeValue::Normal { + id: file_id1, + executable: false, + }, + }], + adds: vec![ + ConflictPart { + value: TreeValue::Normal { + id: file_id2, + executable: false, + }, + }, + ConflictPart { + value: TreeValue::Normal { + id: file_id3, + executable: false, + }, + }, + ], + }; + store.write_conflict(&conflict).unwrap() +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_previous_not_empty(use_git: bool) { + // Test that Transaction::check_out() does not usually prune the previous + // commit. 
+ let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut tx = repo.start_transaction("test"); + let old_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &old_checkout); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let new_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &new_checkout); + assert!(!tx.as_repo().evolution().is_obsolete(old_checkout.id())); + tx.discard(); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_previous_empty(use_git: bool) { + // Test that Transaction::check_out() prunes the previous commit if it was + // empty. + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut tx = repo.start_transaction("test"); + let old_checkout = CommitBuilder::for_open_commit( + &settings, + repo.store(), + repo.store().root_commit_id().clone(), + repo.store().empty_tree_id().clone(), + ) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &old_checkout); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let new_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &new_checkout); + assert!(tx.as_repo().evolution().is_obsolete(old_checkout.id())); + tx.discard(); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_previous_empty_and_obsolete(use_git: bool) { + // Test that Transaction::check_out() does not unnecessarily prune the previous + // commit if it was empty but already obsolete. 
+ let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut tx = repo.start_transaction("test"); + let old_checkout = CommitBuilder::for_open_commit( + &settings, + repo.store(), + repo.store().root_commit_id().clone(), + repo.store().empty_tree_id().clone(), + ) + .write_to_transaction(&mut tx); + let successor = CommitBuilder::for_rewrite_from(&settings, repo.store(), &old_checkout) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &old_checkout); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let new_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &new_checkout); + let successors = tx.as_repo().evolution().successors(old_checkout.id()); + assert_eq!(successors.len(), 1); + assert_eq!(successors.iter().next().unwrap(), successor.id()); + tx.discard(); +} + +#[test_case(false ; "local store")] +// #[test_case(true ; "git store")] +fn test_checkout_previous_empty_and_pruned(use_git: bool) { + // Test that Transaction::check_out() does not unnecessarily prune the previous + // commit if it was empty but already obsolete. 
+ let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let mut tx = repo.start_transaction("test"); + let old_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .set_pruned(true) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &old_checkout); + tx.commit(); + Arc::get_mut(&mut repo).unwrap().reload(); + + let mut tx = repo.start_transaction("test"); + let new_checkout = testutils::create_random_commit(&settings, &repo) + .set_open(true) + .write_to_transaction(&mut tx); + tx.check_out(&settings, &new_checkout); + assert!(tx + .as_repo() + .evolution() + .successors(old_checkout.id()) + .is_empty()); + tx.discard(); +} diff --git a/lib/tests/test_view.rs b/lib/tests/test_view.rs new file mode 100644 index 000000000..7e4e9e19d --- /dev/null +++ b/lib/tests/test_view.rs @@ -0,0 +1,93 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use jj_lib::repo::Repo; +use jj_lib::store::CommitId; +use jj_lib::testutils; +use test_case::test_case; + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_heads_empty(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + + let heads = repo.view(); + let wc = repo.working_copy_locked(); + let all_heads: Vec = heads.heads().cloned().collect(); + assert_eq!(all_heads, vec![wc.current_commit_id()]); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_heads_fork(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let mut tx = repo.start_transaction("test"); + + let initial = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![repo.store().root_commit_id().clone()]) + .write_to_transaction(&mut tx); + let child1 = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![initial.id().clone()]) + .write_to_transaction(&mut tx); + let child2 = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![initial.id().clone()]) + .write_to_transaction(&mut tx); + + let heads = tx.as_repo().view(); + let wc = repo.working_copy_locked(); + let mut actual_all_heads: Vec = heads.heads().cloned().collect(); + actual_all_heads.sort(); + let mut expected_all_heads = vec![ + wc.current_commit_id(), + child1.id().clone(), + child2.id().clone(), + ]; + expected_all_heads.sort(); + assert_eq!(actual_all_heads, expected_all_heads); + drop(heads); + tx.discard(); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_heads_merge(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let mut tx = repo.start_transaction("test"); + + let initial = testutils::create_random_commit(&settings, &repo) + 
.set_parents(vec![repo.store().root_commit_id().clone()]) + .write_to_transaction(&mut tx); + let child1 = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![initial.id().clone()]) + .write_to_transaction(&mut tx); + let child2 = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![initial.id().clone()]) + .write_to_transaction(&mut tx); + let merge = testutils::create_random_commit(&settings, &repo) + .set_parents(vec![child1.id().clone(), child2.id().clone()]) + .write_to_transaction(&mut tx); + + let heads = tx.as_repo().view(); + let wc = repo.working_copy_locked(); + let mut actual_all_heads: Vec = heads.heads().cloned().collect(); + actual_all_heads.sort(); + let mut expected_all_heads = vec![wc.current_commit_id(), merge.id().clone()]; + expected_all_heads.sort(); + assert_eq!(actual_all_heads, expected_all_heads); + drop(heads); + tx.discard(); +} diff --git a/lib/tests/test_working_copy.rs b/lib/tests/test_working_copy.rs new file mode 100644 index 000000000..d55a3b564 --- /dev/null +++ b/lib/tests/test_working_copy.rs @@ -0,0 +1,267 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[cfg(not(windows))] +use std::os::unix::fs::PermissionsExt; + +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::repo::{ReadonlyRepo, Repo}; +use jj_lib::repo_path::{FileRepoPath, RepoPath}; +use jj_lib::settings::UserSettings; +use jj_lib::store::TreeValue; +use jj_lib::testutils; +use jj_lib::tree_builder::TreeBuilder; +use std::fs::OpenOptions; +use std::io::Write; +use std::sync::Arc; +use test_case::test_case; + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_root(use_git: bool) { + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let owned_wc = repo.working_copy().clone(); + let wc = owned_wc.lock().unwrap(); + assert_eq!(&wc.current_commit_id(), repo.view().checkout()); + assert_ne!(&wc.current_commit_id(), repo.store().root_commit_id()); + let wc_commit = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap()); + assert_eq!(wc_commit.id(), repo.view().checkout()); + assert_eq!(wc_commit.tree().id(), repo.store().empty_tree_id()); + assert_eq!(wc_commit.store_commit().parents, vec![]); + assert_eq!(wc_commit.predecessors(), vec![]); + assert_eq!(wc_commit.description(), ""); + assert_eq!(wc_commit.is_open(), true); + assert_eq!(wc_commit.author().name, settings.user_name()); + assert_eq!(wc_commit.author().email, settings.user_email()); + assert_eq!(wc_commit.committer().name, settings.user_name()); + assert_eq!(wc_commit.committer().email, settings.user_email()); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_checkout_file_transitions(use_git: bool) { + // Tests switching between commits where a certain path is of one type in one + // commit and another type in the other. Includes a "missing" type, so we cover + // additions and removals as well. 
+ + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + let store = repo.store().clone(); + + #[derive(Debug, Clone, Copy)] + enum Kind { + Missing, + Normal, + Executable, + Symlink, + Tree, + GitSubmodule, + }; + + fn write_path( + settings: &UserSettings, + repo: &ReadonlyRepo, + tree_builder: &mut TreeBuilder, + kind: Kind, + path: &str, + ) { + let store = repo.store(); + let value = match kind { + Kind::Missing => { + return; + } + Kind::Normal => { + let id = + testutils::write_file(store, &FileRepoPath::from(path), "normal file contents"); + TreeValue::Normal { + id, + executable: false, + } + } + Kind::Executable => { + let id = testutils::write_file( + store, + &FileRepoPath::from(path), + "executable file contents", + ); + TreeValue::Normal { + id, + executable: true, + } + } + Kind::Symlink => { + let id = store + .write_symlink(&FileRepoPath::from(path), "target") + .unwrap(); + TreeValue::Symlink(id) + } + Kind::Tree => { + let mut sub_tree_builder = store.tree_builder(store.empty_tree_id().clone()); + let file_path = path.to_owned() + "/file"; + write_path( + settings, + repo, + &mut sub_tree_builder, + Kind::Normal, + &file_path, + ); + let id = sub_tree_builder.write_tree(); + TreeValue::Tree(id) + } + Kind::GitSubmodule => { + let id = testutils::create_random_commit(&settings, &repo) + .write_to_new_transaction(&repo, "test") + .id() + .clone(); + TreeValue::GitSubmodule(id) + } + }; + tree_builder.set(RepoPath::from(path), value); + }; + + let mut kinds = vec![ + Kind::Missing, + Kind::Normal, + Kind::Executable, + Kind::Symlink, + Kind::Tree, + ]; + if use_git { + kinds.push(Kind::GitSubmodule); + } + let mut left_tree_builder = store.tree_builder(store.empty_tree_id().clone()); + let mut right_tree_builder = store.tree_builder(store.empty_tree_id().clone()); + let mut files = vec![]; + for left_kind in &kinds { + for right_kind in &kinds { + let path = format!("{:?}_{:?}", 
left_kind, right_kind); + write_path(&settings, &repo, &mut left_tree_builder, *left_kind, &path); + write_path( + &settings, + &repo, + &mut right_tree_builder, + *right_kind, + &path, + ); + files.push((*left_kind, *right_kind, path)); + } + } + let left_tree_id = left_tree_builder.write_tree(); + let right_tree_id = right_tree_builder.write_tree(); + + let left_commit = CommitBuilder::for_new_commit(&settings, repo.store(), left_tree_id) + .set_parents(vec![store.root_commit_id().clone()]) + .set_open(true) + .write_to_new_transaction(&repo, "test"); + let right_commit = CommitBuilder::for_new_commit(&settings, repo.store(), right_tree_id) + .set_parents(vec![store.root_commit_id().clone()]) + .set_open(true) + .write_to_new_transaction(&repo, "test"); + + let owned_wc = repo.working_copy().clone(); + let wc = owned_wc.lock().unwrap(); + wc.check_out(&repo, left_commit).unwrap(); + wc.commit(&settings, Arc::get_mut(&mut repo).unwrap()); + wc.check_out(&repo, right_commit.clone()).unwrap(); + + // Check that the working copy is clean. 
+ let after_commit = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap()); + let diff_summary = right_commit.tree().diff_summary(&after_commit.tree()); + assert_eq!(diff_summary.modified, vec![]); + assert_eq!(diff_summary.added, vec![]); + assert_eq!(diff_summary.removed, vec![]); + + for (_left_kind, right_kind, path) in &files { + let wc_path = repo.working_copy_path().join(path); + let maybe_metadata = wc_path.symlink_metadata(); + match right_kind { + Kind::Missing => { + assert_eq!(maybe_metadata.is_ok(), false, "{:?} should not exist", path); + } + Kind::Normal => { + assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path); + let metadata = maybe_metadata.unwrap(); + assert_eq!(metadata.is_file(), true, "{:?} should be a file", path); + assert_eq!( + metadata.permissions().mode() & 0o111, + 0, + "{:?} should not be executable", + path + ); + } + Kind::Executable => { + assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path); + let metadata = maybe_metadata.unwrap(); + assert_eq!(metadata.is_file(), true, "{:?} should be a file", path); + assert_ne!( + metadata.permissions().mode() & 0o111, + 0, + "{:?} should be executable", + path + ); + } + Kind::Symlink => { + assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path); + let metadata = maybe_metadata.unwrap(); + assert_eq!( + metadata.file_type().is_symlink(), + true, + "{:?} should be a symlink", + path + ); + } + Kind::Tree => { + assert_eq!(maybe_metadata.is_ok(), true, "{:?} should exist", path); + let metadata = maybe_metadata.unwrap(); + assert_eq!(metadata.is_dir(), true, "{:?} should be a directory", path); + } + Kind::GitSubmodule => { + // Not supported for now + assert_eq!(maybe_metadata.is_ok(), false, "{:?} should not exist", path); + } + }; + } +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_commit_racy_timestamps(use_git: bool) { + // Tests that file modifications are detected even if they happen the same + // 
millisecond as the updated working copy state. + + let settings = testutils::user_settings(); + let (_temp_dir, mut repo) = testutils::init_repo(&settings, use_git); + + let file_path = repo.working_copy_path().join("file"); + let mut previous_tree_id = repo.store().empty_tree_id().clone(); + let owned_wc = repo.working_copy().clone(); + let wc = owned_wc.lock().unwrap(); + for i in 0..100 { + { + let mut file = OpenOptions::new() + .create(true) + .write(true) + .open(&file_path) + .unwrap(); + file.write_all(format!("contents {}", i).as_bytes()) + .unwrap(); + } + let commit = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap()); + let new_tree_id = commit.tree().id().clone(); + assert_ne!(new_tree_id, previous_tree_id); + previous_tree_id = new_tree_id; + } +} diff --git a/lib/tests/test_working_copy_concurrent.rs b/lib/tests/test_working_copy_concurrent.rs new file mode 100644 index 000000000..18ca637fb --- /dev/null +++ b/lib/tests/test_working_copy_concurrent.rs @@ -0,0 +1,155 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::thread; + +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::repo::ReadonlyRepo; +use jj_lib::repo_path::FileRepoPath; +use jj_lib::store::CommitId; +use jj_lib::testutils; +use jj_lib::working_copy::CheckoutError; +use std::collections::HashSet; +use std::sync::Arc; +use test_case::test_case; + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_concurrent_checkout(use_git: bool) { + // Test that we error out if a concurrent checkout is detected (i.e. if the + // current checkout changed on disk after we read it). + let settings = testutils::user_settings(); + let (_temp_dir, repo1) = testutils::init_repo(&settings, use_git); + + let commit1 = testutils::create_random_commit(&settings, &repo1) + .set_open(true) + .write_to_new_transaction(&repo1, "test"); + let commit2 = testutils::create_random_commit(&settings, &repo1) + .set_open(true) + .write_to_new_transaction(&repo1, "test"); + let commit3 = testutils::create_random_commit(&settings, &repo1) + .set_open(true) + .write_to_new_transaction(&repo1, "test"); + + // Check out commit1 + let wc1 = repo1.working_copy_locked(); + wc1.check_out(&repo1, commit1).unwrap(); + + // Check out commit2 from another process (simulated by another repo instance) + let repo2 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone()); + repo2 + .working_copy_locked() + .check_out(&repo2, commit2.clone()) + .unwrap(); + + // Checking out another commit (via the first repo instance) should now fail. + assert_eq!( + wc1.check_out(&repo1, commit3), + Err(CheckoutError::ConcurrentCheckout) + ); + + // Check that the commit2 is still checked out on disk. 
+ let repo3 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone()); + assert_eq!( + repo3.working_copy_locked().current_tree_id(), + commit2.tree().id().clone() + ); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_concurrent_commit(use_git: bool) { + // Test that concurrent working copy commits result in a chain of successors + // instead of divergence. + let settings = testutils::user_settings(); + let (_temp_dir, mut repo1) = testutils::init_repo(&settings, use_git); + + let owned_wc1 = repo1.working_copy().clone(); + let wc1 = owned_wc1.lock().unwrap(); + let commit1 = wc1.current_commit(); + + // Commit from another process (simulated by another repo instance) + let mut repo2 = ReadonlyRepo::load(&settings, repo1.working_copy_path().clone()); + testutils::write_working_copy_file(&repo2, &FileRepoPath::from("file2"), "contents2"); + let owned_wc2 = repo2.working_copy().clone(); + let wc2 = owned_wc2.lock().unwrap(); + let commit2 = wc2.commit(&settings, Arc::get_mut(&mut repo2).unwrap()); + + assert_eq!(commit2.predecessors(), vec![commit1]); + + // Creating another commit (via the first repo instance) should result in a + // successor of the commit created from the other process. + testutils::write_working_copy_file(&repo1, &FileRepoPath::from("file3"), "contents3"); + let commit3 = wc1.commit(&settings, Arc::get_mut(&mut repo1).unwrap()); + assert_eq!(commit3.predecessors(), vec![commit2]); +} + +#[test_case(false ; "local store")] +#[test_case(true ; "git store")] +fn test_checkout_parallel(use_git: bool) { + // Test that concurrent checkouts by different processes (simulated by using + // different repo instances) is safe. 
+ let settings = testutils::user_settings(); + let (_temp_dir, repo) = testutils::init_repo(&settings, use_git); + let store = repo.store(); + + let mut commit_ids = vec![]; + for i in 0..100 { + let path = FileRepoPath::from(format!("file{}", i).as_str()); + let tree = testutils::create_tree(&repo, &[(&path, "contents")]); + let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone()) + .set_open(true) + .write_to_new_transaction(&repo, "test"); + commit_ids.push(commit.id().clone()); + } + + // Create another commit just so we can test the update stats reliably from the + // first update + let tree = testutils::create_tree(&repo, &[(&FileRepoPath::from("other file"), "contents")]); + let mut tx = repo.start_transaction("test"); + let commit = CommitBuilder::for_new_commit(&settings, store, tree.id().clone()) + .set_open(true) + .write_to_transaction(&mut tx); + repo.working_copy_locked().check_out(&repo, commit).unwrap(); + tx.commit(); + + let mut threads = vec![]; + let commit_ids_set: HashSet = commit_ids.iter().cloned().collect(); + for commit_id in &commit_ids { + let commit_ids_set = commit_ids_set.clone(); + let commit_id = commit_id.clone(); + let settings = settings.clone(); + let working_copy_path = repo.working_copy_path().clone(); + let handle = thread::spawn(move || { + let mut repo = ReadonlyRepo::load(&settings, working_copy_path); + let owned_wc = repo.working_copy().clone(); + let wc = owned_wc.lock().unwrap(); + let commit = repo.store().get_commit(&commit_id).unwrap(); + let stats = wc.check_out(&repo, commit).unwrap(); + assert_eq!(stats.updated_files, 0); + assert_eq!(stats.added_files, 1); + assert_eq!(stats.removed_files, 1); + // Check that the working copy contains one of the commits. We may see a + // different commit than the one we just checked out, but since + // commit() should take the same lock as check_out(), commit() + // should never produce a different tree (resulting in a different commit). 
+ let commit_after = wc.commit(&settings, Arc::get_mut(&mut repo).unwrap()); + assert!(commit_ids_set.contains(commit_after.id())); + }); + threads.push(handle); + } + for thread in threads { + thread.join().ok().unwrap(); + } +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..8b287f1a7 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +max_width = 100 +wrap_comments = true +error_on_line_overflow = true diff --git a/src/commands.rs b/src/commands.rs new file mode 100644 index 000000000..91cf0e764 --- /dev/null +++ b/src/commands.rs @@ -0,0 +1,1903 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +extern crate chrono; +extern crate clap; +extern crate config; + +use std::collections::{HashSet, VecDeque}; +use std::ffi::OsString; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Write}; +use std::process::Command; +use std::sync::Arc; + +use clap::{App, Arg, ArgMatches, SubCommand}; + +use criterion::Criterion; + +use pest::Parser; + +use jj_lib::commit::Commit; +use jj_lib::commit_builder::CommitBuilder; +use jj_lib::conflicts; +use jj_lib::dag_walk::{common_ancestor, topo_order_reverse, walk_ancestors}; +use jj_lib::evolution::evolve; +use jj_lib::evolution::EvolveListener; +use jj_lib::files; +use jj_lib::files::DiffLine; +use jj_lib::op_store::{OpStoreError, OperationId}; +use jj_lib::repo::{ReadonlyRepo, Repo}; +use jj_lib::repo_path::RepoPath; +use jj_lib::rewrite::{back_out_commit, merge_commit_trees, rebase_commit}; +use jj_lib::settings::UserSettings; +use jj_lib::store::{CommitId, Timestamp}; +use jj_lib::store::{StoreError, TreeValue}; +use jj_lib::tree::Tree; +use jj_lib::trees::{walk_entries, TreeValueDiff}; +use jj_lib::working_copy::{CheckoutStats, WorkingCopy}; + +use self::chrono::{FixedOffset, TimeZone, Utc}; +use crate::graphlog::{AsciiGraphDrawer, Edge}; +use crate::styler::{ColorStyler, Styler}; +use crate::template_parser::TemplateParser; +use crate::templater::Template; +use crate::ui::Ui; +use jj_lib::index::{HexPrefix, PrefixResolution}; +use jj_lib::operation::Operation; +use jj_lib::transaction::Transaction; +use jj_lib::view::merge_views; + +enum CommandError { + UserError(String), + InternalError(String), +} + +fn get_repo(ui: &Ui, matches: &ArgMatches) -> Result, CommandError> { + let repo_path_str = matches.value_of("repository").unwrap(); + let repo_path = ui.cwd().join(repo_path_str); + let mut repo = ReadonlyRepo::load(ui.settings(), repo_path); + if let Some(op_str) = matches.value_of("at_op") { + let op = resolve_single_op(&repo, op_str)?; + Arc::get_mut(&mut repo).unwrap().reload_at(&op); + } + 
Ok(repo) +} + +fn resolve_commit_id_prefix( + repo: &ReadonlyRepo, + prefix: &HexPrefix, +) -> Result { + let index = repo.index().index_file(); + match index.as_composite().resolve_prefix(prefix) { + PrefixResolution::NoMatch => Err(CommandError::UserError(String::from("No such commit"))), + PrefixResolution::AmbiguousMatch => { + Err(CommandError::UserError(String::from("Ambiguous prefix"))) + } + PrefixResolution::SingleMatch(id) => Ok(id), + } +} + +fn resolve_revision_arg( + ui: &Ui, + repo: &mut ReadonlyRepo, + matches: &ArgMatches, +) -> Result { + resolve_single_rev(ui, repo, matches.value_of("revision").unwrap()) +} + +fn resolve_single_rev( + ui: &Ui, + repo: &mut ReadonlyRepo, + revision_str: &str, +) -> Result { + if revision_str == "@" { + let owned_wc = repo.working_copy().clone(); + let wc = owned_wc.lock().unwrap(); + // TODO: Avoid committing every time this function is called. + Ok(wc.commit(ui.settings(), repo)) + } else if revision_str == "@^" { + let commit = repo.store().get_commit(repo.view().checkout()).unwrap(); + assert!(commit.is_open()); + let parents = commit.parents(); + Ok(parents[0].clone()) + } else if revision_str == "root" { + Ok(repo.store().root_commit()) + } else if revision_str.starts_with("desc(") && revision_str.ends_with(')') { + let needle = revision_str[5..revision_str.len() - 1].to_string(); + let mut matches = vec![]; + let heads: HashSet = repo + .view() + .heads() + .map(|commit_id| repo.store().get_commit(commit_id).unwrap()) + .collect(); + let heads = skip_uninteresting_heads(repo, heads); + for commit in walk_ancestors(heads) { + if commit.description().contains(&needle) { + matches.push(commit); + } + } + matches + .pop() + .ok_or_else(|| CommandError::UserError(String::from("No matching commit"))) + } else { + if let Ok(binary_commit_id) = hex::decode(revision_str) { + let commit_id = CommitId(binary_commit_id); + match repo.store().get_commit(&commit_id) { + Ok(commit) => return Ok(commit), + 
Err(StoreError::NotFound) => {} // fall through + Err(err) => { + return Err(CommandError::InternalError(format!( + "Failed to read commit: {}", + err + ))) + } + } + } + let id = resolve_commit_id_prefix(repo, &HexPrefix::new(revision_str.to_owned()))?; + Ok(repo.store().get_commit(&id).unwrap()) + } +} + +fn rev_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name("revision") + .long("revision") + .short("r") + .takes_value(true) + .default_value("@") +} + +fn op_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name("operation") + .long("operation") + .alias("op") + .short("o") + .takes_value(true) + .default_value("@") +} + +fn resolve_single_op(repo: &ReadonlyRepo, op_str: &str) -> Result { + let view = repo.view(); + if op_str == "@" { + Ok(view.base_op_head()) + } else if let Ok(binary_op_id) = hex::decode(op_str) { + let op_id = OperationId(binary_op_id); + match view.get_operation(&op_id) { + Ok(operation) => Ok(operation), + Err(OpStoreError::NotFound) => Err(CommandError::UserError(format!( + "Operation id not found: {}", + op_str + ))), + Err(err) => Err(CommandError::InternalError(format!( + "Failed to read commit: {:?}", + err + ))), + } + } else { + Err(CommandError::UserError(format!( + "Invalid operation id: {}", + op_str + ))) + } +} + +fn update_working_copy( + ui: &mut Ui, + repo: &mut ReadonlyRepo, + wc: &WorkingCopy, +) -> Result, CommandError> { + repo.reload(); + let old_commit = wc.current_commit(); + let new_commit = repo.store().get_commit(repo.view().checkout()).unwrap(); + if old_commit == new_commit { + return Ok(None); + } + ui.write("leaving: "); + ui.write_commit_summary(repo, &old_commit); + ui.write("\n"); + // TODO: CheckoutError::ConcurrentCheckout should probably just result in a + // warning for most commands (but be an error for the checkout command) + let stats = wc.check_out(repo, new_commit.clone()).map_err(|err| { + CommandError::InternalError(format!( + "Failed to check out commit {}: {}", + new_commit.id().hex(), + err + )) + })?; 
+ ui.write("now at: "); + ui.write_commit_summary(repo, &new_commit); + ui.write("\n"); + Ok(Some(stats)) +} + +fn update_checkout_after_rewrite(ui: &mut Ui, tx: &mut Transaction) { + // TODO: Perhaps this method should be in Transaction. + let repo = tx.as_repo(); + let new_checkout_candidates = repo.evolution().new_parent(repo.view().checkout()); + if new_checkout_candidates.is_empty() { + return; + } + // Filter out heads that already existed. + // TODO: Filter out *commits* that already existed (so we get updated to an + // appropriate new non-head) + let old_heads: HashSet<_> = tx.base_repo().view().heads().cloned().collect(); + let new_checkout_candidates: HashSet<_> = new_checkout_candidates + .difference(&old_heads) + .cloned() + .collect(); + if new_checkout_candidates.is_empty() { + return; + } + if new_checkout_candidates.len() > 1 { + ui.write( + "There are several candidates for updating the checkout to -- picking arbitrarily\n", + ); + } + let new_checkout = new_checkout_candidates.iter().min().unwrap(); + let new_commit = repo.store().get_commit(new_checkout).unwrap(); + tx.check_out(ui.settings(), &new_commit); +} + +fn get_app<'a, 'b>() -> App<'a, 'b> { + App::new("Jujube") + .global_setting(clap::AppSettings::ColoredHelp) + .version("0.0.1") + .author("Martin von Zweigbergk ") + .about("My source control tool") + .arg( + Arg::with_name("repository") + .long("repository") + .short("R") + .takes_value(true) + .default_value("."), + ) + .arg(Arg::with_name("at_op").long("at-operation").alias("at-op").takes_value(true)) + .subcommand( + SubCommand::with_name("init") + .about("initialize a repo") + .arg(Arg::with_name("destination").index(1).default_value(".")) + .arg( + Arg::with_name("git-store") + .long("git-store") + .takes_value(true) + .help("path to a .git backing store"), + ), + ) + .subcommand( + SubCommand::with_name("checkout") + .alias("co") + .about("update the working copy to another commit") + 
.arg(Arg::with_name("revision").index(1).required(true)), + ) + .subcommand( + SubCommand::with_name("files") + .about("list files") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("diff") + .about("show modified files") + .arg( + Arg::with_name("summary") + .long("summary") + .short("s") + .help("show only the diff type (modified/added/removed)"), + ) + .arg(Arg::with_name("revision") + .long("revision") + .short("r") + .takes_value(true) + ) + .arg(Arg::with_name("from").long("from").takes_value(true)) + .arg(Arg::with_name("to").long("to").takes_value(true)), + ) + .subcommand( + SubCommand::with_name("status") + .alias("st") + .about("show repo status"), + ) + .subcommand( + SubCommand::with_name("log") + .about("show commit history") + .arg( + Arg::with_name("template") + .long("template") + .short("T") + .takes_value(true), + ) + .arg(Arg::with_name("all").long("all")) + .arg(Arg::with_name("no-graph").long("no-graph")), + ) + .subcommand( + SubCommand::with_name("obslog") + .about("show how a commit has evolved") + .arg(rev_arg()) + .arg( + Arg::with_name("template") + .long("template") + .short("T") + .takes_value(true), + ) + .arg(Arg::with_name("no-graph").long("no-graph")), + ) + .subcommand( + SubCommand::with_name("describe") + .about("edit the commit description") + .arg(rev_arg()) + .arg(Arg::with_name("text").long("text").takes_value(true)) + .arg(Arg::with_name("stdin").long("stdin")), + ) + .subcommand( + SubCommand::with_name("close") + .about("mark a commit closed, making new work go into a new commit") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("open") + .about("mark a commit open, making new work be added to it") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("duplicate") + .about("create a copy of the commit with a new change id") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("prune") + .about("create an empty successor of a commit") + .arg(rev_arg()), + ) + .subcommand( + 
SubCommand::with_name("new") + .about("create a new, empty commit") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("squash") + .about("squash a commit into its parent") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("discard") + .about("discard a commit (and its descendants)") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("restore") + .about("restore paths from another revision") + .arg( + Arg::with_name("source") + .long("source") + .short("s") + .takes_value(true) + .default_value("@^"), + ) + .arg( + Arg::with_name("destination") + .long("destination") + .short("d") + .takes_value(true) + .default_value("@"), + ) + .arg( + Arg::with_name("paths") + .index(1) + .required(true) + .multiple(true), + ), + ) + .subcommand( + SubCommand::with_name("merge") + .about("merge work from multiple branches") + .arg( + Arg::with_name("revisions") + .index(1) + .required(true) + .multiple(true), + ), + ) + .subcommand( + SubCommand::with_name("rebase") + .about("move a commit to a different parent") + .arg(rev_arg()) + .arg( + Arg::with_name("destination") + .long("destination") + .short("d") + .takes_value(true) + .required(true) + .multiple(true), + ), + ) + .subcommand( + SubCommand::with_name("backout") + .about("apply the reverse of a commit on top of another commit") + .arg(rev_arg()) + .arg( + Arg::with_name("destination") + .long("destination") + .short("d") + .takes_value(true) + .default_value("@") + .multiple(true), + ), + ) + .subcommand( + SubCommand::with_name("evolve").about("resolve problems with the repo's meta-history"), + ) + .subcommand( + SubCommand::with_name("operation") + .alias("op") + .about("commands for working with the operation log") + .subcommand(SubCommand::with_name("log").about("show the operation log")) + .subcommand( + SubCommand::with_name("undo") + .about("undo an operation") + .arg(op_arg()), + ) + .subcommand( + SubCommand::with_name("restore") + .about("restore to the state at an operation") 
+ .arg(op_arg()), + ), + ) + .subcommand( + SubCommand::with_name("bench") + .about("commands for benchmarking internal operations") + .subcommand( + SubCommand::with_name("commonancestors") + .about("finds the common ancestor(s) of a set of commits") + .arg(Arg::with_name("revision1").index(1).required(true)) + .arg(Arg::with_name("revision2").index(2).required(true)), + ) + .subcommand( + SubCommand::with_name("isancestor") + .about("checks if the first commit is an ancestor of the second commit") + .arg(Arg::with_name("ancestor").index(1).required(true)) + .arg(Arg::with_name("descendant").index(2).required(true)), + ) + .subcommand( + SubCommand::with_name("walkrevs") + .about("walks revisions that are ancestors of the second argument but not ancestors of the first") + .arg(Arg::with_name("unwanted").index(1).required(true)) + .arg(Arg::with_name("wanted").index(2).required(true)), + ) + .subcommand( + SubCommand::with_name("resolveprefix") + .about("resolve a commit id prefix") + .arg(Arg::with_name("prefix").index(1).required(true)), + ), + ) + .subcommand( + SubCommand::with_name("debug") + .about("low-level commands not intended for users") + .subcommand( + SubCommand::with_name("resolverev") + .about("resolves a revision identifier to its full id") + .arg(rev_arg()), + ) + .subcommand( + SubCommand::with_name("workingcopy") + .about("show information about the working copy state"), + ) + .subcommand( + SubCommand::with_name("writeworkingcopy") + .about("write a tree from the working copy state"), + ) + .subcommand( + SubCommand::with_name("template") + .about("parse a template") + .arg(Arg::with_name("template").index(1).required(true)), + ) + .subcommand(SubCommand::with_name("index").about("show commit index stats")) + .subcommand(SubCommand::with_name("reindex").about("rebuild commit index")), + ) +} + +fn cmd_init( + ui: &mut Ui, + _matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let wc_path_str = 
sub_matches.value_of("destination").unwrap(); + let wc_path = ui.cwd().join(wc_path_str); + if wc_path.exists() { + assert!(wc_path.is_dir()); + } else { + fs::create_dir(&wc_path).unwrap(); + } + + let repo; + if let Some(git_store_str) = sub_matches.value_of("git-store") { + let git_store_path = ui.cwd().join(git_store_str); + repo = ReadonlyRepo::init_git(ui.settings(), wc_path, git_store_path); + } else { + repo = ReadonlyRepo::init_local(ui.settings(), wc_path); + } + writeln!(ui, "Initialized repo in {:?}", repo.working_copy_path()); + Ok(()) +} + +fn cmd_checkout( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let new_commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let wc = owned_wc.lock().unwrap(); + wc.commit(ui.settings(), mut_repo); + let mut tx = repo.start_transaction(&format!("check out commit {}", new_commit.id().hex())); + tx.check_out(ui.settings(), &new_commit); + tx.commit(); + let stats = update_working_copy(ui, Arc::get_mut(&mut repo).unwrap(), &wc)?; + match stats { + None => ui.write("already on that commit\n"), + Some(stats) => writeln!( + ui, + "added {} files, modified {} files, removed {} files", + stats.added_files, stats.updated_files, stats.removed_files + ), + } + Ok(()) +} + +fn cmd_files( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + walk_entries(&commit.tree(), &mut |name, _value| { + writeln!(ui, "{}", name.to_internal_string()); + Ok(()) + }) +} + +fn print_diff(left: &[u8], right: &[u8], styler: &mut dyn Styler) { + let num_context_lines = 3; + let mut context = VecDeque::new(); + // Have we printed "..." 
for any skipped context? + let mut skipped_context = false; + // Are the lines in `context` to be printed before the next modified line? + let mut context_before = true; + files::diff(left, right, &mut |diff_line| { + if diff_line.is_unmodified() { + context.push_back(diff_line.clone()); + if context.len() > num_context_lines { + if context_before { + context.pop_front(); + } else { + context.pop_back(); + } + if !context_before { + for line in &context { + print_diff_line(styler, line); + } + context.clear(); + context_before = true; + } + if !skipped_context { + styler.write_bytes(b" ...\n"); + skipped_context = true; + } + } + } else { + if context_before { + for line in &context { + print_diff_line(styler, line); + } + } + context.clear(); + print_diff_line(styler, diff_line); + context_before = false; + skipped_context = false; + } + }); + if !context_before { + for line in &context { + print_diff_line(styler, line); + } + } +} + +fn print_diff_line(styler: &mut dyn Styler, diff_line: &DiffLine) { + if diff_line.has_left_content { + styler.add_label(String::from("left")); + styler.write_bytes(format!("{:>4}", diff_line.left_line_number).as_bytes()); + styler.remove_label(); + styler.write_bytes(b" "); + } else { + styler.write_bytes(b" "); + } + if diff_line.has_right_content { + styler.add_label(String::from("right")); + styler.write_bytes(format!("{:>4}", diff_line.right_line_number).as_bytes()); + styler.remove_label(); + styler.write_bytes(b": "); + } else { + styler.write_bytes(b" : "); + } + for hunk in &diff_line.hunks { + match hunk { + files::DiffHunk::Unmodified(data) => { + styler.write_bytes(data.as_slice()); + } + files::DiffHunk::Removed(data) => { + styler.add_label(String::from("left")); + styler.write_bytes(data.as_slice()); + styler.remove_label(); + } + files::DiffHunk::Added(data) => { + styler.add_label(String::from("right")); + styler.write_bytes(data.as_slice()); + styler.remove_label(); + } + } + } +} + +fn cmd_diff( + ui: &mut Ui, + 
matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + if sub_matches.is_present("revision") + && (sub_matches.is_present("from") || sub_matches.is_present("to")) + { + return Err(CommandError::UserError(String::from( + "--revision cannot be used with --from or --to", + ))); + } + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + if sub_matches.is_present("from") || sub_matches.is_present("to") {} + let from_tree; + let to_tree; + if sub_matches.is_present("from") || sub_matches.is_present("to") { + from_tree = + resolve_single_rev(ui, mut_repo, sub_matches.value_of("from").unwrap_or("@"))?.tree(); + to_tree = + resolve_single_rev(ui, mut_repo, sub_matches.value_of("to").unwrap_or("@"))?.tree(); + } else { + let commit = resolve_single_rev( + ui, + mut_repo, + sub_matches.value_of("revision").unwrap_or("@"), + )?; + let parents = commit.parents(); + from_tree = merge_commit_trees(repo.store(), &parents); + to_tree = commit.tree() + } + if sub_matches.is_present("summary") { + show_diff_summary(ui, &from_tree, &to_tree); + } else { + let mut styler = ui.styler(); + styler.add_label(String::from("diff")); + from_tree.diff(&to_tree, &mut |path, diff| match diff { + TreeValueDiff::Added(TreeValue::Normal { + id, + executable: false, + }) => { + styler.add_label(String::from("header")); + styler.write_str(&format!("added file {}:\n", path.to_internal_string())); + styler.remove_label(); + + let mut file_reader = repo.store().read_file(path, id).unwrap(); + styler.write_from_reader(&mut file_reader); + } + TreeValueDiff::Modified( + TreeValue::Normal { + id: id_left, + executable: false, + }, + TreeValue::Normal { + id: id_right, + executable: false, + }, + ) => { + styler.add_label(String::from("header")); + styler.write_str(&format!("modified file {}:\n", path.to_internal_string())); + styler.remove_label(); + + let mut file_reader_left = repo.store().read_file(path, id_left).unwrap(); + let mut 
buffer_left = vec![]; + file_reader_left.read_to_end(&mut buffer_left).unwrap(); + let mut file_reader_right = repo.store().read_file(path, id_right).unwrap(); + let mut buffer_right = vec![]; + file_reader_right.read_to_end(&mut buffer_right).unwrap(); + + print_diff( + buffer_left.as_slice(), + buffer_right.as_slice(), + styler.as_mut(), + ); + } + TreeValueDiff::Modified( + TreeValue::Conflict(id_left), + TreeValue::Normal { + id: id_right, + executable: false, + }, + ) => { + styler.add_label(String::from("header")); + styler.write_str(&format!( + "resolved conflict in file {}:\n", + path.to_internal_string() + )); + styler.remove_label(); + + let conflict_left = repo.store().read_conflict(id_left).unwrap(); + let mut buffer_left = vec![]; + conflicts::materialize_conflict( + repo.store(), + &path.to_repo_path(), + &conflict_left, + &mut buffer_left, + ); + let mut file_reader_right = repo.store().read_file(path, id_right).unwrap(); + let mut buffer_right = vec![]; + file_reader_right.read_to_end(&mut buffer_right).unwrap(); + + print_diff( + buffer_left.as_slice(), + buffer_right.as_slice(), + styler.as_mut(), + ); + } + TreeValueDiff::Modified( + TreeValue::Normal { + id: id_left, + executable: false, + }, + TreeValue::Conflict(id_right), + ) => { + styler.add_label(String::from("header")); + styler.write_str(&format!( + "new conflict in file {}:\n", + path.to_internal_string() + )); + styler.remove_label(); + + let mut file_reader_left = repo.store().read_file(path, id_left).unwrap(); + let mut buffer_left = vec![]; + file_reader_left.read_to_end(&mut buffer_left).unwrap(); + let conflict_right = repo.store().read_conflict(id_right).unwrap(); + let mut buffer_right = vec![]; + conflicts::materialize_conflict( + repo.store(), + &path.to_repo_path(), + &conflict_right, + &mut buffer_right, + ); + + print_diff( + buffer_left.as_slice(), + buffer_right.as_slice(), + styler.as_mut(), + ); + } + TreeValueDiff::Removed(TreeValue::Normal { + id, + executable: false, 
+ }) => { + styler.add_label(String::from("header")); + styler.write_str(&format!("removed file {}:\n", path.to_internal_string())); + styler.remove_label(); + + let mut file_reader = repo.store().read_file(path, id).unwrap(); + styler.write_from_reader(&mut file_reader); + } + other => { + writeln!( + styler, + "unhandled diff case in path {:?}: {:?}", + path, other + ) + .unwrap(); + } + }); + styler.remove_label(); + } + Ok(()) +} + +fn show_diff_summary(ui: &mut Ui, from: &Tree, to: &Tree) { + let summary = from.diff_summary(&to); + for file in summary.modified { + writeln!(ui, "M {}", file.to_internal_string()); + } + for file in summary.added { + writeln!(ui, "A {}", file.to_internal_string()); + } + for file in summary.removed { + writeln!(ui, "R {}", file.to_internal_string()); + } +} + +fn cmd_status( + ui: &mut Ui, + matches: &ArgMatches, + _sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let wc = owned_wc.lock().unwrap(); + let commit = wc.commit(ui.settings(), mut_repo); + ui.write("Working copy : "); + ui.write_commit_summary(repo.as_ref(), &commit); + ui.write("\n"); + ui.write("Parent commit: "); + ui.write_commit_summary(repo.as_ref(), &commit.parents()[0]); + ui.write("\n"); + ui.write("Diff summary:\n"); + show_diff_summary(ui, &commit.parents()[0].tree(), &commit.tree()); + Ok(()) +} + +fn log_template(settings: &UserSettings) -> String { + let default_template = r#" + label(if(open, "open"), + "commit: " commit_id "\n" + "change: " change_id "\n" + "author: " author.name() " <" author.email() ">\n" + "committer: " committer.name() " <" committer.email() ">\n" + "open: " open "\n" + "pruned: " pruned "\n" + "obsolete: " obsolete "\n" + "orphan: " orphan "\n" + "divergent: " divergent "\n" + "has conflict: " conflict "\n" + description "\n" + )"#; + settings + .config() + .get_str("template.log") + 
.unwrap_or_else(|_| String::from(default_template)) +} + +fn graph_log_template(settings: &UserSettings) -> String { + // TODO: define a method on boolean values, so we can get auto-coloring + // with e.g. `obsolete.then("obsolete")` + let default_template = r#" + if(current_checkout, "<-- ") + label(if(open, "open"), + commit_id.short() + " " change_id.short() + " " author.email() + " " committer.email() + if(pruned, label("pruned", " pruned")) + if(obsolete, label("obsolete", " obsolete")) + if(orphan, label("orphan", " orphan")) + if(divergent, label("divergent", " divergent")) + if(conflict, label("conflict", " conflict")) + "\n" + description.first_line() + "\n" + )"#; + settings + .config() + .get_str("template.log.graph") + .unwrap_or_else(|_| String::from(default_template)) +} + +fn skip_uninteresting_heads(repo: &ReadonlyRepo, heads: HashSet) -> HashSet { + let checkout_id = repo.view().checkout().clone(); + let mut result = HashSet::new(); + let mut work: Vec<_> = heads.into_iter().collect(); + let evolution = repo.evolution(); + while !work.is_empty() { + let commit = work.pop().unwrap(); + if result.contains(&commit) { + continue; + } + if (!commit.is_pruned() && !evolution.is_obsolete(commit.id())) + || commit.id() == &checkout_id + { + result.insert(commit); + } else { + for parent in commit.parents() { + work.push(parent); + } + } + } + result +} + +fn cmd_log( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + + let use_graph = !sub_matches.is_present("no-graph"); + if use_graph { + // Commit so the latest working copy is reflected in the visible heads + owned_wc.lock().unwrap().commit(ui.settings(), mut_repo); + } + + let template_string = match sub_matches.value_of("template") { + Some(value) => value.to_string(), + None => { + if use_graph { + 
graph_log_template(ui.settings()) + } else { + log_template(ui.settings()) + } + } + }; + let template = crate::template_parser::parse_commit_template(repo.as_ref(), &template_string); + + let mut styler = ui.styler(); + let mut styler = styler.as_mut(); + styler.add_label(String::from("log")); + + let mut heads: HashSet<_> = repo + .view() + .heads() + .map(|id| repo.store().get_commit(id).unwrap()) + .collect(); + if !sub_matches.is_present("all") { + heads = skip_uninteresting_heads(&repo, heads); + }; + let mut heads: Vec<_> = heads.into_iter().collect(); + heads.sort(); + + let commits = topo_order_reverse( + heads, + Box::new(|commit: &Commit| commit.id().clone()), + Box::new(|commit: &Commit| commit.parents()), + ); + if use_graph { + let mut graph = AsciiGraphDrawer::new(&mut styler); + for commit in commits { + let mut edges = vec![]; + for parent in commit.parents() { + edges.push(Edge::direct(parent.id().clone())); + } + let mut buffer = vec![]; + // TODO: only use color if requested + { + let writer = Box::new(&mut buffer); + let mut styler = ColorStyler::new(writer, ui.settings()); + template.format(&commit, &mut styler); + } + if !buffer.ends_with(b"\n") { + buffer.push(b'\n'); + } + graph.add_node(commit.id(), &edges, b"o", &buffer); + } + } else { + for commit in commits { + template.format(&commit, styler); + } + } + + Ok(()) +} + +fn cmd_obslog( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + + let use_graph = !sub_matches.is_present("no-graph"); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let start_commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + + let template_string = match sub_matches.value_of("template") { + Some(value) => value.to_string(), + None => { + if use_graph { + graph_log_template(ui.settings()) + } else { + log_template(ui.settings()) + } + } + }; + let template = 
crate::template_parser::parse_commit_template(repo.as_ref(), &template_string); + + let mut styler = ui.styler(); + let mut styler = styler.as_mut(); + styler.add_label(String::from("log")); + + let commits = topo_order_reverse( + vec![start_commit], + Box::new(|commit: &Commit| commit.id().clone()), + Box::new(|commit: &Commit| commit.predecessors()), + ); + if use_graph { + let mut graph = AsciiGraphDrawer::new(&mut styler); + for commit in commits { + let mut edges = vec![]; + for predecessor in commit.predecessors() { + edges.push(Edge::direct(predecessor.id().clone())); + } + let mut buffer = vec![]; + // TODO: only use color if requested + { + let writer = Box::new(&mut buffer); + let mut styler = ColorStyler::new(writer, ui.settings()); + template.format(&commit, &mut styler); + } + if !buffer.ends_with(b"\n") { + buffer.push(b'\n'); + } + graph.add_node(commit.id(), &edges, b"o", &buffer); + } + } else { + for commit in commits { + template.format(&commit, styler); + } + } + + Ok(()) +} + +fn edit_description(repo: &ReadonlyRepo, commit: &Commit) -> String { + // TODO: Where should this file live? The current location prevents two + // concurrent `jj describe` calls. 
+ let description_file_path = repo.repo_path().join("description"); + { + let mut description_file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&description_file_path) + .unwrap_or_else(|_| panic!("failed to open {:?} for write", &description_file_path)); + description_file + .write_all(commit.description().as_bytes()) + .unwrap(); + } + + let exit_status = Command::new("pico") + .arg(&description_file_path) + .status() + .expect("failed to run editor"); + if !exit_status.success() { + panic!("failed to run editor"); + } + + let mut description_file = OpenOptions::new() + .read(true) + .open(&description_file_path) + .unwrap_or_else(|_| panic!("failed to open {:?} for read", &description_file_path)); + let mut buf = vec![]; + description_file.read_to_end(&mut buf).unwrap(); + String::from_utf8(buf).unwrap() +} + +fn cmd_describe( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let description; + if sub_matches.is_present("stdin") { + let mut buffer = String::new(); + io::stdin().read_to_string(&mut buffer).unwrap(); + description = buffer; + } else if sub_matches.is_present("text") { + description = sub_matches.value_of("text").unwrap().to_owned() + } else { + description = edit_description(&repo, &commit); + } + let mut tx = repo.start_transaction(&format!("describe commit {}", commit.id().hex())); + CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &commit) + .set_description(description) + .write_to_transaction(&mut tx); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + Ok(()) +} + +fn cmd_open( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: 
&ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let mut tx = repo.start_transaction(&format!("open commit {}", commit.id().hex())); + CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &commit) + .set_open(true) + .write_to_transaction(&mut tx); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + Ok(()) +} + +fn cmd_close( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let mut commit_builder = + CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &commit).set_open(false); + if commit.description().is_empty() { + let description = edit_description(&repo, &commit); + commit_builder = commit_builder.set_description(description); + } + let mut tx = repo.start_transaction(&format!("close commit {}", commit.id().hex())); + commit_builder.write_to_transaction(&mut tx); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + Ok(()) +} + +fn cmd_duplicate( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let predecessor = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let mut tx = repo.start_transaction(&format!("duplicate commit {}", predecessor.id().hex())); + let new_commit = CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), 
&predecessor) + .generate_new_change_id() + .write_to_transaction(&mut tx); + ui.write("created: "); + ui.write_commit_summary(tx.as_repo(), &new_commit); + ui.write("\n"); + tx.commit(); + Ok(()) +} + +fn cmd_prune( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let predecessor = resolve_revision_arg(ui, mut_repo, sub_matches)?; + if predecessor.id() == repo.store().root_commit_id() { + return Err(CommandError::UserError(String::from( + "Cannot prune the root commit", + ))); + } + let mut tx = repo.start_transaction(&format!("prune commit {}", predecessor.id().hex())); + CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &predecessor) + .set_pruned(true) + .write_to_transaction(&mut tx); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + Ok(()) +} + +fn cmd_new( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let parent = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let commit_builder = CommitBuilder::for_open_commit( + ui.settings(), + repo.store(), + parent.id().clone(), + parent.tree().id().clone(), + ); + let mut tx = repo.start_transaction("new empty commit"); + let new_commit = commit_builder.write_to_transaction(&mut tx); + if tx.as_repo().view().checkout() == parent.id() { + tx.check_out(ui.settings(), &new_commit); + } + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + Ok(()) +} + +fn cmd_squash( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), 
CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let parents = commit.parents(); + if parents.len() != 1 { + return Err(CommandError::UserError(String::from( + "Cannot squash merge commits", + ))); + } + let parent = &parents[0]; + if parent.id() == repo.store().root_commit_id() { + return Err(CommandError::UserError(String::from( + "Cannot squash into the root commit", + ))); + } + let mut tx = repo.start_transaction(&format!("squash commit {}", commit.id().hex())); + let squashed_commit = CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &parent) + .set_tree(commit.tree().id().clone()) + .set_predecessors(vec![parent.id().clone(), commit.id().clone()]) + .write_to_transaction(&mut tx); + // Commit the remainder on top of the new commit (always empty in the + // non-interactive case), so the squashed-in commit becomes obsolete, and so + // descendants evolve correctly. 
+ CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &commit) + .set_parents(vec![squashed_commit.id().clone()]) + .set_pruned(true) + .write_to_transaction(&mut tx); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + Ok(()) +} + +fn cmd_discard( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let mut tx = repo.start_transaction(&format!("discard commit {}", commit.id().hex())); + tx.remove_head(&commit); + // TODO: also remove descendants + tx.commit(); + // TODO: check out parent/ancestor if the current commit got hidden + Ok(()) +} + +fn cmd_restore( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let source_commit = resolve_single_rev(ui, mut_repo, sub_matches.value_of("source").unwrap())?; + let destination_commit = + resolve_single_rev(ui, mut_repo, sub_matches.value_of("destination").unwrap())?; + let paths = sub_matches.values_of("paths").unwrap(); + let mut tree_builder = repo + .store() + .tree_builder(destination_commit.tree().id().clone()); + for path in paths { + let repo_path = RepoPath::from(path); + match source_commit.tree().path_value(&repo_path) { + Some(value) => { + tree_builder.set(repo_path, value); + } + None => { + tree_builder.remove(repo_path); + } + } + } + let tree_id = tree_builder.write_tree(); + if &tree_id == destination_commit.tree().id() { + ui.write("Nothing changed.\n"); + } else { + let mut tx = repo.start_transaction(&format!( + "restore into commit {}", + destination_commit.id().hex() + )); + let new_commit = 
+ CommitBuilder::for_rewrite_from(ui.settings(), repo.store(), &destination_commit) + .set_tree(tree_id) + .write_to_transaction(&mut tx); + ui.write("Created "); + ui.write_commit_summary(tx.as_repo(), &new_commit); + ui.write("\n"); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + } + Ok(()) +} + +fn cmd_merge( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let revision_args = sub_matches.values_of("revisions").unwrap(); + if revision_args.len() < 2 { + return Err(CommandError::UserError(String::from( + "Merge requires at least two revisions", + ))); + } + let mut commits = vec![]; + let mut parent_ids = vec![]; + for revision_arg in revision_args { + let commit = resolve_single_rev(ui, mut_repo, revision_arg)?; + parent_ids.push(commit.id().clone()); + commits.push(commit); + } + let merged_tree = merge_commit_trees(repo.store(), &commits); + let mut tx = repo.start_transaction("merge commits"); + CommitBuilder::for_new_commit(ui.settings(), repo.store(), merged_tree.id().clone()) + .set_parents(parent_ids) + .set_open(false) + .write_to_transaction(&mut tx); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + + Ok(()) +} + +fn cmd_rebase( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit_to_rebase = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let mut parents = vec![]; + for revision_str in sub_matches.values_of("destination").unwrap() { + 
parents.push(resolve_single_rev(ui, mut_repo, revision_str)?); + } + let mut tx = repo.start_transaction(&format!("rebase commit {}", commit_to_rebase.id().hex())); + rebase_commit(ui.settings(), &mut tx, &commit_to_rebase, &parents); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + + Ok(()) +} + +fn cmd_backout( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit_to_back_out = resolve_revision_arg(ui, mut_repo, sub_matches)?; + let mut parents = vec![]; + for revision_str in sub_matches.values_of("destination").unwrap() { + parents.push(resolve_single_rev(ui, mut_repo, revision_str)?); + } + let mut tx = repo.start_transaction(&format!( + "back out commit {}", + commit_to_back_out.id().hex() + )); + back_out_commit(ui.settings(), &mut tx, &commit_to_back_out, &parents); + update_checkout_after_rewrite(ui, &mut tx); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + + Ok(()) +} + +fn cmd_evolve<'s>( + ui: &mut Ui<'s>, + matches: &ArgMatches, + _sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let repo = get_repo(ui, &matches)?; + + struct Listener<'a, 's, 'r> { + ui: &'a mut Ui<'s>, + repo: &'r dyn Repo, + }; + + impl<'a, 's, 'r> EvolveListener for Listener<'a, 's, 'r> { + fn orphan_evolved(&mut self, orphan: &Commit, new_commit: &Commit) { + self.ui.write("Resolving orphan: "); + self.ui.write_commit_summary(self.repo, &orphan); + self.ui.write("\n"); + self.ui.write("Resolved as: "); + self.ui.write_commit_summary(self.repo, &new_commit); + self.ui.write("\n"); + } + + fn orphan_target_ambiguous(&mut self, orphan: &Commit) { + self.ui + .write("Skipping orphan with ambiguous new 
parents: "); + self.ui.write_commit_summary(self.repo, &orphan); + self.ui.write("\n"); + } + + fn divergent_resolved(&mut self, sources: &[Commit], resolved: &Commit) { + self.ui.write("Resolving divergent commits:\n"); + for source in sources { + self.ui.write(" "); + self.ui.write_commit_summary(self.repo, &source); + self.ui.write("\n"); + } + self.ui.write("Resolved as: "); + self.ui.write_commit_summary(self.repo, &resolved); + self.ui.write("\n"); + } + + fn divergent_no_common_predecessor(&mut self, commit1: &Commit, commit2: &Commit) { + self.ui + .write("Skipping divergent commits with no common predecessor:\n"); + self.ui.write(" "); + self.ui.write_commit_summary(self.repo, &commit1); + self.ui.write("\n"); + self.ui.write(" "); + self.ui.write_commit_summary(self.repo, &commit2); + self.ui.write("\n"); + } + } + + // TODO: This clone is unnecessary. Maybe ui.write() etc should not require a + // mutable borrow? But the mutable borrow might be useful for making sure we + // have only one Ui instance we write to across threads? + let user_settings = ui.settings().clone(); + let mut listener = Listener { + ui, + // TODO: This should be using tx.as_repo() so the templater sees the updated state, but + // we can't do that because we let evolution::evolve() mutably borrow the Transaction. 
+ repo: repo.as_ref(), + }; + let mut tx = repo.start_transaction("evolve"); + evolve(&user_settings, &mut tx, &mut listener); + // TODO: update checkout + tx.commit(); + // TODO: update working copy + + Ok(()) +} + +fn cmd_debug( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + if let Some(resolve_matches) = sub_matches.subcommand_matches("resolverev") { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit = resolve_revision_arg(ui, mut_repo, resolve_matches)?; + writeln!(ui, "{}", commit.id().hex()); + } else if let Some(_wc_matches) = sub_matches.subcommand_matches("workingcopy") { + let repo = get_repo(ui, &matches)?; + let wc = repo.working_copy_locked(); + writeln!(ui, "Current commit: {:?}", wc.current_commit_id()); + writeln!(ui, "Current tree: {:?}", wc.current_tree_id()); + for (file, state) in wc.file_states().iter() { + writeln!( + ui, + "{:?} {:13?} {:10?} {:?}", + state.file_type, state.size, state.mtime.0, file + ); + } + } else if let Some(_wc_matches) = sub_matches.subcommand_matches("writeworkingcopy") { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let wc = owned_wc.lock().unwrap(); + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let old_commit_id = wc.current_commit_id(); + let new_commit_id = wc.commit(ui.settings(), mut_repo).id().clone(); + writeln!(ui, "old commit {:?}", old_commit_id); + writeln!(ui, "new commit {:?}", new_commit_id); + } else if let Some(template_matches) = sub_matches.subcommand_matches("template") { + let parse = TemplateParser::parse( + crate::template_parser::Rule::template, + template_matches.value_of("template").unwrap(), + ); + writeln!(ui, "{:?}", parse); + } else if let Some(_reindex_matches) = sub_matches.subcommand_matches("index") { + let repo = get_repo(ui, &matches)?; + let index = repo.index().index_file(); + let stats = index.as_composite().stats(); + 
writeln!(ui, "Number of commits: {}", stats.num_commits); + writeln!(ui, "Number of merges: {}", stats.num_merges); + writeln!(ui, "Max generation number: {}", stats.max_generation_number); + writeln!(ui, "Number of heads: {}", stats.num_heads); + writeln!(ui, "Stats per level:"); + for (i, level) in stats.levels.iter().enumerate() { + writeln!(ui, " Level {}:", i); + writeln!(ui, " Number of commits: {}", level.num_commits); + writeln!(ui, " Name: {}", level.name.as_ref().unwrap()); + } + } else if let Some(_reindex_matches) = sub_matches.subcommand_matches("reindex") { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let index = mut_repo.reindex(); + writeln!( + ui, + "Finished indexing {:?} commits.", + index.index_file().as_composite().num_commits() + ); + } else { + panic!("unhandled command: {:#?}", matches); + } + Ok(()) +} + +fn cmd_bench( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut criterion = Criterion::default(); + if let Some(command_matches) = sub_matches.subcommand_matches("commonancestors") { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let commit1 = + resolve_single_rev(ui, mut_repo, command_matches.value_of("revision1").unwrap())?; + let commit2 = + resolve_single_rev(ui, mut_repo, command_matches.value_of("revision2").unwrap())?; + let routine = || common_ancestor(vec![&commit1], vec![&commit2]); + writeln!(ui, "Result: {:?}", routine()); + criterion.bench_function("commonancestors", |bencher: &mut criterion::Bencher| { + bencher.iter(routine); + }); + } else if let Some(command_matches) = sub_matches.subcommand_matches("isancestor") { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let ancestor_commit = + resolve_single_rev(ui, mut_repo, command_matches.value_of("ancestor").unwrap())?; + let descendant_commit = resolve_single_rev( + ui, + 
mut_repo, + command_matches.value_of("descendant").unwrap(), + )?; + let index = repo.index().index_file(); + let index = index.as_composite(); + let routine = || index.is_ancestor(ancestor_commit.id(), descendant_commit.id()); + writeln!(ui, "Result: {:?}", routine()); + criterion.bench_function("isancestor", |bencher: &mut criterion::Bencher| { + bencher.iter(routine); + }); + } else if let Some(command_matches) = sub_matches.subcommand_matches("walkrevs") { + let mut repo = get_repo(ui, &matches)?; + let mut_repo = Arc::get_mut(&mut repo).unwrap(); + let unwanted_commit = + resolve_single_rev(ui, mut_repo, command_matches.value_of("unwanted").unwrap())?; + let wanted_commit = + resolve_single_rev(ui, mut_repo, command_matches.value_of("wanted").unwrap())?; + let index = repo.index().index_file(); + let index = index.as_composite(); + let routine = || { + index + .walk_revs( + &[wanted_commit.id().clone()], + &[unwanted_commit.id().clone()], + ) + .count() + }; + writeln!(ui, "Result: {:?}", routine()); + criterion.bench_function("walkrevs", |bencher: &mut criterion::Bencher| { + bencher.iter(routine); + }); + } else if let Some(command_matches) = sub_matches.subcommand_matches("resolveprefix") { + let repo = get_repo(ui, &matches)?; + let prefix = HexPrefix::new(command_matches.value_of("prefix").unwrap().to_string()); + let index = repo.index().index_file(); + let index = index.as_composite(); + let routine = || index.resolve_prefix(&prefix); + writeln!(ui, "Result: {:?}", routine()); + criterion.bench_function("resolveprefix", |bencher: &mut criterion::Bencher| { + bencher.iter(routine); + }); + } else { + panic!("unhandled command: {:#?}", matches); + }; + Ok(()) +} + +fn format_timestamp(timestamp: &Timestamp) -> String { + let utc = Utc + .timestamp( + timestamp.timestamp.0 as i64 / 1000, + (timestamp.timestamp.0 % 1000) as u32 * 1000000, + ) + .with_timezone(&FixedOffset::east(timestamp.tz_offset * 60)); + utc.format("%Y-%m-%d %H:%M:%S.%3f 
%:z").to_string() +} + +fn cmd_op_log( + ui: &mut Ui, + matches: &ArgMatches, + _op_matches: &ArgMatches, + _cmd_matches: &ArgMatches, +) -> Result<(), CommandError> { + let repo = get_repo(ui, &matches)?; + let view = repo.view(); + let head_op = view.base_op_head(); + let mut styler = ui.styler(); + let mut styler = styler.as_mut(); + struct OpTemplate; + impl Template for OpTemplate { + fn format(&self, op: &Operation, styler: &mut dyn Styler) { + // TODO: why can't this label be applied outside of the template? + styler.add_label("op-log".to_string()); + // TODO: Make this templated + styler.add_label("id".to_string()); + // TODO: support lookup by op-id prefix, so we don't need to print the full hash + // here + styler.write_str(&op.id().hex()); + styler.remove_label(); + styler.write_str(" "); + let metadata = &op.store_operation().metadata; + styler.add_label("user".to_string()); + styler.write_str(&format!("{}@{}", metadata.username, metadata.hostname)); + styler.remove_label(); + styler.write_str(" "); + styler.add_label("time".to_string()); + styler.write_str(&format!( + "{} - {}", + format_timestamp(&metadata.start_time), + format_timestamp(&metadata.end_time) + )); + styler.remove_label(); + styler.write_str("\n"); + styler.add_label("description".to_string()); + styler.write_str(&metadata.description); + styler.remove_label(); + + styler.remove_label(); + } + } + let template = OpTemplate; + + let mut graph = AsciiGraphDrawer::new(&mut styler); + for op in topo_order_reverse( + vec![head_op], + Box::new(|op: &Operation| op.id().clone()), + Box::new(|op: &Operation| op.parents()), + ) { + let mut edges = vec![]; + for parent in op.parents() { + edges.push(Edge::direct(parent.id().clone())); + } + let mut buffer = vec![]; + // TODO: only use color if requested + { + let writer = Box::new(&mut buffer); + let mut styler = ColorStyler::new(writer, ui.settings()); + template.format(&op, &mut styler); + } + if !buffer.ends_with(b"\n") { + buffer.push(b'\n'); 
+ } + graph.add_node(op.id(), &edges, b"o", &buffer); + } + + Ok(()) +} + +fn cmd_op_undo( + ui: &mut Ui, + matches: &ArgMatches, + _op_matches: &ArgMatches, + _cmd_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let bad_op = resolve_single_op(&repo, _cmd_matches.value_of("operation").unwrap())?; + let parent_ops = bad_op.parents(); + if parent_ops.len() > 1 { + return Err(CommandError::UserError( + "Cannot undo a merge operation".to_string(), + )); + } + if parent_ops.is_empty() { + return Err(CommandError::UserError( + "Cannot undo repo initialization".to_string(), + )); + } + + let fixed_view = { + let view = repo.view(); + let parent_view = parent_ops[0].view(); + let bad_view = bad_op.view(); + let current_view = view.base_op_head().view(); + merge_views( + repo.store(), + current_view.store_view(), + bad_view.store_view(), + parent_view.store_view(), + ) + }; + + let mut tx = repo.start_transaction(&format!("undo operation {}", bad_op.id().hex())); + tx.set_view(fixed_view); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + + Ok(()) +} +fn cmd_op_restore( + ui: &mut Ui, + matches: &ArgMatches, + _op_matches: &ArgMatches, + _cmd_matches: &ArgMatches, +) -> Result<(), CommandError> { + let mut repo = get_repo(ui, &matches)?; + let owned_wc = repo.working_copy().clone(); + let target_op = resolve_single_op(&repo, _cmd_matches.value_of("operation").unwrap())?; + let mut tx = repo.start_transaction(&format!("restore to operation {}", target_op.id().hex())); + tx.set_view(target_op.view().take_store_view()); + tx.commit(); + update_working_copy( + ui, + Arc::get_mut(&mut repo).unwrap(), + &owned_wc.lock().unwrap(), + )?; + + Ok(()) +} + +fn cmd_operation( + ui: &mut Ui, + matches: &ArgMatches, + sub_matches: &ArgMatches, +) -> Result<(), CommandError> { + if let Some(command_matches) = 
sub_matches.subcommand_matches("log") { + cmd_op_log(ui, matches, sub_matches, command_matches)?; + } else if let Some(command_matches) = sub_matches.subcommand_matches("undo") { + cmd_op_undo(ui, matches, sub_matches, command_matches)?; + } else if let Some(command_matches) = sub_matches.subcommand_matches("restore") { + cmd_op_restore(ui, matches, sub_matches, command_matches)?; + } else { + panic!("unhandled command: {:#?}", matches); + } + Ok(()) +} + +pub fn dispatch(mut ui: Ui, args: I) -> i32 +where + I: IntoIterator, + T: Into + Clone, +{ + let matches = get_app().get_matches_from(args); + if matches.subcommand_name().is_none() { + let mut help_text_buf = Vec::new(); + get_app().write_long_help(&mut help_text_buf).unwrap(); + ui.write(String::from_utf8(help_text_buf).unwrap().as_str()); + ui.write("\n"); + return 1; + } + let result = if let Some(sub_matches) = matches.subcommand_matches("init") { + cmd_init(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("checkout") { + cmd_checkout(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("files") { + cmd_files(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("diff") { + cmd_diff(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("status") { + cmd_status(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("log") { + cmd_log(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("obslog") { + cmd_obslog(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("describe") { + cmd_describe(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("close") { + cmd_close(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("open") { + 
cmd_open(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("duplicate") { + cmd_duplicate(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("prune") { + cmd_prune(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("new") { + cmd_new(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("squash") { + cmd_squash(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("discard") { + cmd_discard(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("restore") { + cmd_restore(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("merge") { + cmd_merge(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("rebase") { + cmd_rebase(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("backout") { + cmd_backout(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("evolve") { + cmd_evolve(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("operation") { + cmd_operation(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("bench") { + cmd_bench(&mut ui, &matches, &sub_matches) + } else if let Some(sub_matches) = matches.subcommand_matches("debug") { + cmd_debug(&mut ui, &matches, &sub_matches) + } else { + panic!("unhandled command: {:#?}", matches); + }; + match result { + Ok(()) => 0, + Err(CommandError::UserError(message)) => { + ui.write_error(format!("Error: {}\n", message).as_str()); + 1 + } + Err(CommandError::InternalError(message)) => { + ui.write_error(format!("Internal error: {}\n", message).as_str()); + 255 + } + } +} diff --git a/src/graphlog.rs 
b/src/graphlog.rs new file mode 100644 index 000000000..f27ffdbfe --- /dev/null +++ b/src/graphlog.rs @@ -0,0 +1,817 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::hash::Hash; +use std::io::Write; + +#[derive(Debug, Clone, PartialEq, Eq)] +// An edge to another node in the graph +pub enum Edge { + Present { target: T, direct: bool }, + Missing, +} + +impl Edge { + pub fn missing() -> Self { + Edge::Missing + } + + pub fn direct(id: T) -> Self { + Edge::Present { + target: id, + direct: true, + } + } + + pub fn indirect(id: T) -> Self { + Edge::Present { + target: id, + direct: false, + } + } +} + +pub struct AsciiGraphDrawer<'writer, K> { + writer: &'writer mut dyn Write, + edges: Vec>, + pending_text: Vec>, +} + +impl<'writer, K> AsciiGraphDrawer<'writer, K> +where + K: Clone + Eq + Hash, +{ + pub fn new(writer: &'writer mut dyn Write) -> Self { + Self { + writer, + edges: Default::default(), + pending_text: Default::default(), + } + } + + pub fn add_node(&mut self, id: &K, edges: &[Edge], node_symbol: &[u8], text: &[u8]) { + assert!(self.pending_text.is_empty()); + for line in text.split(|x| x == &b'\n') { + self.pending_text.push(line.to_vec()); + } + if self.pending_text.last() == Some(&vec![]) { + self.pending_text.pop().unwrap(); + } + self.pending_text.reverse(); + + // Check if an existing edge should be terminated by the new node. If there + // is, draw the new node in the same column. 
Otherwise, insert it at the right. + let edge_index = if let Some(edge_index) = self.index_by_target(id) { + // This edge terminates in the node we're adding + + // If we're inserting a merge somewhere that's not the very right, the edges + // right of it will move further right, so we need to prepare by inserting rows + // of '\'. + if edges.len() > 2 && edge_index < self.edges.len() - 1 { + for i in 2..edges.len() { + for edge in self.edges.iter().take(edge_index + 1) { + AsciiGraphDrawer::straight_edge(&mut self.writer, &edge); + } + for _ in 0..i - 2 { + self.writer.write_all(b" ").unwrap(); + } + for _ in edge_index + 1..self.edges.len() { + self.writer.write_all(b" \\").unwrap(); + } + self.writer.write_all(b"\n").unwrap(); + } + } + + self.edges.remove(edge_index); + edge_index + } else { + self.edges.len() + }; + + // Draw the edges to the left of the new node + for edge in self.edges.iter().take(edge_index) { + AsciiGraphDrawer::straight_edge(&mut self.writer, &edge); + } + // Draw the new node + self.writer.write_all(node_symbol).unwrap(); + // If it's a merge of many nodes, draw a vertical line to the right + for _ in 3..edges.len() { + self.writer.write_all(b"--").unwrap(); + } + if edges.len() > 2 { + self.writer.write_all(b"-.").unwrap(); + } + self.writer.write_all(b" ").unwrap(); + // Draw the edges to the right of the new node + for edge in self.edges.iter().skip(edge_index) { + AsciiGraphDrawer::straight_edge(&mut self.writer, &edge); + } + if edges.len() > 1 { + self.writer.write_all(b" ").unwrap(); + } + + self.maybe_write_pending_text(); + + // Update the data model. + for (i, edge) in edges.iter().enumerate() { + self.edges.insert(edge_index + i, edge.clone()); + } + + // If it's a merge commit, insert a row of '\'. 
+ if edges.len() >= 2 { + for edge in self.edges.iter().take(edge_index) { + AsciiGraphDrawer::straight_edge(&mut self.writer, &edge); + } + AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[edge_index]); + for _ in edge_index + 1..self.edges.len() { + self.writer.write_all(b"\\ ").unwrap(); + } + self.writer.write_all(b" ").unwrap(); + self.maybe_write_pending_text(); + } + + let pad_to_index = self.edges.len(); + // Close any edges to missing nodes. + for (i, edge) in edges.iter().enumerate().rev() { + if *edge == Edge::Missing { + self.close_edge(edge_index + i, pad_to_index); + } + } + + // Merge new edges that share the same target. + let mut source_index = 1; + while source_index < self.edges.len() { + if let Edge::Present { target, .. } = &self.edges[source_index] { + if let Some(target_index) = self.index_by_target(target) { + // There already is an edge leading to the same target node. Mark that we + // want to merge the higher index into the lower index. + if source_index > target_index { + self.merge_edges(source_index, target_index, pad_to_index); + // Don't increment source_index. + continue; + } + } + } + source_index += 1; + } + + // Emit any remaining lines of text. + while !self.pending_text.is_empty() { + for edge in self.edges.iter() { + AsciiGraphDrawer::straight_edge(&mut self.writer, &edge); + } + self.maybe_write_pending_text(); + } + } + + fn index_by_target(&self, id: &K) -> Option { + for (i, edge) in self.edges.iter().enumerate() { + match edge { + Edge::Present { target, .. } if target == id => return Some(i), + _ => {} + } + } + None + } + + /// Not an instance method so the caller doesn't need mutable access to the + /// whole struct. + fn straight_edge(writer: &mut dyn Write, edge: &Edge) { + AsciiGraphDrawer::straight_edge_no_space(writer, edge); + writer.write_all(b" ").unwrap(); + } + + /// Not an instance method so the caller doesn't need mutable access to the + /// whole struct. 
+ fn straight_edge_no_space(writer: &mut dyn Write, edge: &Edge) { + match edge { + Edge::Present { direct: true, .. } => { + writer.write_all(b"|").unwrap(); + } + Edge::Present { direct: false, .. } => { + writer.write_all(b":").unwrap(); + } + Edge::Missing => { + writer.write_all(b"|").unwrap(); + } + } + } + + fn merge_edges(&mut self, source: usize, target: usize, pad_to_index: usize) { + assert!(target < source); + self.edges.remove(source); + for i in 0..target { + AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]); + } + if source == target + 1 { + // If we're merging exactly one step to the left, draw a '/' to join the lines. + AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[target]); + for _ in source..self.edges.len() + 1 { + self.writer.write_all(b"/ ").unwrap(); + } + self.writer.write_all(b" ").unwrap(); + for _ in self.edges.len() + 1..pad_to_index { + self.writer.write_all(b" ").unwrap(); + } + self.maybe_write_pending_text(); + } else { + // If we're merging more than one step to the left, we need two rows: + // | |_|_|/ + // |/| | | + AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[target]); + for i in target + 1..source - 1 { + AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[i]); + self.writer.write_all(b"_").unwrap(); + } + AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[source - 1]); + for _ in source..self.edges.len() + 1 { + self.writer.write_all(b"/ ").unwrap(); + } + self.writer.write_all(b" ").unwrap(); + for _ in self.edges.len() + 1..pad_to_index { + self.writer.write_all(b" ").unwrap(); + } + self.maybe_write_pending_text(); + + for i in 0..target { + AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]); + } + AsciiGraphDrawer::straight_edge_no_space(&mut self.writer, &self.edges[target]); + self.writer.write_all(b"/").unwrap(); + for i in target + 1..self.edges.len() { + AsciiGraphDrawer::straight_edge(&mut self.writer, 
&self.edges[i]); + } + for _ in self.edges.len()..pad_to_index { + self.writer.write_all(b" ").unwrap(); + } + self.maybe_write_pending_text(); + } + } + + fn close_edge(&mut self, source: usize, pad_to_index: usize) { + self.edges.remove(source); + for i in 0..source { + AsciiGraphDrawer::straight_edge(&mut self.writer, &self.edges[i]); + } + self.writer.write_all(b"~").unwrap(); + for _ in source..self.edges.len() { + self.writer.write_all(b"/ ").unwrap(); + } + self.writer.write_all(b" ").unwrap(); + for _ in self.edges.len() + 1..pad_to_index { + self.writer.write_all(b" ").unwrap(); + } + self.maybe_write_pending_text(); + } + + fn maybe_write_pending_text(&mut self) { + if let Some(text) = self.pending_text.pop() { + self.writer.write_all(&text).unwrap(); + } + self.writer.write_all(b"\n").unwrap(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use indoc::indoc; + + #[test] + fn single_node() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&1, &[], b"@", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!(String::from_utf8_lossy(&buffer), "@ node 1\n"); + } + + #[test] + fn long_description() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&2, &[Edge::direct(1)], b"@", b"many\nlines\nof\ntext\n"); + graph.add_node(&1, &[], b"o", b"single line"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + @ many + | lines + | of + | text + o single line + " + } + ); + } + + #[test] + fn long_description_blank_lines() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node( + &2, + &[Edge::direct(1)], + b"@", + b"\n\nmany\n\nlines\n\nof\n\ntext\n\n\n", + ); + graph.add_node(&1, &[], b"o", b"single line"); + + // A final newline is ignored but all other newlines are respected. 
+ println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + @ + | + | many + | + | lines + | + | of + | + | text + | + | + o single line + " + } + ); + } + + #[test] + fn chain() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&3, &[Edge::direct(2)], b"@", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + @ node 3 + o node 2 + o node 1 + "} + ); + } + + #[test] + fn interleaved_chains() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&7, &[Edge::direct(5)], b"o", b"node 7"); + graph.add_node(&6, &[Edge::direct(4)], b"o", b"node 6"); + graph.add_node(&5, &[Edge::direct(3)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(2)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"@", b"node 3"); + graph.add_node(&2, &[], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 7 + | o node 6 + o | node 5 + | o node 4 + @ | node 3 + | o node 2 + o node 1 + "} + ); + } + + #[test] + fn independent_nodes() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&3, &[Edge::missing()], b"o", b"node 3"); + graph.add_node(&2, &[Edge::missing()], b"o", b"node 2"); + graph.add_node(&1, &[Edge::missing()], b"@", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! 
{r" + o node 3 + ~ + o node 2 + ~ + @ node 1 + ~ + "} + ); + } + + #[test] + fn left_chain_ends() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&4, &[Edge::direct(2)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::missing()], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 4 + | o node 3 + o | node 2 + ~/ + o node 1 + "} + ); + } + + #[test] + fn fork_multiple() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&4, &[Edge::direct(1)], b"@", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + @ node 4 + | o node 3 + |/ + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn fork_multiple_chains() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&10, &[Edge::direct(7)], b"o", b"node 10"); + graph.add_node(&9, &[Edge::direct(6)], b"o", b"node 9"); + graph.add_node(&8, &[Edge::direct(5)], b"o", b"node 8"); + graph.add_node(&7, &[Edge::direct(4)], b"o", b"node 7"); + graph.add_node(&6, &[Edge::direct(3)], b"o", b"node 6"); + graph.add_node(&5, &[Edge::direct(2)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! 
{r" + o node 10 + | o node 9 + | | o node 8 + o | | node 7 + | o | node 6 + | | o node 5 + o | | node 4 + | o | node 3 + |/ / + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn cross_over() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&5, &[Edge::direct(1)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(2)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 5 + | o node 4 + | | o node 3 + | |/ + |/| + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn cross_over_multiple() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&7, &[Edge::direct(1)], b"o", b"node 7"); + graph.add_node(&6, &[Edge::direct(3)], b"o", b"node 6"); + graph.add_node(&5, &[Edge::direct(2)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! 
{r" + o node 7 + | o node 6 + | | o node 5 + | | | o node 4 + | |_|/ + |/| | + | o | node 3 + |/ / + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn cross_over_new_on_left() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&6, &[Edge::direct(3)], b"o", b"node 6"); + graph.add_node(&5, &[Edge::direct(2)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 6 + | o node 5 + | | o node 4 + o | | node 3 + | |/ + |/| + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn merge_multiple() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node( + &5, + &[ + Edge::direct(1), + Edge::direct(2), + Edge::direct(3), + Edge::direct(4), + ], + b"@", + b"node 5\nmore\ntext", + ); + graph.add_node(&4, &[Edge::missing()], b"o", b"node 4"); + graph.add_node(&3, &[Edge::missing()], b"o", b"node 3"); + graph.add_node(&2, &[Edge::missing()], b"o", b"node 2"); + graph.add_node(&1, &[Edge::missing()], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + @---. 
node 5 + |\ \ \ more + | | | | text + | | | o node 4 + | | | ~ + | | o node 3 + | | ~ + | o node 2 + | ~ + o node 1 + ~ + "} + ); + } + + #[test] + fn fork_merge_in_central_edge() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&8, &[Edge::direct(1)], b"o", b"node 8"); + graph.add_node(&7, &[Edge::direct(5)], b"o", b"node 7"); + graph.add_node( + &6, + &[Edge::direct(2)], + b"o", + b"node 6\nwith\nsome\nmore\nlines", + ); + graph.add_node(&5, &[Edge::direct(4), Edge::direct(3)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 8 + | o node 7 + | | o node 6 + | | | with + | | | some + | | | more + | | | lines + | o | node 5 + | |\ \ + | o | | node 4 + |/ / / + | o | node 3 + |/ / + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn fork_merge_multiple() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&6, &[Edge::direct(5)], b"o", b"node 6"); + graph.add_node( + &5, + &[Edge::direct(2), Edge::direct(3), Edge::direct(4)], + b"o", + b"node 5", + ); + graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 6 + o-. 
node 5 + |\ \ + | | o node 4 + | o | node 3 + | |/ + o | node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn fork_merge_multiple_in_central_edge() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&10, &[Edge::direct(1)], b"o", b"node 10"); + graph.add_node(&9, &[Edge::direct(7)], b"o", b"node 9"); + graph.add_node(&8, &[Edge::direct(2)], b"o", b"node 8"); + graph.add_node( + &7, + &[ + Edge::direct(6), + Edge::direct(5), + Edge::direct(4), + Edge::direct(3), + ], + b"o", + b"node 7", + ); + graph.add_node(&6, &[Edge::direct(1)], b"o", b"node 6"); + graph.add_node(&5, &[Edge::direct(1)], b"o", b"node 5"); + graph.add_node(&4, &[Edge::direct(1)], b"o", b"node 4"); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node(&2, &[Edge::direct(1)], b"o", b"node 2"); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 10 + | o node 9 + | | o node 8 + | | \ + | | \ + | o---. | node 7 + | |\ \ \ \ + | o | | | | node 6 + |/ / / / / + | o | | | node 5 + |/ / / / + | o | | node 4 + |/ / / + | o | node 3 + |/ / + | o node 2 + |/ + o node 1 + "} + ); + } + + #[test] + fn merge_multiple_missing_edges() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node( + &1, + &[ + Edge::missing(), + Edge::missing(), + Edge::missing(), + Edge::missing(), + ], + b"@", + b"node 1\nwith\nmany\nlines\nof\ntext", + ); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + @---. 
node 1 + |\ \ \ with + | | | ~ many + | | ~ lines + | ~ of + ~ text + "} + ); + } + + #[test] + fn merge_missing_edges_and_fork() { + let mut buffer = vec![]; + let mut graph = AsciiGraphDrawer::new(&mut buffer); + graph.add_node(&3, &[Edge::direct(1)], b"o", b"node 3"); + graph.add_node( + &2, + &[ + Edge::missing(), + Edge::indirect(1), + Edge::missing(), + Edge::indirect(1), + ], + b"o", + b"node 2\nwith\nmany\nlines\nof\ntext", + ); + graph.add_node(&1, &[], b"o", b"node 1"); + + println!("{}", String::from_utf8_lossy(&buffer)); + assert_eq!( + String::from_utf8_lossy(&buffer), + indoc! {r" + o node 3 + | o---. node 2 + | |\ \ \ with + | | : ~/ many + | ~/ / lines + |/ / of + |/ text + o node 1 + "} + ); + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 000000000..dc3201b8a --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,28 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![deny(unused_must_use)] + +#[macro_use] +extern crate pest_derive; + +pub mod commands; +pub mod graphlog; +pub mod styler; +pub mod template_parser; +pub mod templater; +pub mod ui; + +// TODO: make this a separate crate? 
+pub mod testutils; diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 000000000..06f99fcf2 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,27 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj::commands::dispatch; +use jj::ui::Ui; +use jj_lib::settings::UserSettings; + +fn main() { + // TODO: We need to do some argument parsing here, at least for things like + // --config, and for reading user configs from the repo pointed to by + // -R. + let user_settings = UserSettings::for_user().unwrap(); + let ui = Ui::for_terminal(user_settings); + let status = dispatch(ui, &mut std::env::args_os()); + std::process::exit(status); +} diff --git a/src/styler.rs b/src/styler.rs new file mode 100644 index 000000000..149659404 --- /dev/null +++ b/src/styler.rs @@ -0,0 +1,198 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::HashMap; +use std::io::{Error, Read, Write}; + +use jj_lib::settings::UserSettings; + +// Lets the caller label strings and translates the labels to colors +pub trait Styler: Write { + fn write_bytes(&mut self, data: &[u8]) { + self.write_all(data).unwrap() + } + + fn write_str(&mut self, text: &str) { + self.write_all(text.as_bytes()).unwrap() + } + + fn write_from_reader(&mut self, reader: &mut dyn Read) { + let mut buffer = vec![]; + reader.read_to_end(&mut buffer).unwrap(); + self.write_all(buffer.as_slice()).unwrap() + } + + fn add_label(&mut self, label: String); + + fn remove_label(&mut self); +} + +pub struct PlainTextStyler<'a> { + output: Box, +} + +impl<'a> PlainTextStyler<'a> { + pub fn new(output: Box) -> PlainTextStyler<'a> { + Self { output } + } +} + +impl Write for PlainTextStyler<'_> { + fn write(&mut self, data: &[u8]) -> Result { + self.output.write(data) + } + + fn flush(&mut self) -> Result<(), Error> { + self.output.flush() + } +} + +impl Styler for PlainTextStyler<'_> { + fn add_label(&mut self, _label: String) {} + + fn remove_label(&mut self) {} +} + +pub struct ColorStyler<'a> { + output: Box, + colors: HashMap, + labels: Vec, + cached_colors: HashMap, Vec>, + current_color: Vec, +} + +fn config_colors(user_settings: &UserSettings) -> HashMap { + let mut result = HashMap::new(); + result.insert(String::from("error"), String::from("red")); + + result.insert(String::from("commit_id"), String::from("blue")); + result.insert(String::from("commit_id open"), String::from("green")); + result.insert(String::from("change_id"), String::from("magenta")); + result.insert(String::from("author"), String::from("yellow")); + result.insert(String::from("committer"), String::from("yellow")); + result.insert(String::from("pruned"), String::from("red")); + result.insert(String::from("obsolete"), String::from("red")); + result.insert(String::from("orphan"), String::from("red")); + result.insert(String::from("divergent"), 
String::from("red")); + result.insert(String::from("conflict"), String::from("red")); + + result.insert(String::from("diff header"), String::from("yellow")); + result.insert(String::from("diff left"), String::from("red")); + result.insert(String::from("diff right"), String::from("green")); + + result.insert(String::from("op-log id"), String::from("blue")); + result.insert(String::from("op-log user"), String::from("yellow")); + result.insert(String::from("op-log time"), String::from("magenta")); + + if let Ok(table) = user_settings.config().get_table("colors") { + for (key, value) in table { + result.insert(key, value.to_string()); + } + } + result +} + +impl<'a> ColorStyler<'a> { + pub fn new(output: Box, user_settings: &UserSettings) -> ColorStyler<'a> { + ColorStyler { + output, + colors: config_colors(user_settings), + labels: vec![], + cached_colors: HashMap::new(), + current_color: b"\x1b[0m".to_vec(), + } + } + + fn current_color(&mut self) -> Vec { + if let Some(cached) = self.cached_colors.get(&self.labels) { + cached.clone() + } else { + let mut best_match = (-1, ""); + for (key, value) in &self.colors { + let mut num_matching = 0; + let mut valid = true; + for label in key.split_whitespace() { + if !self.labels.contains(&label.to_string()) { + valid = false; + break; + } + num_matching += 1; + } + if !valid { + continue; + } + if num_matching >= best_match.0 { + best_match = (num_matching, value) + } + } + + let color = self.color_for_name(&best_match.1); + self.cached_colors + .insert(self.labels.clone(), color.clone()); + color + } + } + + fn color_for_name(&self, color_name: &str) -> Vec { + match color_name { + "black" => b"\x1b[30m".to_vec(), + "red" => b"\x1b[31m".to_vec(), + "green" => b"\x1b[32m".to_vec(), + "yellow" => b"\x1b[33m".to_vec(), + "blue" => b"\x1b[34m".to_vec(), + "magenta" => b"\x1b[35m".to_vec(), + "cyan" => b"\x1b[36m".to_vec(), + "white" => b"\x1b[37m".to_vec(), + "bright black" => b"\x1b[1;30m".to_vec(), + "bright red" => 
b"\x1b[1;31m".to_vec(), + "bright green" => b"\x1b[1;32m".to_vec(), + "bright yellow" => b"\x1b[1;33m".to_vec(), + "bright blue" => b"\x1b[1;34m".to_vec(), + "bright magenta" => b"\x1b[1;35m".to_vec(), + "bright cyan" => b"\x1b[1;36m".to_vec(), + "bright white" => b"\x1b[1;37m".to_vec(), + _ => b"\x1b[0m".to_vec(), + } + } +} + +impl Write for ColorStyler<'_> { + fn write(&mut self, data: &[u8]) -> Result { + self.output.write(data) + } + + fn flush(&mut self) -> Result<(), Error> { + self.output.flush() + } +} + +impl Styler for ColorStyler<'_> { + fn add_label(&mut self, label: String) { + self.labels.push(label); + let new_color = self.current_color(); + if new_color != self.current_color { + self.output.write_all(&new_color).unwrap(); + } + self.current_color = new_color; + } + + fn remove_label(&mut self) { + self.labels.pop(); + let new_color = self.current_color(); + if new_color != self.current_color { + self.output.write_all(&new_color).unwrap(); + } + self.current_color = new_color; + } +} diff --git a/src/template.pest b/src/template.pest new file mode 100644 index 000000000..74e61237b --- /dev/null +++ b/src/template.pest @@ -0,0 +1,52 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

// Grammar for the commit template language (parsed by src/template_parser.rs
// via pest).
//
// Example:
// "commit: " short(commit_id) "\n"
// predecessors % ("predecessor: " commit_id)
// parents % (commit_id " is a parent of " super.commit_id)

whitespace = { " " | "\n" }

// A backslash escape inside a string literal: \n, \" or \\.
escape = @{ "\\" ~ ("n" | "\"" | "\\") }
// Any single character that doesn't need escaping inside a string literal.
literal_char = @{ !("\"" | "\\") ~ ANY }
raw_literal = @{ literal_char+ }
// A double-quoted string literal made of raw runs and escapes.
literal = { "\"" ~ (raw_literal | escape)* ~ "\"" }

identifier = @{ (ASCII_ALPHANUMERIC | "_")+ }

// A call like `short(commit_id)` — the "(" must follow the name directly.
function = { identifier ~ "(" ~ template ~ ("," ~ template)* ~ ")" }

// A chained call like `.first_line()`; `maybe_method` at the end allows
// further chaining (`.a().b()`).
method = { "." ~ identifier ~ "(" ~ template ~ ("," ~ template)* ~ ")" ~ maybe_method }

// Either a method chain or nothing; matching "" makes the chain optional
// while still producing a pair the parser can inspect.
maybe_method = { method | "" }

// Note that "x(y)" is a function call but "x (y)" concatenates "x" and "y"
term = {
    ("(" ~ term ~ ")") ~ maybe_method
  | function ~ maybe_method
  | identifier ~ maybe_method
  | literal ~ maybe_method
  | ""
}

// Two or more whitespace-separated terms; their outputs are concatenated.
list = {
    ("(" ~ list ~ ")")
  | term ~ (whitespace+ ~ term)+
}

template = {
    list
  | term
}
diff --git a/src/template_parser.rs b/src/template_parser.rs
new file mode 100644
index 000000000..a7325a798
--- /dev/null
+++ b/src/template_parser.rs
@@ -0,0 +1,416 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +extern crate pest; + +use pest::iterators::Pair; +use pest::iterators::Pairs; +use pest::Parser; + +use jj_lib::commit::Commit; +use jj_lib::store::{CommitId, Signature}; + +use crate::styler::PlainTextStyler; +use crate::templater::{ + AuthorProperty, ChangeIdProperty, CommitIdKeyword, CommitterProperty, ConditionalTemplate, + ConflictProperty, ConstantTemplateProperty, CurrentCheckoutProperty, DescriptionProperty, + DivergentProperty, DynamicLabelTemplate, LabelTemplate, ListTemplate, LiteralTemplate, + ObsoleteProperty, OpenProperty, OrphanProperty, PrunedProperty, StringPropertyTemplate, + Template, TemplateFunction, TemplateProperty, +}; +use jj_lib::repo::Repo; + +#[derive(Parser)] +#[grammar = "template.pest"] +pub struct TemplateParser; + +fn parse_string_literal(pair: Pair) -> String { + assert_eq!(pair.as_rule(), Rule::literal); + let mut result = String::new(); + for part in pair.into_inner() { + match part.as_rule() { + Rule::raw_literal => { + result.push_str(part.as_str()); + } + Rule::escape => match part.as_str().as_bytes()[1] as char { + '"' => result.push('"'), + '\\' => result.push('\\'), + 'n' => result.push('\n'), + char => panic!("invalid escape: \\{:?}", char), + }, + _ => panic!("unexpected part of string: {:?}", part), + } + } + result +} + +struct StringShort; + +impl TemplateProperty for StringShort { + fn extract(&self, context: &String) -> String { + context.chars().take(12).collect() + } +} + +struct StringFirstLine; + +impl TemplateProperty for StringFirstLine { + fn extract(&self, context: &String) -> String { + context.lines().next().unwrap().to_string() + } +} + +struct CommitIdShortest; + +impl TemplateProperty for CommitIdShortest { + fn extract(&self, context: &CommitId) -> String { + CommitIdKeyword::shortest_format(context.clone()) + } +} + +struct SignatureName; + +impl TemplateProperty for SignatureName { + fn extract(&self, context: &Signature) -> String { + context.name.clone() + } +} + +struct SignatureEmail; + +impl 
TemplateProperty for SignatureEmail { + fn extract(&self, context: &Signature) -> String { + context.email.clone() + } +} + +fn parse_method_chain<'a, I: 'a>( + pair: Pair, + input_property: Property<'a, I>, +) -> Property<'a, I> { + assert_eq!(pair.as_rule(), Rule::maybe_method); + if pair.as_str().is_empty() { + input_property + } else { + let method = pair.into_inner().next().unwrap(); + match input_property { + Property::String(property) => { + let next_method = parse_string_method(method); + next_method.after(property) + } + Property::Boolean(property) => { + let next_method = parse_boolean_method(method); + next_method.after(property) + } + Property::CommitId(property) => { + let next_method = parse_commit_id_method(method); + next_method.after(property) + } + Property::Signature(property) => { + let next_method = parse_signature_method(method); + next_method.after(property) + } + } + } +} + +fn parse_string_method<'a>(method: Pair) -> Property<'a, String> { + assert_eq!(method.as_rule(), Rule::method); + let mut inner = method.into_inner(); + let name = inner.next().unwrap(); + // TODO: validate arguments + + let this_function = match name.as_str() { + "short" => Property::String(Box::new(StringShort)), + "first_line" => Property::String(Box::new(StringFirstLine)), + name => panic!("no such string method: {}", name), + }; + let chain_method = inner.last().unwrap(); + parse_method_chain(chain_method, this_function) +} + +fn parse_boolean_method<'a>(method: Pair) -> Property<'a, bool> { + assert_eq!(method.as_rule(), Rule::maybe_method); + let mut inner = method.into_inner(); + let name = inner.next().unwrap(); + // TODO: validate arguments + + panic!("no such boolean method: {}", name.as_str()); +} + +// TODO: pass a context to the returned function (we need the repo to find the +// shortest unambiguous prefix) +fn parse_commit_id_method<'a>(method: Pair) -> Property<'a, CommitId> { + assert_eq!(method.as_rule(), Rule::method); + let mut inner = 
method.into_inner(); + let name = inner.next().unwrap(); + // TODO: validate arguments + + let this_function = match name.as_str() { + "short" => Property::String(Box::new(CommitIdShortest)), + name => panic!("no such commit id method: {}", name), + }; + let chain_method = inner.last().unwrap(); + parse_method_chain(chain_method, this_function) +} + +fn parse_signature_method<'a>(method: Pair) -> Property<'a, Signature> { + assert_eq!(method.as_rule(), Rule::method); + let mut inner = method.into_inner(); + let name = inner.next().unwrap(); + // TODO: validate arguments + + let this_function: Property<'a, Signature> = match name.as_str() { + // TODO: Automatically label these too (so author.name() gets + // labels "author" *and" "name". Perhaps drop parentheses + // from syntax for that? Or maybe this should be using + // syntax for nested records (e.g. + // `author % (name "<" email ">")`)? + "name" => Property::String(Box::new(SignatureName)), + "email" => Property::String(Box::new(SignatureEmail)), + name => panic!("no such commit id method: {}", name), + }; + let chain_method = inner.last().unwrap(); + parse_method_chain(chain_method, this_function) +} + +enum Property<'a, I> { + String(Box + 'a>), + Boolean(Box + 'a>), + CommitId(Box + 'a>), + Signature(Box + 'a>), +} + +impl<'a, I: 'a> Property<'a, I> { + fn after(self, first: Box + 'a>) -> Property<'a, C> { + match self { + Property::String(property) => Property::String(Box::new(TemplateFunction::new( + first, + Box::new(move |value| property.extract(&value)), + ))), + Property::Boolean(property) => Property::Boolean(Box::new(TemplateFunction::new( + first, + Box::new(move |value| property.extract(&value)), + ))), + Property::CommitId(property) => Property::CommitId(Box::new(TemplateFunction::new( + first, + Box::new(move |value| property.extract(&value)), + ))), + Property::Signature(property) => Property::Signature(Box::new(TemplateFunction::new( + first, + Box::new(move |value| property.extract(&value)), 
+ ))), + } + } +} + +fn parse_commit_keyword<'a, 'r: 'a>( + repo: &'r dyn Repo, + pair: Pair, +) -> (Property<'a, Commit>, String) { + assert_eq!(pair.as_rule(), Rule::identifier); + let property = match pair.as_str() { + "description" => Property::String(Box::new(DescriptionProperty)), + "change_id" => Property::String(Box::new(ChangeIdProperty)), + "commit_id" => Property::CommitId(Box::new(CommitIdKeyword)), + "author" => Property::Signature(Box::new(AuthorProperty)), + "committer" => Property::Signature(Box::new(CommitterProperty)), + "open" => Property::Boolean(Box::new(OpenProperty)), + "pruned" => Property::Boolean(Box::new(PrunedProperty)), + "current_checkout" => Property::Boolean(Box::new(CurrentCheckoutProperty { repo })), + "obsolete" => Property::Boolean(Box::new(ObsoleteProperty { repo })), + "orphan" => Property::Boolean(Box::new(OrphanProperty { repo })), + "divergent" => Property::Boolean(Box::new(DivergentProperty { repo })), + "conflict" => Property::Boolean(Box::new(ConflictProperty)), + name => panic!("unexpected identifier: {}", name), + }; + (property, pair.as_str().to_string()) +} + +fn coerce_to_string<'a, I: 'a>( + property: Property<'a, I>, +) -> Box + 'a> { + match property { + Property::String(property) => property, + Property::Boolean(property) => Box::new(TemplateFunction::new( + property, + Box::new(|value| String::from(if value { "true" } else { "false" })), + )), + Property::CommitId(property) => Box::new(TemplateFunction::new( + property, + Box::new(CommitIdKeyword::default_format), + )), + Property::Signature(property) => Box::new(TemplateFunction::new( + property, + Box::new(|signature| signature.name), + )), + } +} + +fn parse_boolean_commit_property<'a, 'r: 'a>( + repo: &'r dyn Repo, + pair: Pair, +) -> Box + 'a> { + let mut inner = pair.into_inner(); + let pair = inner.next().unwrap(); + let _method = inner.next().unwrap(); + assert!(inner.next().is_none()); + match pair.as_rule() { + Rule::identifier => match 
parse_commit_keyword(repo, pair.clone()).0 { + Property::Boolean(property) => property, + _ => panic!("cannot yet use this as boolean: {:?}", pair), + }, + _ => panic!("cannot yet use this as boolean: {:?}", pair), + } +} + +fn parse_commit_term<'a, 'r: 'a>( + repo: &'r dyn Repo, + pair: Pair, +) -> Box + 'a> { + assert_eq!(pair.as_rule(), Rule::term); + if pair.as_str().is_empty() { + Box::new(LiteralTemplate(String::new())) + } else { + let mut inner = pair.into_inner(); + let expr = inner.next().unwrap(); + let maybe_method = inner.next().unwrap(); + assert!(inner.next().is_none()); + match expr.as_rule() { + Rule::literal => { + let text = parse_string_literal(expr); + if maybe_method.as_str().is_empty() { + Box::new(LiteralTemplate(text)) + } else { + let input_property = + Property::String(Box::new(ConstantTemplateProperty { output: text })); + let property = parse_method_chain(maybe_method, input_property); + let string_property = coerce_to_string(property); + Box::new(StringPropertyTemplate { + property: string_property, + }) + } + } + Rule::identifier => { + let (term_property, labels) = parse_commit_keyword(repo, expr); + let property = parse_method_chain(maybe_method, term_property); + let string_property = coerce_to_string(property); + Box::new(LabelTemplate::new( + Box::new(StringPropertyTemplate { + property: string_property, + }), + labels, + )) + } + Rule::function => { + let mut inner = expr.into_inner(); + let name = inner.next().unwrap().as_str(); + match name { + "label" => { + let label_pair = inner.next().unwrap(); + let label_template = parse_commit_template_rule( + repo, + label_pair.into_inner().next().unwrap(), + ); + let arg_template = match inner.next() { + None => panic!("label() requires two arguments"), + Some(pair) => pair, + }; + if inner.next().is_some() { + panic!("label() accepts only two arguments") + } + let content: Box + 'a> = + parse_commit_template_rule(repo, arg_template); + let get_labels = move |commit: &Commit| -> 
String { + let mut buf: Vec = vec![]; + { + let writer = Box::new(&mut buf); + let mut styler = PlainTextStyler::new(writer); + label_template.format(commit, &mut styler); + } + String::from_utf8(buf).unwrap() + }; + Box::new(DynamicLabelTemplate::new(content, Box::new(get_labels))) + } + "if" => { + let condition_pair = inner.next().unwrap(); + let condition_template = condition_pair.into_inner().next().unwrap(); + let condition = parse_boolean_commit_property(repo, condition_template); + + let true_template = match inner.next() { + None => panic!("if() requires at least two arguments"), + Some(pair) => parse_commit_template_rule(repo, pair), + }; + let false_template = match inner.next() { + None => None, + Some(pair) => Some(parse_commit_template_rule(repo, pair)), + }; + if inner.next().is_some() { + panic!("if() accepts at most three arguments") + } + Box::new(ConditionalTemplate::new( + condition, + true_template, + false_template, + )) + } + name => panic!("function {} not implemented", name), + } + } + other => panic!("unexpected term: {:?}", other), + } + } +} + +fn parse_commit_template_rule<'a, 'r: 'a>( + repo: &'r dyn Repo, + pair: Pair, +) -> Box + 'a> { + match pair.as_rule() { + Rule::template => { + let mut inner = pair.into_inner(); + let formatter = parse_commit_template_rule(repo, inner.next().unwrap()); + assert!(inner.next().is_none()); + formatter + } + Rule::term => parse_commit_term(repo, pair), + Rule::list => { + let mut formatters: Vec>> = vec![]; + for inner_pair in pair.into_inner() { + formatters.push(parse_commit_template_rule(repo, inner_pair)); + } + Box::new(ListTemplate(formatters)) + } + _ => Box::new(LiteralTemplate(String::new())), + } +} + +pub fn parse_commit_template<'a, 'r: 'a>( + repo: &'r dyn Repo, + template_text: &str, +) -> Box + 'a> { + let mut pairs: Pairs = TemplateParser::parse(Rule::template, template_text).unwrap(); + + let first_pair = pairs.next().unwrap(); + assert!(pairs.next().is_none()); + + if 
first_pair.as_span().end() != template_text.len() { + panic!( + "failed to parse template past position {}", + first_pair.as_span().end() + ); + } + + parse_commit_template_rule(repo, first_pair) +} diff --git a/src/templater.rs b/src/templater.rs new file mode 100644 index 000000000..66126e80e --- /dev/null +++ b/src/templater.rs @@ -0,0 +1,327 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::borrow::BorrowMut; +use std::ops::Add; + +use jj_lib::commit::Commit; +use jj_lib::repo::Repo; +use jj_lib::store::{CommitId, Signature}; + +use crate::styler::Styler; + +pub trait Template { + fn format(&self, context: &C, styler: &mut dyn Styler); +} + +// TODO: Extract a trait for this type? 
+pub struct TemplateFormatter<'s, 't: 's, C> { + template: Box + 't>, + styler: &'s mut dyn Styler, +} + +impl<'s, 't: 's, C> TemplateFormatter<'s, 't, C> { + pub fn new(template: Box + 't>, styler: &'s mut dyn Styler) -> Self { + TemplateFormatter { template, styler } + } + + pub fn format<'c, 'a: 'c>(&'a mut self, context: &'c C) { + self.template.format(context, self.styler.borrow_mut()); + } +} + +pub struct LiteralTemplate(pub String); + +impl Template for LiteralTemplate { + fn format(&self, _context: &C, styler: &mut dyn Styler) { + styler.write_str(&self.0) + } +} + +// TODO: figure out why this lifetime is needed +pub struct LabelTemplate<'a, C> { + content: Box + 'a>, + labels: Vec, +} + +impl<'a, C> LabelTemplate<'a, C> { + pub fn new(content: Box + 'a>, labels: String) -> Self { + let labels: Vec = labels + .split_whitespace() + .map(|label| label.to_string()) + .collect(); + LabelTemplate { content, labels } + } +} + +impl<'a, C> Template for LabelTemplate<'a, C> { + fn format(&self, context: &C, styler: &mut dyn Styler) { + for label in &self.labels { + styler.add_label(label.clone()); + } + self.content.format(context, styler); + for _label in &self.labels { + styler.remove_label(); + } + } +} + +// TODO: figure out why this lifetime is needed +pub struct DynamicLabelTemplate<'a, C> { + content: Box + 'a>, + label_property: Box String + 'a>, +} + +impl<'a, C> DynamicLabelTemplate<'a, C> { + pub fn new( + content: Box + 'a>, + label_property: Box String + 'a>, + ) -> Self { + DynamicLabelTemplate { + content, + label_property, + } + } +} + +impl<'a, C> Template for DynamicLabelTemplate<'a, C> { + fn format(&self, context: &C, styler: &mut dyn Styler) { + let labels = self.label_property.as_ref()(context); + let labels: Vec = labels + .split_whitespace() + .map(|label| label.to_string()) + .collect(); + for label in &labels { + styler.add_label(label.clone()); + } + self.content.format(context, styler); + for _label in &labels { + 
styler.remove_label(); + } + } +} + +// TODO: figure out why this lifetime is needed +pub struct ListTemplate<'a, C>(pub Vec + 'a>>); + +impl<'a, C> Template for ListTemplate<'a, C> { + fn format(&self, context: &C, styler: &mut dyn Styler) { + for template in &self.0 { + template.format(context, styler) + } + } +} + +pub trait TemplateProperty { + fn extract(&self, context: &C) -> O; +} + +pub struct ConstantTemplateProperty { + pub output: O, +} + +impl TemplateProperty for ConstantTemplateProperty { + fn extract(&self, _context: &C) -> O { + self.output.clone() + } +} + +// TODO: figure out why this lifetime is needed +pub struct StringPropertyTemplate<'a, C> { + pub property: Box + 'a>, +} + +impl<'a, C> Template for StringPropertyTemplate<'a, C> { + fn format(&self, context: &C, styler: &mut dyn Styler) { + let text = self.property.extract(context); + styler.write_str(&text); + } +} + +pub struct ChangeIdProperty; + +impl<'r> TemplateProperty for ChangeIdProperty { + fn extract(&self, context: &Commit) -> String { + context.change_id().hex() + } +} + +pub struct DescriptionProperty; + +impl<'r> TemplateProperty for DescriptionProperty { + fn extract(&self, context: &Commit) -> String { + let description = context.description().to_owned(); + if description.ends_with('\n') { + description + } else { + description.add("\n") + } + } +} + +pub struct AuthorProperty; + +impl<'r> TemplateProperty for AuthorProperty { + fn extract(&self, context: &Commit) -> Signature { + context.author().clone() + } +} + +pub struct CommitterProperty; + +impl<'r> TemplateProperty for CommitterProperty { + fn extract(&self, context: &Commit) -> Signature { + context.committer().clone() + } +} + +pub struct OpenProperty; + +impl<'r> TemplateProperty for OpenProperty { + fn extract(&self, context: &Commit) -> bool { + context.is_open() + } +} + +pub struct PrunedProperty; + +impl TemplateProperty for PrunedProperty { + fn extract(&self, context: &Commit) -> bool { + context.is_pruned() 
+ } +} + +pub struct CurrentCheckoutProperty<'r> { + pub repo: &'r dyn Repo, +} + +impl<'r> TemplateProperty for CurrentCheckoutProperty<'r> { + fn extract(&self, context: &Commit) -> bool { + context.id() == self.repo.view().checkout() + } +} + +pub struct ObsoleteProperty<'r> { + pub repo: &'r dyn Repo, +} + +impl<'r> TemplateProperty for ObsoleteProperty<'r> { + fn extract(&self, context: &Commit) -> bool { + self.repo.evolution().is_obsolete(context.id()) + } +} + +pub struct OrphanProperty<'r> { + pub repo: &'r dyn Repo, +} + +impl<'r> TemplateProperty for OrphanProperty<'r> { + fn extract(&self, context: &Commit) -> bool { + self.repo.evolution().is_orphan(context.id()) + } +} + +pub struct DivergentProperty<'r> { + pub repo: &'r dyn Repo, +} + +impl<'r> TemplateProperty for DivergentProperty<'r> { + fn extract(&self, context: &Commit) -> bool { + self.repo.evolution().is_divergent(context.change_id()) + } +} + +pub struct ConflictProperty; + +impl<'r> TemplateProperty for ConflictProperty { + fn extract(&self, context: &Commit) -> bool { + context.tree().has_conflict() + } +} + +pub struct ConditionalTemplate<'a, C> { + pub condition: Box + 'a>, + pub true_template: Box + 'a>, + pub false_template: Option + 'a>>, +} + +// TODO: figure out why this lifetime is needed +impl<'a, C> ConditionalTemplate<'a, C> { + pub fn new( + condition: Box + 'a>, + true_template: Box + 'a>, + false_template: Option + 'a>>, + ) -> Self { + ConditionalTemplate { + condition, + true_template, + false_template, + } + } +} + +impl<'a, C> Template for ConditionalTemplate<'a, C> { + fn format(&self, context: &C, styler: &mut dyn Styler) { + if self.condition.extract(context) { + self.true_template.format(context, styler); + } else if let Some(false_template) = &self.false_template { + false_template.format(context, styler); + } + } +} + +// TODO: If needed, add a ContextualTemplateFunction where the function also +// gets the context +pub struct TemplateFunction<'a, C, I, O> { + pub 
property: Box + 'a>, + pub function: Box O + 'a>, +} + +// TODO: figure out why this lifetime is needed +impl<'a, C, I, O> TemplateFunction<'a, C, I, O> { + pub fn new( + template: Box + 'a>, + function: Box O + 'a>, + ) -> Self { + TemplateFunction { + property: template, + function, + } + } +} + +impl<'a, C, I, O> TemplateProperty for TemplateFunction<'a, C, I, O> { + fn extract(&self, context: &C) -> O { + (self.function)(self.property.extract(context)) + } +} + +pub struct CommitIdKeyword; + +impl CommitIdKeyword { + pub fn default_format(commit_id: CommitId) -> String { + commit_id.hex() + } + + pub fn shortest_format(commit_id: CommitId) -> String { + // TODO: make this actually be the shortest unambiguous prefix + commit_id.hex()[..12].to_string() + } +} + +impl<'r> TemplateProperty for CommitIdKeyword { + fn extract(&self, context: &Commit) -> CommitId { + context.id().clone() + } +} diff --git a/src/testutils.rs b/src/testutils.rs new file mode 100644 index 000000000..0f71520e9 --- /dev/null +++ b/src/testutils.rs @@ -0,0 +1,56 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::io::Cursor; +use std::path::{Path, PathBuf}; + +use jj_lib::testutils::user_settings; + +use crate::commands; +use crate::ui::Ui; + +pub struct CommandRunner { + pub cwd: PathBuf, + pub stdout_buf: Vec, +} + +impl CommandRunner { + pub fn new(cwd: &Path) -> CommandRunner { + CommandRunner { + cwd: cwd.to_owned(), + stdout_buf: vec![], + } + } + + pub fn run(self, mut args: Vec<&str>) -> CommandOutput { + let mut stdout_buf = self.stdout_buf; + let stdout = Box::new(Cursor::new(&mut stdout_buf)); + let ui = Ui::new(self.cwd, stdout, false, user_settings()); + args.insert(0, "jj"); + let status = commands::dispatch(ui, args); + CommandOutput { status, stdout_buf } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct CommandOutput { + pub status: i32, + pub stdout_buf: Vec, +} + +impl CommandOutput { + pub fn stdout_string(&self) -> String { + String::from_utf8(self.stdout_buf.clone()).unwrap() + } +} diff --git a/src/ui.rs b/src/ui.rs new file mode 100644 index 000000000..29b36eff1 --- /dev/null +++ b/src/ui.rs @@ -0,0 +1,101 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt; +use std::io; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::sync::{Mutex, MutexGuard}; + +use jj_lib::commit::Commit; +use jj_lib::settings::UserSettings; + +use crate::styler::{ColorStyler, PlainTextStyler, Styler}; +use crate::templater::TemplateFormatter; +use jj_lib::repo::Repo; + +pub struct Ui<'a> { + cwd: PathBuf, + styler: Mutex>, + settings: UserSettings, +} + +impl<'a> Ui<'a> { + pub fn new( + cwd: PathBuf, + stdout: Box, + is_atty: bool, + settings: UserSettings, + ) -> Ui<'a> { + let styler: Box = if is_atty { + Box::new(ColorStyler::new(stdout, &settings)) + } else { + Box::new(PlainTextStyler::new(stdout)) + }; + let styler = Mutex::new(styler); + Ui { + cwd, + styler, + settings, + } + } + + pub fn for_terminal(settings: UserSettings) -> Ui<'static> { + let cwd = std::env::current_dir().unwrap(); + let stdout: Box = Box::new(io::stdout()); + Ui::new(cwd, stdout, true, settings) + } + + pub fn cwd(&self) -> &Path { + &self.cwd + } + + pub fn settings(&self) -> &UserSettings { + &self.settings + } + + pub fn styler(&self) -> MutexGuard> { + self.styler.lock().unwrap() + } + + pub fn write(&mut self, text: &str) { + self.styler().write_str(text); + } + + pub fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) { + self.styler().write_fmt(fmt).unwrap() + } + + pub fn write_error(&mut self, text: &str) { + let mut styler = self.styler(); + styler.add_label(String::from("error")); + styler.write_str(text); + } + + pub fn write_commit_summary<'r>(&mut self, repo: &'r dyn Repo, commit: &Commit) { + let template_string = self + .settings + .config() + .get_str("template.commit_summary") + .unwrap_or_else(|_| { + String::from( + r#"label(if(open, "open"), commit_id.short() " " description.first_line())"#, + ) + }); + let template = crate::template_parser::parse_commit_template(repo, &template_string); + let mut styler = self.styler(); + let mut template_writer = TemplateFormatter::new(template, styler.as_mut()); + 
template_writer.format(commit); + } +} diff --git a/tests/smoke_test.rs b/tests/smoke_test.rs new file mode 100644 index 000000000..888930aa8 --- /dev/null +++ b/tests/smoke_test.rs @@ -0,0 +1,112 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj::testutils; +use regex::Regex; + +#[test] +fn smoke_test() { + let temp_dir = tempfile::tempdir().unwrap(); + + let output = testutils::CommandRunner::new(temp_dir.path()).run(vec!["init", "repo"]); + assert_eq!(output.status, 0); + let repo_path = temp_dir.path().join("repo"); + + // Check the output of `jj status` right after initializing repo + let output = testutils::CommandRunner::new(&repo_path).run(vec!["status"]); + assert_eq!(output.status, 0); + let stdout_string = output.stdout_string(); + let output_regex = Regex::new( + "^Working copy : ([[:xdigit:]]+) \n\ + Parent commit: 000000000000 \n\ + Diff summary:\n\ + $", + ) + .unwrap(); + assert!( + output_regex.is_match(&stdout_string), + "output was: {}", + stdout_string + ); + let wc_hex_id_empty = output_regex + .captures(&stdout_string) + .unwrap() + .get(1) + .unwrap() + .as_str() + .to_owned(); + + // Write some files and check the output of `jj status` + std::fs::write(repo_path.join("file1"), "file1").unwrap(); + std::fs::write(repo_path.join("file2"), "file2").unwrap(); + std::fs::write(repo_path.join("file3"), "file3").unwrap(); + + let output = 
testutils::CommandRunner::new(&repo_path).run(vec!["status"]); + assert_eq!(output.status, 0); + let stdout_string = output.stdout_string(); + let output_regex = Regex::new( + "^Working copy : ([[:xdigit:]]+) \n\ + Parent commit: 000000000000 \n\ + Diff summary:\n\ + A file1\n\ + A file2\n\ + A file3\n\ + $", + ) + .unwrap(); + assert!( + output_regex.is_match(&stdout_string), + "output was: {}", + stdout_string + ); + let wc_hex_id_non_empty = output_regex + .captures(&stdout_string) + .unwrap() + .get(1) + .unwrap() + .as_str() + .to_owned(); + + // The working copy's id should have changed + assert_ne!(wc_hex_id_empty, wc_hex_id_non_empty); + + // Running `jj status` again gives the same output + let output2 = testutils::CommandRunner::new(&repo_path).run(vec!["status"]); + assert_eq!(output, output2); + + // Add a commit description + let output = + testutils::CommandRunner::new(&repo_path).run(vec!["describe", "--text", "add some files"]); + assert_eq!(output.status, 0); + let stdout_string = output.stdout_string(); + let output_regex = + Regex::new("^leaving: [[:xdigit:]]+ \nnow at: [[:xdigit:]]+ add some files\n$").unwrap(); + assert!( + output_regex.is_match(&stdout_string), + "output was: {}", + stdout_string + ); + + // Close the commit + let output = testutils::CommandRunner::new(&repo_path).run(vec!["close"]); + assert_eq!(output.status, 0); + let stdout_string = output.stdout_string(); + let output_regex = + Regex::new("^leaving: [[:xdigit:]]+ add some files\nnow at: [[:xdigit:]]+ \n$").unwrap(); + assert!( + output_regex.is_match(&stdout_string), + "output was: {}", + stdout_string + ); +} diff --git a/tests/test_init_command.rs b/tests/test_init_command.rs new file mode 100644 index 000000000..58256f9b6 --- /dev/null +++ b/tests/test_init_command.rs @@ -0,0 +1,64 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use jj::testutils; + +#[test] +fn test_init_git() { + let temp_dir = tempfile::tempdir().unwrap(); + let git_repo_path = temp_dir.path().join("git-repo"); + git2::Repository::init(git_repo_path.clone()).unwrap(); + + let output = testutils::CommandRunner::new(temp_dir.path()).run(vec![ + "init", + "repo", + "--git-store", + git_repo_path.to_str().unwrap(), + ]); + assert_eq!(output.status, 0); + + let repo_path = temp_dir.path().join("repo"); + assert!(repo_path.is_dir()); + assert!(repo_path.join(".jj").is_dir()); + let store_file_contents = std::fs::read_to_string(repo_path.join(".jj").join("store")).unwrap(); + assert!(store_file_contents.starts_with("git: ")); + assert!(store_file_contents.ends_with("/git-repo")); + assert_eq!( + output.stdout_string(), + format!("Initialized repo in \"{}\"\n", repo_path.to_str().unwrap()) + ); +} + +#[test] +fn test_init_local() { + let temp_dir = tempfile::tempdir().unwrap(); + + let output = testutils::CommandRunner::new(temp_dir.path()).run(vec!["init", "repo"]); + assert_eq!(output.status, 0); + + let repo_path = temp_dir.path().join("repo"); + assert!(repo_path.is_dir()); + assert!(repo_path.join(".jj").is_dir()); + let store_dir = repo_path.join(".jj").join("store"); + assert!(store_dir.is_dir()); + assert!(store_dir.join("commits").is_dir()); + assert!(store_dir.join("trees").is_dir()); + assert!(store_dir.join("files").is_dir()); + assert!(store_dir.join("symlinks").is_dir()); + assert!(store_dir.join("conflicts").is_dir()); + assert_eq!( + output.stdout_string(), + format!("Initialized repo in 
\"{}\"\n", repo_path.to_str().unwrap()) + ); +}