cargo-features = []

[workspace]
resolver = "2"
members = ["cli", "lib", "lib/testutils", "lib/gen-protos"]

[workspace.package]
version = "0.10.0"
license = "Apache-2.0"
rust-version = "1.71" # NOTE: remember to update CI, contributing.md, changelog.md, and flake.nix
edition = "2021"
readme = "README.md"
homepage = "https://github.com/martinvonz/jj"
repository = "https://github.com/martinvonz/jj"
documentation = "https://github.com/martinvonz/jj"
categories = ["version-control", "development-tools"]
keywords = ["VCS", "DVCS", "SCM", "Git", "Mercurial"]

[workspace.dependencies]
anyhow = "1.0.75"
assert_cmd = "2.0.8"
assert_matches = "1.5.0"

backend: make read functions async

The commit backend at Google is cloud-based (and so are the other
backends); it reads and writes commits from/to a server, which stores
them in a database. That makes latency much higher than for disk-based
backends. To reduce the latency, we have a local daemon process that
caches and prefetches objects. There are still many cases where
latency is high, such as when diffing two uncached commits. We can
improve that by changing some of our (jj's) algorithms to read many
objects concurrently from the backend. In the case of tree-diffing, we
can fetch one level (depth) of the tree at a time. There are several
ways of doing that:

* Make the backend methods `async`
* Use many threads for reading from the backend
* Add backend methods for batch reading

I don't think we typically need CPU parallelism, so it's wasteful to
have hundreds of threads running in order to fetch hundreds of objects
in parallel (especially when using a synchronous backend like the Git
backend). Batching would work well for the tree-diffing case, but it's
not as composable as `async`. For example, if we wanted to fetch some
commits at the same time as we were doing a diff, it's hard to see how
to do that with batching. Using `async` seems like our best bet.

I didn't make the backend interface's write functions async because
writes are already async with the daemon we have at Google. That
daemon hashes the object and returns immediately, then sends the
object to the server in the background. I think any cloud-based
solution will need a similar daemon process. However, we may need to
reconsider this if/when jj gets used on a server with a custom backend
that writes directly to a database (i.e. no async daemon in between).

I've tried to measure the performance impact. The largest difference
I've been able to measure was on `jj diff --ignore-working-copy -s
--from v5.0 --to v6.0` in the Linux repo, which increased from 749 ms
to 773 ms (3.2%). In most cases I've tested, there's no measurable
difference. I've also tried diffing from the root commit, as well as
`jj --ignore-working-copy log --no-graph -r '::v3.0 & author(torvalds)'
-T 'commit_id ++ "\n"'` (to test a commit-heavy load).
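
To make the composability argument concrete, here is a minimal sketch of
level-by-level concurrent reads, assuming a hypothetical backend trait; the
names (`Backend`, `read_tree`, `fetch_all_trees`) and types are illustrative
stand-ins, not jj's real interfaces. It uses the `async-trait` and `futures`
crates listed below.

    use futures::future::try_join_all;

    #[derive(Clone)]
    struct TreeId(Vec<u8>);
    struct Tree {
        subtree_ids: Vec<TreeId>,
    }
    struct BackendError;

    #[async_trait::async_trait]
    trait Backend {
        async fn read_tree(&self, id: &TreeId) -> Result<Tree, BackendError>;
    }

    // Walk a tree one level (depth) at a time. All reads within a level are
    // issued concurrently, so a cloud backend pays roughly one round-trip
    // per level instead of one per object. A synchronous backend (like the
    // Git one) can still implement `read_tree` by simply blocking.
    async fn fetch_all_trees(
        backend: &dyn Backend,
        root: TreeId,
    ) -> Result<Vec<Tree>, BackendError> {
        let mut all = Vec::new();
        let mut level = vec![root];
        while !level.is_empty() {
            // One batch of futures per level; `try_join_all` polls them
            // concurrently and fails fast on the first error.
            let trees = try_join_all(level.iter().map(|id| backend.read_tree(id))).await?;
            level = trees.iter().flat_map(|t| t.subtree_ids.clone()).collect();
            all.extend(trees);
        }
        Ok(all)
    }

Because each read is just a future, fetching commits concurrently with an
in-flight diff is a matter of joining one more future, whereas a batch-read
API would need a dedicated method for each such combination.
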
async-trait = "0.1.73"
backoff = "0.4.0"
blake2 = "0.10.6"
byteorder = "1.5.0"
bytes = "1.5.0"
cargo_metadata = "0.17.0"
clap = { version = "4.4.6", features = ["derive", "deprecated", "wrap_help"] }
clap_complete = "4.4.3"
clap_mangen = "0.2.10"
chrono = { version = "0.4.31", default-features = false, features = [
    "std",
    "clock",
] }
config = { version = "0.13.2", default-features = false, features = ["toml"] }
criterion = "0.5.1"
crossterm = { version = "0.26", default-features = false }
digest = "0.10.7"
dirs = "5.0.1"
either = "1.9.0"
esl01-renderdag = "0.3.0"
futures = "0.3.28"
glob = "0.3.1"
git2 = "0.17.2"
hex = "0.4.3"
itertools = "0.11.0"
indexmap = "2.0.2"
libc = { version = "0.2.149" }
insta = { version = "1.34.0", features = ["filters"] }
maplit = "1.0.2"
num_cpus = "1.16.0"
once_cell = "1.18.0"
pest = "2.7.4"
pest_derive = "2.7.4"
pretty_assertions = "1.4.0"
prost = "0.11.9"
prost-build = "0.11.9"
rand = "0.8.5"
rand_chacha = "0.3.1"
rayon = "1.8.0"
regex = "1.10.0"
rpassword = "7.2.0"
rustix = { version = "0.38.18", features = ["fs"] }
smallvec = { version = "1.11.1", features = [
    "const_generics",
    "const_new",
    "union",
] }
scm-record = "0.1.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.107"
slab = "0.4.9"
strsim = "0.10.0"
tempfile = "3.8.0"
test-case = "3.2.1"
textwrap = "0.16.0"
thiserror = "1.0.49"
timeago = { version = "0.4.2", default-features = false }
toml_edit = { version = "0.19.15", features = ["serde"] }
tracing = "0.1.37"
tracing-chrome = "0.7.1"
tracing-subscriber = { version = "0.3.17", default-features = false, features = [
    "std",
    "ansi",
    "env-filter",
    "fmt",
] }
tokio = { version = "1.33.0" }
unicode-width = "0.1.11"
watchman_client = { version = "0.8.0" }
whoami = "1.4.1"
version_check = "0.9.4"
zstd = "0.12.4"

# put all inter-workspace libraries, i.e. those that use 'path = ...' here in
# their own (alphabetically sorted) block

jj-lib = { path = "lib", version = "0.10.0" }
testutils = { path = "lib/testutils" }

feat(cargo): improve --release binary size

Summary: On x86_64-linux, these options reduce the total size of the `jj`
binary by about 40 percent, from 16MiB to 9.8MiB, while having relatively
minimal impact on build time, which goes from 58s to 1m15s.

While `strip=debuginfo` is already the default, `codegen-units=1` is doing
all the work here, and seems to have a rather miraculous effect without
meaningfully hurting compile time, for me at least. It will probably improve
runtime performance in some cases too, but that's hard to quantify.

Ultimately, with or without this change, the dominant entities taking up
most of the compile time for the whole project are the `jj-lib` and `jj-cli`
crates. A `--timings` report from Cargo indicates that almost all
dependencies (on my machine) compile within 30s whether or not this change
is in play, so the remaining time is all on us.

Various other changes, such as using `opt-level=z` or `lto=thin`, had no
real visible effect on size. `lto=fat` was pretty successful, reducing the
total size by over 50% (to 7.8MiB), but at a nearly 2x link-time cost. It
*might* be worth exploring whether something like `lto=thin` could improve
runtime performance, since it didn't meaningfully impact compile time or
size either way, but that's TBD.

Note: these measurements were taken with the `target/` directory wiped
between runs, so all dependencies were compiled from scratch, on my 6-core
(12-thread) Ryzen 5600X.

Signed-off-by: Austin Seipp <aseipp@pobox.com>
Change-Id: Ioyoulrmtwxypsrwwwysyylvmqxzttkmn

[profile.release]
strip = "debuginfo"
codegen-units = 1
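
# Possible follow-up from the commit message above (a sketch, not enabled,
# and the numbers would need re-measuring): `lto = "fat"` roughly halved
# binary size in the author's tests at a ~2x link-time cost, and whether the
# cheaper `lto = "thin"` helps runtime performance is still TBD.
# lto = "thin"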