aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/CodingGuidelines12
-rw-r--r--Documentation/RelNotes/2.54.0.adoc136
-rw-r--r--Documentation/config/format.adoc5
-rw-r--r--Documentation/config/hook.adoc30
-rw-r--r--Documentation/config/http.adoc26
-rw-r--r--Documentation/config/trailer.adoc121
-rw-r--r--Documentation/git-backfill.adoc5
-rw-r--r--Documentation/git-config.adoc2
-rw-r--r--Documentation/git-fast-import.adoc9
-rw-r--r--Documentation/git-format-patch.adoc20
-rw-r--r--Documentation/git-hook.adoc27
-rw-r--r--Documentation/git-interpret-trailers.adoc189
-rw-r--r--Documentation/git-multi-pack-index.adoc27
-rw-r--r--Documentation/git-pack-objects.adoc25
-rw-r--r--Documentation/git-stash.adoc10
-rw-r--r--Documentation/gitformat-pack.adoc8
-rw-r--r--Documentation/githooks.adoc19
-rw-r--r--Documentation/gitignore.adoc5
-rw-r--r--Documentation/line-range-options.adoc4
-rw-r--r--Documentation/pretty-formats.adoc4
-rwxr-xr-xGIT-VERSION-GEN2
-rw-r--r--Makefile77
-rw-r--r--add-patch.c8
-rw-r--r--apply.c115
-rw-r--r--apply.h1
-rw-r--r--builtin/am.c2
-rw-r--r--builtin/backfill.c24
-rw-r--r--builtin/branch.c2
-rw-r--r--builtin/cat-file.c9
-rw-r--r--builtin/config.c7
-rw-r--r--builtin/fast-export.c15
-rw-r--r--builtin/fast-import.c73
-rw-r--r--builtin/fetch.c4
-rw-r--r--builtin/fsck.c275
-rw-r--r--builtin/gc.c42
-rw-r--r--builtin/history.c8
-rw-r--r--builtin/hook.c61
-rw-r--r--builtin/index-pack.c6
-rw-r--r--builtin/interpret-trailers.c2
-rw-r--r--builtin/log.c89
-rw-r--r--builtin/merge-file.c3
-rw-r--r--builtin/mktag.c5
-rw-r--r--builtin/multi-pack-index.c92
-rw-r--r--builtin/name-rev.c16
-rw-r--r--builtin/pack-objects.c299
-rw-r--r--builtin/rebase.c3
-rw-r--r--builtin/receive-pack.c105
-rw-r--r--builtin/refs.c6
-rw-r--r--builtin/remote.c2
-rw-r--r--builtin/repack.c19
-rw-r--r--builtin/repo.c28
-rw-r--r--builtin/rev-parse.c40
-rw-r--r--builtin/show-ref.c2
-rw-r--r--builtin/stash.c4
-rw-r--r--builtin/submodule--helper.c1
-rw-r--r--builtin/unpack-objects.c8
-rw-r--r--builtin/worktree.c21
-rw-r--r--cache-tree.c14
-rw-r--r--cbtree.c21
-rw-r--r--cbtree.h17
-rwxr-xr-xci/run-static-analysis.sh2
-rw-r--r--commit-graph.c45
-rw-r--r--commit-reach.c6
-rw-r--r--compat/mingw.c6
-rw-r--r--compat/nedmalloc/malloc.c.h2
-rw-r--r--compat/poll/poll.c4
-rw-r--r--compat/posix.h2
-rw-r--r--compat/regcomp_enhanced.c5
-rw-r--r--compat/win32/flush.c2
-rw-r--r--compat/winansi.c37
-rw-r--r--config.mak.dev2
-rw-r--r--connect.c4
-rw-r--r--contrib/buildsystems/CMakeLists.txt24
-rw-r--r--contrib/diff-highlight/DiffHighlight.pm71
-rw-r--r--contrib/diff-highlight/README19
-rwxr-xr-xcontrib/diff-highlight/t/t9400-diff-highlight.sh78
-rw-r--r--contrib/meson.build1
-rw-r--r--contrib/subtree/meson.build2
-rw-r--r--diff.c279
-rw-r--r--diffcore.h16
-rw-r--r--fetch-pack.c21
-rw-r--r--fsck.c76
-rw-r--r--fsck.h42
-rw-r--r--git-compat-util.h8
-rw-r--r--git-curl-compat.h8
-rw-r--r--git.c3
-rw-r--r--gpg-interface.c2
-rw-r--r--gpg-interface.h1
-rw-r--r--hash.c18
-rw-r--r--hash.h3
-rw-r--r--hook.c190
-rw-r--r--hook.h34
-rw-r--r--http-push.c8
-rw-r--r--http-walker.c4
-rw-r--r--http.c142
-rw-r--r--http.h9
-rw-r--r--line-log.c196
-rw-r--r--line-log.h14
-rw-r--r--list-objects.c2
-rw-r--r--mailinfo.c2
-rw-r--r--meson.build96
-rw-r--r--midx-write.c525
-rw-r--r--midx.c39
-rw-r--r--midx.h12
-rw-r--r--negotiator/default.c10
-rw-r--r--negotiator/skipping.c10
-rw-r--r--notes.c2
-rw-r--r--object-file.c144
-rw-r--r--object-file.h38
-rw-r--r--object-name.c439
-rw-r--r--object-name.h11
-rw-r--r--odb.c144
-rw-r--r--odb.h147
-rw-r--r--odb/source-files.c65
-rw-r--r--odb/source.h113
-rw-r--r--odb/streaming.c1
-rw-r--r--oidtree.c63
-rw-r--r--oidtree.h48
-rw-r--r--pack-bitmap.c9
-rw-r--r--pack-check.c7
-rw-r--r--pack-revindex.c4
-rw-r--r--pack.h9
-rw-r--r--packfile.c350
-rw-r--r--packfile.h27
-rw-r--r--path-walk.c45
-rw-r--r--path.c2
-rw-r--r--path.h6
-rw-r--r--pretty.c15
-rw-r--r--range-diff.c2
-rw-r--r--read-cache.c33
-rw-r--r--reflog.c2
-rw-r--r--refs.c20
-rw-r--r--refs.h3
-rw-r--r--refs/files-backend.c2
-rw-r--r--refs/reftable-backend.c6
-rw-r--r--refspec.c2
-rw-r--r--reftable/blocksource.c19
-rw-r--r--reftable/fsck.c2
-rw-r--r--reftable/reftable-basics.h2
-rw-r--r--reftable/reftable-block.h3
-rw-r--r--reftable/reftable-blocksource.h2
-rw-r--r--reftable/reftable-error.h2
-rw-r--r--reftable/reftable-fsck.h1
-rw-r--r--reftable/reftable-iterator.h1
-rw-r--r--reftable/reftable-merged.h1
-rw-r--r--reftable/reftable-record.h2
-rw-r--r--reftable/reftable-stack.h1
-rw-r--r--reftable/reftable-system.h18
-rw-r--r--reftable/reftable-table.h1
-rw-r--r--reftable/reftable-writer.h10
-rw-r--r--reftable/stack.c40
-rw-r--r--reftable/system.c32
-rw-r--r--reftable/system.h32
-rw-r--r--remote-curl.c18
-rw-r--r--remote.c2
-rw-r--r--replay.c27
-rw-r--r--repository.c1
-rw-r--r--rerere.c8
-rw-r--r--revision.c69
-rw-r--r--revision.h1
-rw-r--r--shallow.c6
-rw-r--r--split-index.c13
-rw-r--r--strbuf.c2
-rw-r--r--string-list.c9
-rw-r--r--string-list.h8
-rw-r--r--submodule-config.c1
-rw-r--r--submodule.c4
-rw-r--r--t/helper/test-read-midx.c21
-rw-r--r--t/lib-httpd.sh1
-rw-r--r--t/lib-httpd/apache.conf8
-rw-r--r--t/lib-httpd/http-429.sh98
-rw-r--r--t/meson.build2
-rwxr-xr-xt/t0061-run-command.sh12
-rwxr-xr-xt/t0300-credentials.sh4
-rw-r--r--t/t0450/adoc-help-mismatches1
-rwxr-xr-xt/t1416-ref-transaction-hooks.sh30
-rwxr-xr-xt/t1800-hook.sh179
-rwxr-xr-xt/t1900-repo-info.sh6
-rwxr-xr-xt/t1901-repo-structure.sh6
-rwxr-xr-xt/t2000-conflict-when-checking-files-out.sh122
-rwxr-xr-xt/t2107-update-index-basic.sh2
-rwxr-xr-xt/t2200-add-update.sh38
-rwxr-xr-xt/t2203-add-intent.sh24
-rwxr-xr-xt/t2400-worktree-add.sh28
-rwxr-xr-xt/t3430-rebase-merges.sh6
-rwxr-xr-xt/t3650-replay-basics.sh10
-rwxr-xr-xt/t3902-quoted.sh16
-rwxr-xr-xt/t4012-diff-binary.sh4
-rwxr-xr-xt/t4014-format-patch.sh141
-rwxr-xr-xt/t4100-apply-stat.sh88
-rwxr-xr-xt/t4103-apply-binary.sh20
-rwxr-xr-xt/t4120-apply-popt.sh41
-rwxr-xr-xt/t4124-apply-ws-rule.sh16
-rwxr-xr-xt/t4201-shortlog.sh4
-rwxr-xr-xt/t4211-line-log.sh348
-rw-r--r--t/t4211/sha1/expect.beginning-of-file4
-rw-r--r--t/t4211/sha1/expect.end-of-file11
-rw-r--r--t/t4211/sha1/expect.move-support-f5
-rw-r--r--t/t4211/sha1/expect.multiple10
-rw-r--r--t/t4211/sha1/expect.multiple-overlapping7
-rw-r--r--t/t4211/sha1/expect.multiple-superset7
-rw-r--r--t/t4211/sha1/expect.no-assertion-error12
-rw-r--r--t/t4211/sha1/expect.parallel-change-f-to-main7
-rw-r--r--t/t4211/sha1/expect.simple-f4
-rw-r--r--t/t4211/sha1/expect.simple-f-to-main5
-rw-r--r--t/t4211/sha1/expect.simple-main11
-rw-r--r--t/t4211/sha1/expect.simple-main-to-end11
-rw-r--r--t/t4211/sha1/expect.two-ranges10
-rw-r--r--t/t4211/sha1/expect.vanishes-early10
-rw-r--r--t/t4211/sha256/expect.beginning-of-file4
-rw-r--r--t/t4211/sha256/expect.end-of-file11
-rw-r--r--t/t4211/sha256/expect.move-support-f5
-rw-r--r--t/t4211/sha256/expect.multiple10
-rw-r--r--t/t4211/sha256/expect.multiple-overlapping7
-rw-r--r--t/t4211/sha256/expect.multiple-superset7
-rw-r--r--t/t4211/sha256/expect.no-assertion-error12
-rw-r--r--t/t4211/sha256/expect.parallel-change-f-to-main7
-rw-r--r--t/t4211/sha256/expect.simple-f4
-rw-r--r--t/t4211/sha256/expect.simple-f-to-main5
-rw-r--r--t/t4211/sha256/expect.simple-main11
-rw-r--r--t/t4211/sha256/expect.simple-main-to-end11
-rw-r--r--t/t4211/sha256/expect.two-ranges10
-rw-r--r--t/t4211/sha256/expect.vanishes-early10
-rwxr-xr-xt/t4254-am-corrupt.sh3
-rwxr-xr-xt/t5315-pack-objects-compression.sh2
-rwxr-xr-xt/t5318-commit-graph.sh20
-rwxr-xr-xt/t5319-multi-pack-index.sh18
-rwxr-xr-xt/t5331-pack-objects-stdin.sh105
-rwxr-xr-xt/t5335-compact-multi-pack-index.sh293
-rwxr-xr-xt/t5510-fetch.sh7
-rwxr-xr-xt/t5516-fetch-push.sh15
-rwxr-xr-xt/t5551-http-fetch-smart.sh7
-rwxr-xr-xt/t5584-http-429-retry.sh266
-rwxr-xr-xt/t5620-backfill.sh226
-rwxr-xr-xt/t6101-rev-parse-parents.sh3
-rwxr-xr-xt/t6403-merge-file.sh9
-rwxr-xr-xt/t7004-tag.sh15
-rwxr-xr-xt/t7704-repack-cruft.sh22
-rwxr-xr-xt/t8003-blame-corner-cases.sh75
-rwxr-xr-xt/t9001-send-email.sh12
-rwxr-xr-xt/t9300-fast-import.sh32
-rwxr-xr-xt/t9305-fast-import-signatures.sh10
-rwxr-xr-xt/t9306-fast-import-signed-tags.sh118
-rw-r--r--t/test-lib-functions.sh3
-rw-r--r--t/unit-tests/u-oidtree.c18
-rw-r--r--tmp-objdir.c1
-rw-r--r--tools/README.md7
-rwxr-xr-xtools/check-builtins.sh (renamed from check-builtins.sh)0
-rw-r--r--tools/coccinelle/.gitignore (renamed from contrib/coccinelle/.gitignore)0
-rw-r--r--tools/coccinelle/README (renamed from contrib/coccinelle/README)2
-rw-r--r--tools/coccinelle/array.cocci (renamed from contrib/coccinelle/array.cocci)0
-rw-r--r--tools/coccinelle/commit.cocci (renamed from contrib/coccinelle/commit.cocci)0
-rw-r--r--tools/coccinelle/config_fn_ctx.pending.cocci (renamed from contrib/coccinelle/config_fn_ctx.pending.cocci)0
-rw-r--r--tools/coccinelle/equals-null.cocci (renamed from contrib/coccinelle/equals-null.cocci)0
-rw-r--r--tools/coccinelle/flex_alloc.cocci (renamed from contrib/coccinelle/flex_alloc.cocci)0
-rw-r--r--tools/coccinelle/free.cocci (renamed from contrib/coccinelle/free.cocci)0
-rw-r--r--tools/coccinelle/git_config_number.cocci (renamed from contrib/coccinelle/git_config_number.cocci)0
-rw-r--r--tools/coccinelle/hashmap.cocci (renamed from contrib/coccinelle/hashmap.cocci)0
-rw-r--r--tools/coccinelle/index-compatibility.cocci (renamed from contrib/coccinelle/index-compatibility.cocci)0
-rw-r--r--tools/coccinelle/meson.build (renamed from contrib/coccinelle/meson.build)0
-rw-r--r--tools/coccinelle/object_id.cocci (renamed from contrib/coccinelle/object_id.cocci)0
-rw-r--r--tools/coccinelle/preincr.cocci (renamed from contrib/coccinelle/preincr.cocci)0
-rw-r--r--tools/coccinelle/qsort.cocci (renamed from contrib/coccinelle/qsort.cocci)0
-rw-r--r--tools/coccinelle/refs.cocci (renamed from contrib/coccinelle/refs.cocci)0
-rwxr-xr-xtools/coccinelle/spatchcache (renamed from contrib/coccinelle/spatchcache)6
-rw-r--r--tools/coccinelle/strbuf.cocci (renamed from contrib/coccinelle/strbuf.cocci)7
-rw-r--r--tools/coccinelle/strvec.cocci46
-rw-r--r--tools/coccinelle/swap.cocci (renamed from contrib/coccinelle/swap.cocci)0
-rw-r--r--tools/coccinelle/tests/free.c (renamed from contrib/coccinelle/tests/free.c)0
-rw-r--r--tools/coccinelle/tests/free.res (renamed from contrib/coccinelle/tests/free.res)0
-rw-r--r--tools/coccinelle/the_repository.cocci (renamed from contrib/coccinelle/the_repository.cocci)0
-rw-r--r--tools/coccinelle/xcalloc.cocci (renamed from contrib/coccinelle/xcalloc.cocci)0
-rw-r--r--tools/coccinelle/xopen.cocci (renamed from contrib/coccinelle/xopen.cocci)0
-rw-r--r--tools/coccinelle/xstrdup_or_null.cocci (renamed from contrib/coccinelle/xstrdup_or_null.cocci)0
-rw-r--r--tools/coccinelle/xstrncmpz.cocci (renamed from contrib/coccinelle/xstrncmpz.cocci)0
-rwxr-xr-xtools/coverage-diff.sh (renamed from contrib/coverage-diff.sh)0
-rwxr-xr-xtools/detect-compiler (renamed from detect-compiler)0
-rwxr-xr-xtools/generate-cmdlist.sh (renamed from generate-cmdlist.sh)0
-rwxr-xr-xtools/generate-configlist.sh (renamed from generate-configlist.sh)0
-rwxr-xr-xtools/generate-hooklist.sh (renamed from generate-hooklist.sh)0
-rwxr-xr-xtools/generate-perl.sh (renamed from generate-perl.sh)0
-rwxr-xr-xtools/generate-python.sh (renamed from generate-python.sh)0
-rwxr-xr-xtools/generate-script.sh (renamed from generate-script.sh)0
-rw-r--r--tools/meson.build1
-rw-r--r--tools/precompiled.h1
-rw-r--r--tools/update-unicode/.gitignore (renamed from contrib/update-unicode/.gitignore)0
-rw-r--r--tools/update-unicode/README (renamed from contrib/update-unicode/README)0
-rwxr-xr-xtools/update-unicode/update_unicode.sh (renamed from contrib/update-unicode/update_unicode.sh)0
-rw-r--r--trailer.c2
-rw-r--r--transport-helper.c2
-rw-r--r--transport.c3
-rw-r--r--walker.c2
-rw-r--r--worktree.c10
-rw-r--r--worktree.h3
294 files changed, 6857 insertions, 2410 deletions
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index b8670751f5..4992e52093 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -668,6 +668,18 @@ For C programs:
unsigned other_field:1;
unsigned field_with_longer_name:1;
+ - When a function `F` accepts flags, those flags should be defined as `enum
+ F_flags`. Individual flag definitions should start with `F` and be in
+ all-uppercase letters. Flag values should be represented via bit shifts.
+ E.g.
+
+ enum frobnicate_flags {
+ FROBNICATE_FOO = (1 << 0),
+ FROBNICATE_BAR = (1 << 1),
+ };
+
+ int frobnicate(enum frobnicate_flags flags);
+
- Array names should be named in the singular form if the individual items are
subject of use. E.g.:
diff --git a/Documentation/RelNotes/2.54.0.adoc b/Documentation/RelNotes/2.54.0.adoc
index 7de9f7e7ec..27dbfdc6a5 100644
--- a/Documentation/RelNotes/2.54.0.adoc
+++ b/Documentation/RelNotes/2.54.0.adoc
@@ -97,6 +97,33 @@ UI, Workflows & Features
* "git history" learned the "split" subcommand.
+ * The reference-transaction hook was taught to be triggered before
+ taking locks on references in the "preparing" phase.
+
+ * "git apply" now reports the name of the input file along with the
+ line number when it encounters a corrupt patch, and correctly
+ resets the line counter when processing multiple patch files.
+
+ * The HTTP transport learned to react to "429 Too Many Requests".
+
+ * "git repo info -h" and "git repo structure -h" limit their help output
+ to the part that is specific to the subcommand.
+
+ * "git format-patch --cover-letter" learns to use a simpler format
+ instead of the traditional shortlog format to list its commits with
+ a new --commit-list-format option and format.commitListFormat
+ configuration variable.
+
+ * `git backfill` learned to accept revision and pathspec arguments.
+
+ * "git replay" (experimental) learns, in addition to "pick" and
+ "replay", a new operating mode "revert".
+
+ * git replay now supports replaying down to the root commit.
+
+ * Handling of signed commits and tags in fast-import has been made more
+ configurable.
+
Performance, Internal Implementation, Development Support etc.
--------------------------------------------------------------
@@ -110,7 +137,7 @@ Performance, Internal Implementation, Development Support etc.
* Improve set-up time of a perf test.
- * ISO C23 redefines strchr and friends that tradiotionally took
+ * ISO C23 redefines strchr and friends that traditionally took
a const pointer and returned a non-const pointer derived from it to
preserve constness (i.e., if you ask for a substring in a const
string, you get a const pointer to the substring). Update code
@@ -209,7 +236,7 @@ Performance, Internal Implementation, Development Support etc.
many source files inside subdirectories unaffected, which has been
corrected.
- * The run_command() API lost its implicit dependencyon the singleton
+ * The run_command() API lost its implicit dependency on the singleton
`the_repository` instance.
* The unit test helper function was taught to use backslash +
@@ -227,6 +254,68 @@ Performance, Internal Implementation, Development Support etc.
* Add a coccinelle rule to break the build when "struct strbuf" gets
passed by value.
+ * Further work on incremental repacking using MIDX/bitmap.
+
+ * The logic to count objects has been cleaned up.
+
+ * Tweak the build infrastructure by moving tools around.
+
+ * Uses of prio_queue as a LIFO stack of commits have been rewritten
+ with commit_stack.
+
+ * The cleanup of remaining bitmaps in "ahead_behind()" has been
+ simplified.
+
+ * split-index.c has been updated to not use the global the_repository
+ and the_hash_algo variables.
+
+ * The unsigned integer that is used as a bitset to specify the kind
+ of branches the interpret_branch_name() function accepts has been
+ changed to use a dedicated enum type.
+
+ * Various updates to contrib/diff-highlight, including documentation
+ updates, test improvements, and color configuration handling.
+
+ * Code paths that loop over another array to push each element into a
+ strvec have been rewritten to use strvec_pushv() instead.
+
+ * In case homebrew breaks REG_ENHANCED again, leave an in-code comment
+ to suggest use of our replacement regex as a workaround.
+
+ * MinGW build updates.
+
+ * The way dash 0.5.13 handles non-ASCII contents in here-doc
+ is buggy and breaks our existing tests, which unfortunately
+ have been rewritten to avoid triggering the bug.
+
+ * Object name handling (disambiguation and abbreviation) has been
+ refactored to be backend-generic, moving logic into the respective
+ object database backends.
+
+ * pack-objects's --stdin-packs=follow mode learns to handle
+ excluded-but-open packs.
+
+ * A few code paths that spawned child processes for network
+ connection weren't wait(2)ing for their children and letting "init"
+ reap them instead; they have been tightened.
+
+ * Adjust the codebase for C23 that changes functions like strchr()
+ that discarded constness when they return a pointer into a const
+ string to preserve constness.
+
+ * A handful of inappropriate uses of the_repository have been
+ rewritten to use the right repository structure instance in the
+ read-cache.c codepath.
+
+ * Internals of "git fsck" have been refactored to not depend on the
+ global `the_repository` variable.
+
+ * Reduce dependency on `the_repository` in add-patch.c file.
+
+ * The way the "git log -L<range>:<file>" feature is bolted onto the
+ log/diff machinery is being reworked a bit to make the feature
+ compatible with more diff options, like -S/G.
+
Fixes since v2.53
-----------------
@@ -384,6 +473,42 @@ Fixes since v2.53
took an empty string as a valid <num>.
(merge 4f6a803aba ty/doc-diff-u-wo-number later to maint).
+ * The handling of the incomplete lines at the end by "git
+ diff-highlight" has been fixed.
+
+ * merge-file --object-id used to trigger a BUG when run in a linked
+ worktree, which has been fixed.
+ (merge 57246b7c62 mr/merge-file-object-id-worktree-fix later to maint).
+
+ * "git apply -p<n>" parses <n> more carefully now.
+ (merge d05d84c5f5 mf/apply-p-no-atoi later to maint).
+
+ * A test to run a .bat file with whitespaces in the name with arguments
+ with whitespaces in them was flaky in that sometimes it got killed
+ before it produced expected side effects, which has been rewritten to
+ make it more robust.
+ (merge 3ad4921838 jk/t0061-bat-test-update later to maint).
+
+ * "git ls-remote '+refs/tags/*:refs/tags/*' https://..." run outside a
+ repository would dereference a NULL while trying to see if the given
+ refspec is a single-object refspec, which has been corrected.
+ (merge 4e5dc601dd kj/refspec-parsing-outside-repository later to maint).
+
+ * Fix a regression in writing the commit-graph where commits with dates
+ exceeding 34 bits (beyond year 2514) could cause an underflow and
+ crash Git during the generation data overflow chunk writing.
+
+ * The value of a wrong pointer variable was referenced in an error
+ message that reported that it shouldn't be NULL.
+ (merge 753ecf4205 yc/path-walk-fix-error-reporting later to maint).
+
+ * The check in "receive-pack" to prevent a checked out branch from
+ getting updated via updateInstead mechanism has been corrected.
+
+ * "git backfill" is capable of auto-detecting a sparsely checked out
+ working tree, which was broken.
+ (merge 339eba65a7 th/backfill-auto-detect-sparseness-fix later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge d79fff4a11 jk/remote-tracking-ref-leakfix later to maint).
(merge 7a747f972d dd/t5403-modernise later to maint).
@@ -431,3 +556,10 @@ Fixes since v2.53
(merge 2f05039717 rj/pack-refs-tests-path-is-helpers later to maint).
(merge 2594747ad1 jk/transport-color-leakfix later to maint).
(merge 48430e44ac mf/t0008-cleanup later to maint).
+ (merge fc8a4f15e7 gi/doc-boolean-config-typofix later to maint).
+ (merge 37182267a0 kh/doc-interpret-trailers-1 later to maint).
+ (merge f64c50e768 jc/rerere-modern-strbuf-handling later to maint).
+ (merge 699248d89e th/t8003-unhide-git-failures later to maint).
+ (merge d8e34f971b za/t2000-modernise later to maint).
+ (merge 849988bc74 th/t6101-unhide-git-failures later to maint).
+ (merge 0f0ce07625 sp/doc-gitignore-oowt later to maint).
diff --git a/Documentation/config/format.adoc b/Documentation/config/format.adoc
index ab0710e86a..dbd186290b 100644
--- a/Documentation/config/format.adoc
+++ b/Documentation/config/format.adoc
@@ -101,6 +101,11 @@ format.coverLetter::
generate a cover-letter only when there's more than one patch.
Default is false.
+format.commitListFormat::
+ When the `--commit-list-format` option is not given, `format-patch`
+ uses the value of this variable to decide how to format the entry of
+ each commit. Defaults to `shortlog`.
+
format.outputDirectory::
Set a custom directory to store the resulting files instead of the
current working directory. All directory components will be created.
diff --git a/Documentation/config/hook.adoc b/Documentation/config/hook.adoc
index 64e845a260..9e78f26439 100644
--- a/Documentation/config/hook.adoc
+++ b/Documentation/config/hook.adoc
@@ -1,23 +1,23 @@
-hook.<name>.command::
- The command to execute for `hook.<name>`. `<name>` is a unique
- "friendly" name that identifies this hook. (The hook events that
- trigger the command are configured with `hook.<name>.event`.) The
- value can be an executable path or a shell oneliner. If more than
- one value is specified for the same `<name>`, only the last value
- parsed is used. See linkgit:git-hook[1].
+hook.<friendly-name>.command::
+ The command to execute for `hook.<friendly-name>`. `<friendly-name>`
+ is a unique name that identifies this hook. The hook events that
+ trigger the command are configured with `hook.<friendly-name>.event`.
+ The value can be an executable path or a shell oneliner. If more than
+ one value is specified for the same `<friendly-name>`, only the last
+ value parsed is used. See linkgit:git-hook[1].
-hook.<name>.event::
- The hook events that trigger `hook.<name>`. The value is the name
- of a hook event, like "pre-commit" or "update". (See
+hook.<friendly-name>.event::
+ The hook events that trigger `hook.<friendly-name>`. The value is the
+ name of a hook event, like "pre-commit" or "update". (See
linkgit:githooks[5] for a complete list of hook events.) On the
- specified event, the associated `hook.<name>.command` is executed.
- This is a multi-valued key. To run `hook.<name>` on multiple
+ specified event, the associated `hook.<friendly-name>.command` is executed.
+ This is a multi-valued key. To run `hook.<friendly-name>` on multiple
events, specify the key more than once. An empty value resets
the list of events, clearing any previously defined events for
- `hook.<name>`. See linkgit:git-hook[1].
+ `hook.<friendly-name>`. See linkgit:git-hook[1].
-hook.<name>.enabled::
- Whether the hook `hook.<name>` is enabled. Defaults to `true`.
+hook.<friendly-name>.enabled::
+ Whether the hook `hook.<friendly-name>` is enabled. Defaults to `true`.
Set to `false` to disable the hook without removing its
configuration. This is particularly useful when a hook is defined
in a system or global config file and needs to be disabled for a
diff --git a/Documentation/config/http.adoc b/Documentation/config/http.adoc
index 9da5c298cc..849c89f36c 100644
--- a/Documentation/config/http.adoc
+++ b/Documentation/config/http.adoc
@@ -315,6 +315,32 @@ http.keepAliveCount::
unset, curl's default value is used. Can be overridden by the
`GIT_HTTP_KEEPALIVE_COUNT` environment variable.
+http.retryAfter::
+ Default wait time in seconds before retrying when a server returns
+ HTTP 429 (Too Many Requests) without a Retry-After header.
+ Defaults to 0 (retry immediately). When a Retry-After header is
+ present, its value takes precedence over this setting; however,
+ automatic use of the server-provided `Retry-After` header requires
+ libcurl 7.66.0 or later. On older versions, configure this setting
+ manually to control the retry delay. Can be overridden by the
+ `GIT_HTTP_RETRY_AFTER` environment variable.
+ See also `http.maxRetries` and `http.maxRetryTime`.
+
+http.maxRetries::
+ Maximum number of times to retry after receiving HTTP 429 (Too Many
+ Requests) responses. Set to 0 (the default) to disable retries.
+ Can be overridden by the `GIT_HTTP_MAX_RETRIES` environment variable.
+ See also `http.retryAfter` and `http.maxRetryTime`.
+
+http.maxRetryTime::
+ Maximum time in seconds to wait for a single retry attempt when
+ handling HTTP 429 (Too Many Requests) responses. If the server
+ requests a delay (via Retry-After header) or if `http.retryAfter`
+ is configured with a value that exceeds this maximum, Git will fail
+ immediately rather than waiting. Default is 300 seconds (5 minutes).
+ Can be overridden by the `GIT_HTTP_MAX_RETRY_TIME` environment
+ variable. See also `http.retryAfter` and `http.maxRetries`.
+
http.noEPSV::
A boolean which disables using of EPSV ftp command by curl.
This can be helpful with some "poor" ftp servers which don't
diff --git a/Documentation/config/trailer.adoc b/Documentation/config/trailer.adoc
index 60bc221c88..1bc70192d3 100644
--- a/Documentation/config/trailer.adoc
+++ b/Documentation/config/trailer.adoc
@@ -1,21 +1,21 @@
-trailer.separators::
+`trailer.separators`::
This option tells which characters are recognized as trailer
- separators. By default only ':' is recognized as a trailer
- separator, except that '=' is always accepted on the command
+ separators. By default only `:` is recognized as a trailer
+ separator, except that `=` is always accepted on the command
line for compatibility with other git commands.
+
The first character given by this option will be the default character
used when another separator is not specified in the config for this
trailer.
+
-For example, if the value for this option is "%=$", then only lines
-using the format '<key><sep><value>' with <sep> containing '%', '='
-or '$' and then spaces will be considered trailers. And '%' will be
+For example, if the value for this option is `%=$`, then only lines
+using the format _<key><sep><value>_ with _<sep>_ containing `%`, `=`
+or `$` and then spaces will be considered trailers. And `%` will be
the default separator used, so by default trailers will appear like:
-'<key>% <value>' (one percent sign and one space will appear between
+`<key>% <value>` (one percent sign and one space will appear between
the key and the value).
-trailer.where::
+`trailer.where`::
This option tells where a new trailer will be added.
+
This can be `end`, which is the default, `start`, `after` or `before`.
@@ -27,41 +27,41 @@ If it is `start`, then each new trailer will appear at the start,
instead of the end, of the existing trailers.
+
If it is `after`, then each new trailer will appear just after the
-last trailer with the same <key>.
+last trailer with the same _<key>_.
+
If it is `before`, then each new trailer will appear just before the
-first trailer with the same <key>.
+first trailer with the same _<key>_.
-trailer.ifexists::
+`trailer.ifexists`::
This option makes it possible to choose what action will be
performed when there is already at least one trailer with the
- same <key> in the input.
+ same _<key>_ in the input.
+
The valid values for this option are: `addIfDifferentNeighbor` (this
is the default), `addIfDifferent`, `add`, `replace` or `doNothing`.
+
With `addIfDifferentNeighbor`, a new trailer will be added only if no
-trailer with the same (<key>, <value>) pair is above or below the line
+trailer with the same (_<key>_, _<value>_) pair is above or below the line
where the new trailer will be added.
+
With `addIfDifferent`, a new trailer will be added only if no trailer
-with the same (<key>, <value>) pair is already in the input.
+with the same (_<key>_, _<value>_) pair is already in the input.
+
With `add`, a new trailer will be added, even if some trailers with
-the same (<key>, <value>) pair are already in the input.
+the same (_<key>_, _<value>_) pair are already in the input.
+
-With `replace`, an existing trailer with the same <key> will be
+With `replace`, an existing trailer with the same _<key>_ will be
deleted and the new trailer will be added. The deleted trailer will be
-the closest one (with the same <key>) to the place where the new one
+the closest one (with the same _<key>_) to the place where the new one
will be added.
+
With `doNothing`, nothing will be done; that is no new trailer will be
-added if there is already one with the same <key> in the input.
+added if there is already one with the same _<key>_ in the input.
-trailer.ifmissing::
+`trailer.ifmissing`::
This option makes it possible to choose what action will be
performed when there is not yet any trailer with the same
- <key> in the input.
+ _<key>_ in the input.
+
The valid values for this option are: `add` (this is the default) and
`doNothing`.
@@ -70,67 +70,68 @@ With `add`, a new trailer will be added.
+
With `doNothing`, nothing will be done.
-trailer.<keyAlias>.key::
- Defines a <keyAlias> for the <key>. The <keyAlias> must be a
- prefix (case does not matter) of the <key>. For example, in `git
- config trailer.ack.key "Acked-by"` the "Acked-by" is the <key> and
- the "ack" is the <keyAlias>. This configuration allows the shorter
+`trailer.<key-alias>.key`::
+ Defines a _<key-alias>_ for the _<key>_. The _<key-alias>_ must be a
+ prefix (case does not matter) of the _<key>_. For example, in `git
+ config trailer.ack.key "Acked-by"` the `Acked-by` is the _<key>_ and
+ the `ack` is the _<key-alias>_. This configuration allows the shorter
`--trailer "ack:..."` invocation on the command line using the "ack"
- <keyAlias> instead of the longer `--trailer "Acked-by:..."`.
+ `<key-alias>` instead of the longer `--trailer "Acked-by:..."`.
+
-At the end of the <key>, a separator can appear and then some
-space characters. By default the only valid separator is ':',
+At the end of the _<key>_, a separator can appear and then some
+space characters. By default the only valid separator is `:`,
but this can be changed using the `trailer.separators` config
variable.
+
If there is a separator in the key, then it overrides the default
separator when adding the trailer.
-trailer.<keyAlias>.where::
- This option takes the same values as the 'trailer.where'
+`trailer.<key-alias>.where`::
+ This option takes the same values as the `trailer.where`
configuration variable and it overrides what is specified by
- that option for trailers with the specified <keyAlias>.
+ that option for trailers with the specified _<key-alias>_.
-trailer.<keyAlias>.ifexists::
- This option takes the same values as the 'trailer.ifexists'
+`trailer.<key-alias>.ifexists`::
+ This option takes the same values as the `trailer.ifexists`
configuration variable and it overrides what is specified by
- that option for trailers with the specified <keyAlias>.
+ that option for trailers with the specified _<key-alias>_.
-trailer.<keyAlias>.ifmissing::
- This option takes the same values as the 'trailer.ifmissing'
+`trailer.<key-alias>.ifmissing`::
+ This option takes the same values as the `trailer.ifmissing`
configuration variable and it overrides what is specified by
- that option for trailers with the specified <keyAlias>.
+ that option for trailers with the specified _<key-alias>_.
-trailer.<keyAlias>.command::
- Deprecated in favor of 'trailer.<keyAlias>.cmd'.
- This option behaves in the same way as 'trailer.<keyAlias>.cmd', except
+`trailer.<key-alias>.command`::
+ Deprecated in favor of `trailer.<key-alias>.cmd`.
+ This option behaves in the same way as `trailer.<key-alias>.cmd`, except
that it doesn't pass anything as argument to the specified command.
- Instead the first occurrence of substring $ARG is replaced by the
- <value> that would be passed as argument.
+ Instead the first occurrence of substring `$ARG` is replaced by the
+ _<value>_ that would be passed as argument.
+
-Note that $ARG in the user's command is
-only replaced once and that the original way of replacing $ARG is not safe.
+Note that `$ARG` in the user's command is
+only replaced once and that the original way of replacing `$ARG` is not safe.
+
-When both 'trailer.<keyAlias>.cmd' and 'trailer.<keyAlias>.command' are given
-for the same <keyAlias>, 'trailer.<keyAlias>.cmd' is used and
-'trailer.<keyAlias>.command' is ignored.
+When both `trailer.<key-alias>.cmd` and `trailer.<key-alias>.command` are given
+for the same _<key-alias>_, `trailer.<key-alias>.cmd` is used and
+`trailer.<key-alias>.command` is ignored.
-trailer.<keyAlias>.cmd::
+`trailer.<key-alias>.cmd`::
This option can be used to specify a shell command that will be called
- once to automatically add a trailer with the specified <keyAlias>, and then
- called each time a '--trailer <keyAlias>=<value>' argument is specified to
- modify the <value> of the trailer that this option would produce.
+ once to automatically add a trailer with the specified _<key-alias>_, and then
+ called each time a `--trailer <key-alias>=<value>` argument is specified to
+ modify the _<value>_ of the trailer that this option would produce.
+
When the specified command is first called to add a trailer
-with the specified <keyAlias>, the behavior is as if a special
-'--trailer <keyAlias>=<value>' argument was added at the beginning
-of the "git interpret-trailers" command, where <value>
-is taken to be the standard output of the command with any
-leading and trailing whitespace trimmed off.
+with the specified _<key-alias>_, the behavior is as if a special
+`--trailer <key-alias>=<value>` argument was added at the beginning
+of linkgit:git-interpret-trailers[1], where _<value>_ is taken to be the
+standard output of the command with any leading and trailing whitespace
+trimmed off.
+
-If some '--trailer <keyAlias>=<value>' arguments are also passed
+If some `--trailer <key-alias>=<value>` arguments are also passed
on the command line, the command is called again once for each
-of these arguments with the same <keyAlias>. And the <value> part
+of these arguments with the same _<key-alias>_. And the _<value>_ part
of these arguments, if any, will be passed to the command as its
-first argument. This way the command can produce a <value> computed
-from the <value> passed in the '--trailer <keyAlias>=<value>' argument.
+first argument. This way the command can produce a _<value>_ computed
+from the _<value>_ passed in the `--trailer <key-alias>=<value>`
+argument.
diff --git a/Documentation/git-backfill.adoc b/Documentation/git-backfill.adoc
index b8394dcf22..246ab417c2 100644
--- a/Documentation/git-backfill.adoc
+++ b/Documentation/git-backfill.adoc
@@ -63,9 +63,12 @@ OPTIONS
current sparse-checkout. If the sparse-checkout feature is enabled,
then `--sparse` is assumed and can be disabled with `--no-sparse`.
+You may also specify the commit limiting options from linkgit:git-rev-list[1].
+
SEE ALSO
--------
-linkgit:git-clone[1].
+linkgit:git-clone[1],
+linkgit:git-rev-list[1]
GIT
---
diff --git a/Documentation/git-config.adoc b/Documentation/git-config.adoc
index 5300dd4c51..00545b2054 100644
--- a/Documentation/git-config.adoc
+++ b/Documentation/git-config.adoc
@@ -221,7 +221,7 @@ Use `--no-value` to unset _<pattern>_.
+
Valid `<type>`'s include:
+
-- 'bool': canonicalize values `true`, `yes`,`on`, and positive
+- 'bool': canonicalize values `true`, `yes`, `on`, and positive
numbers as "true", and values `false`, `no`, `off` and `0` as
"false".
- 'int': canonicalize values as simple decimal numbers. An optional suffix of
diff --git a/Documentation/git-fast-import.adoc b/Documentation/git-fast-import.adoc
index b3f42d4637..d68bc52b7e 100644
--- a/Documentation/git-fast-import.adoc
+++ b/Documentation/git-fast-import.adoc
@@ -66,11 +66,10 @@ fast-import stream! This option is enabled automatically for
remote-helpers that use the `import` capability, as they are
already trusted to run their own code.
-`--signed-tags=(verbatim|warn-verbatim|warn-strip|strip|abort)`::
+`--signed-tags=<mode>`::
Specify how to handle signed tags. Behaves in the same way as
- the `--signed-commits=<mode>` below, except that the
- `strip-if-invalid` mode is not yet supported. Like for signed
- commits, the default mode is `verbatim`.
+ the `--signed-commits=<mode>` below. Like for signed commits,
+ the default mode is `verbatim`.
`--signed-commits=<mode>`::
Specify how to handle signed commits. The following <mode>s
@@ -90,6 +89,8 @@ already trusted to run their own code.
commit signatures and replaces invalid signatures with newly created ones.
Valid signatures are left unchanged. If `<keyid>` is provided, that key is
used for signing; otherwise the configured default signing key is used.
+* `abort-if-invalid` will make this program die when encountering a signed
+  commit that cannot be verified.
Options for Frontends
~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/git-format-patch.adoc b/Documentation/git-format-patch.adoc
index 36146006fa..5662382450 100644
--- a/Documentation/git-format-patch.adoc
+++ b/Documentation/git-format-patch.adoc
@@ -24,6 +24,7 @@ SYNOPSIS
[(--reroll-count|-v) <n>]
[--to=<email>] [--cc=<email>]
[--[no-]cover-letter] [--quiet]
+ [--commit-list-format=<format-spec>]
[--[no-]encode-email-headers]
[--no-notes | --notes[=<ref>]]
[--interdiff=<previous>]
@@ -318,9 +319,21 @@ feeding the result to `git send-email`.
--cover-letter::
--no-cover-letter::
- In addition to the patches, generate a cover letter file
- containing the branch description, shortlog and the overall diffstat. You can
- fill in a description in the file before sending it out.
+ In addition to the patches, generate a cover letter file containing the
+ branch description, commit list and the overall diffstat. You can fill
+ in a description in the file before sending it out.
+
+--commit-list-format=<format-spec>::
+ Specify the format in which to generate the commit list of the patch
+ series. The accepted values for format-spec are `shortlog`, `modern` or
+ a format-string prefixed with `log:`. E.g. `log: %s (%an)`.
+ `modern` is the same as `log:%w(72)[%(count)/%(total)] %s`.
+ The `log:` prefix can be omitted if the format-string has a `%` in it
+ (expecting that it is part of `%<placeholder>`).
+ Defaults to the `format.commitListFormat` configuration variable, if
+ set, or `shortlog`.
+ This option given from the command-line implies the use of
+ `--cover-letter` unless `--no-cover-letter` is given.
--encode-email-headers::
--no-encode-email-headers::
@@ -453,6 +466,7 @@ with configuration variables.
signOff = true
outputDirectory = <directory>
coverLetter = auto
+ commitListFormat = shortlog
coverFromDescription = auto
------------
diff --git a/Documentation/git-hook.adoc b/Documentation/git-hook.adoc
index 12d2701b52..318c637bd8 100644
--- a/Documentation/git-hook.adoc
+++ b/Documentation/git-hook.adoc
@@ -8,8 +8,8 @@ git-hook - Run git hooks
SYNOPSIS
--------
[verse]
-'git hook' run [--ignore-missing] [--to-stdin=<path>] <hook-name> [-- <hook-args>]
-'git hook' list [-z] <hook-name>
+'git hook' run [--allow-unknown-hook-name] [--ignore-missing] [--to-stdin=<path>] <hook-name> [-- <hook-args>]
+'git hook' list [--allow-unknown-hook-name] [-z] [--show-scope] <hook-name>
DESCRIPTION
-----------
@@ -44,7 +44,7 @@ event`), and then `~/bin/spellchecker` will have a chance to check your commit
message (during the `commit-msg` hook event).
Commands are run in the order Git encounters their associated
-`hook.<name>.event` configs during the configuration parse (see
+`hook.<friendly-name>.event` configs during the configuration parse (see
linkgit:git-config[1]). Although multiple `hook.linter.event` configs can be
added, only one `hook.linter.command` event is valid - Git uses "last-one-wins"
to determine which command to run.
@@ -76,10 +76,10 @@ first start `~/bin/linter --cpp20` and second start `~/bin/leak-detector`. It
would evaluate the output of each when deciding whether to proceed with the
commit.
-For a full list of hook events which you can set your `hook.<name>.event` to,
+For a full list of hook events which you can set your `hook.<friendly-name>.event` to,
and how hooks are invoked during those events, see linkgit:githooks[5].
-Git will ignore any `hook.<name>.event` that specifies an event it doesn't
+Git will ignore any `hook.<friendly-name>.event` that specifies an event it doesn't
recognize. This is intended so that tools which wrap Git can use the hook
infrastructure to run their own hooks; see "WRAPPERS" for more guidance.
@@ -113,7 +113,7 @@ Any positional arguments to the hook should be passed after a
mandatory `--` (or `--end-of-options`, see linkgit:gitcli[7]). See
linkgit:githooks[5] for arguments hooks might expect (if any).
-list [-z]::
+list [-z] [--show-scope]::
Print a list of hooks which will be run on `<hook-name>` event. If no
hooks are configured for that event, print a warning and return 1.
Use `-z` to terminate output lines with NUL instead of newlines.
@@ -121,6 +121,13 @@ list [-z]::
OPTIONS
-------
+--allow-unknown-hook-name::
+ By default `git hook run` and `git hook list` will bail out when
+ `<hook-name>` is not a hook event known to Git (see linkgit:githooks[5]
+ for the list of known hooks). This is meant to help catch typos
+ such as `prereceive` when `pre-receive` was intended. Pass this
+ flag to allow unknown hook names.
+
--to-stdin::
For "run"; specify a file which will be streamed into the
hook's stdin. The hook will receive the entire file from
@@ -134,6 +141,12 @@ OPTIONS
-z::
Terminate "list" output lines with NUL instead of newlines.
+--show-scope::
+ For "list"; prefix each configured hook's friendly name with a
+ tab-separated config scope (e.g. `local`, `global`, `system`),
+ mirroring the output style of `git config --show-scope`. Traditional
+ hooks from the hookdir are unaffected.
+
WRAPPERS
--------
@@ -153,7 +166,7 @@ Then, in your 'mywrapper' tool, you can invoke any users' configured hooks by
running:
----
-git hook run mywrapper-start-tests \
+git hook run --allow-unknown-hook-name mywrapper-start-tests \
# providing something to stdin
--stdin some-tempfile-123 \
# execute hooks in serial
diff --git a/Documentation/git-interpret-trailers.adoc b/Documentation/git-interpret-trailers.adoc
index fd335fe772..77b4f63b05 100644
--- a/Documentation/git-interpret-trailers.adoc
+++ b/Documentation/git-interpret-trailers.adoc
@@ -7,14 +7,14 @@ git-interpret-trailers - Add or parse structured information in commit messages
SYNOPSIS
--------
-[verse]
-'git interpret-trailers' [--in-place] [--trim-empty]
+[synopsis]
+git interpret-trailers [--in-place] [--trim-empty]
[(--trailer (<key>|<key-alias>)[(=|:)<value>])...]
[--parse] [<file>...]
DESCRIPTION
-----------
-Add or parse 'trailer' lines that look similar to RFC 822 e-mail
+Add or parse _trailer_ lines that look similar to RFC 822 e-mail
headers, at the end of the otherwise free-form part of a commit
message. For example, in the following commit message
@@ -27,23 +27,24 @@ Signed-off-by: Alice <alice@example.com>
Signed-off-by: Bob <bob@example.com>
------------------------------------------------
-the last two lines starting with "Signed-off-by" are trailers.
+the last two lines starting with `Signed-off-by` are trailers.
This command reads commit messages from either the
-<file> arguments or the standard input if no <file> is specified.
+_<file>_ arguments or the standard input if no _<file>_ is specified.
If `--parse` is specified, the output consists of the parsed trailers
coming from the input, without influencing them with any command line
options or configuration variables.
-Otherwise, this command applies `trailer.*` configuration variables
-(which could potentially add new trailers, as well as reposition them),
-as well as any command line arguments that can override configuration
-variables (such as `--trailer=...` which could also add new trailers),
-to each input file. The result is emitted on the standard output.
+Otherwise, this command applies `trailer.<key-alias>` configuration
+variables (which could potentially add new trailers, as well as
+reposition them), as well as any command line arguments that can
+override configuration variables (such as `--trailer=...` which could
+also add new trailers), to each input file. The result is emitted on the
+standard output.
This command can also operate on the output of linkgit:git-format-patch[1],
which is more elaborate than a plain commit message. Namely, such output
-includes a commit message (as above), a "---" divider line, and a patch part.
+includes a commit message (as above), a `---` divider line, and a patch part.
For these inputs, the divider and patch parts are not modified by
this command and are emitted as is on the output, unless
`--no-divider` is specified.
@@ -53,24 +54,24 @@ are applied to each input and the way any existing trailer in
the input is changed. They also make it possible to
automatically add some trailers.
-By default, a '<key>=<value>' or '<key>:<value>' argument given
+By default, a `<key>=<value>` or `<key>:<value>` argument given
using `--trailer` will be appended after the existing trailers only if
-the last trailer has a different (<key>, <value>) pair (or if there
-is no existing trailer). The <key> and <value> parts will be trimmed
+the last trailer has a different (_<key>_, _<value>_) pair (or if there
+is no existing trailer). The _<key>_ and _<value>_ parts will be trimmed
to remove starting and trailing whitespace, and the resulting trimmed
-<key> and <value> will appear in the output like this:
+_<key>_ and _<value>_ will appear in the output like this:
------------------------------------------------
key: value
------------------------------------------------
-This means that the trimmed <key> and <value> will be separated by
-`': '` (one colon followed by one space).
+This means that the trimmed _<key>_ and _<value>_ will be separated by
+"`:`{nbsp}" (one colon followed by one space).
-For convenience, a <key-alias> can be configured to make using `--trailer`
+For convenience, a _<key-alias>_ can be configured to make using `--trailer`
shorter to type on the command line. This can be configured using the
-'trailer.<key-alias>.key' configuration variable. The <keyAlias> must be a prefix
-of the full <key> string, although case sensitivity does not matter. For
+`trailer.<key-alias>.key` configuration variable. The _<key-alias>_ must be a prefix
+of the full _<key>_ string, although case sensitivity does not matter. For
example, if you have
------------------------------------------------
@@ -91,13 +92,13 @@ least one Git-generated or user-configured trailer and consists of at
least 25% trailers.
The group must be preceded by one or more empty (or whitespace-only) lines.
The group must either be at the end of the input or be the last
-non-whitespace lines before a line that starts with '---' (followed by a
+non-whitespace lines before a line that starts with `---` (followed by a
space or the end of the line).
When reading trailers, there can be no whitespace before or inside the
-<key>, but any number of regular space and tab characters are allowed
-between the <key> and the separator. There can be whitespaces before,
-inside or after the <value>. The <value> may be split over multiple lines
+_<key>_, but any number of regular space and tab characters are allowed
+between the _<key>_ and the separator. There can be whitespaces before,
+inside or after the _<value>_. The _<value>_ may be split over multiple lines
with each subsequent line starting with at least one whitespace, like
the "folding" in RFC 822. Example:
@@ -111,77 +112,97 @@ rules for RFC 822 headers. For example they do not follow the encoding rule.
OPTIONS
-------
---in-place::
- Edit the files in place.
+`--in-place`::
+`--no-in-place`::
+ Edit the files in place. The default is `--no-in-place`.
---trim-empty::
- If the <value> part of any trailer contains only whitespace,
+`--trim-empty`::
+`--no-trim-empty`::
+ If the _<value>_ part of any trailer contains only whitespace,
the whole trailer will be removed from the output.
This applies to existing trailers as well as new trailers.
++
+The default is `--no-trim-empty`.
---trailer <key>[(=|:)<value>]::
- Specify a (<key>, <value>) pair that should be applied as a
- trailer to the inputs. See the description of this
- command.
+`--trailer=<key>[(=|:)<value>]`::
+`--no-trailer`::
+ Specify a (_<key>_, _<value>_) pair that should be applied as a
+ trailer to the inputs. See the description of this command. Can
+ be given multiple times.
++
+Use `--no-trailer` to reset the list.
---where <placement>::
---no-where::
+`--where=<placement>`::
+`--no-where`::
Specify where all new trailers will be added. A setting
- provided with '--where' overrides the `trailer.where` and any
- applicable `trailer.<keyAlias>.where` configuration variables
- and applies to all '--trailer' options until the next occurrence of
- '--where' or '--no-where'. Upon encountering '--no-where', clear the
- effect of any previous use of '--where', such that the relevant configuration
- variables are no longer overridden. Possible placements are `after`,
+ provided with `--where` overrides the `trailer.where` and any
+ applicable `trailer.<key-alias>.where` configuration variables
+ and applies to all `--trailer` options until the next occurrence of
+ `--where` or `--no-where`. Possible placements are `after`,
`before`, `end` or `start`.
++
+Use `--no-where` to clear the effect of any previous use of `--where`,
+such that the relevant configuration variables are no longer overridden.
---if-exists <action>::
---no-if-exists::
+`--if-exists=<action>`::
+`--no-if-exists`::
Specify what action will be performed when there is already at
- least one trailer with the same <key> in the input. A setting
- provided with '--if-exists' overrides the `trailer.ifExists` and any
- applicable `trailer.<keyAlias>.ifExists` configuration variables
- and applies to all '--trailer' options until the next occurrence of
- '--if-exists' or '--no-if-exists'. Upon encountering '--no-if-exists', clear the
- effect of any previous use of '--if-exists', such that the relevant configuration
- variables are no longer overridden. Possible actions are `addIfDifferent`,
+ least one trailer with the same _<key>_ in the input. A setting
+ provided with `--if-exists` overrides the `trailer.ifExists` and any
+ applicable `trailer.<key-alias>.ifExists` configuration variables
+ and applies to all `--trailer` options until the next occurrence of
+ `--if-exists` or `--no-if-exists`. Possible actions are `addIfDifferent`,
`addIfDifferentNeighbor`, `add`, `replace` and `doNothing`.
++
+Use `--no-if-exists` to clear the effect of any previous use of
+`--if-exists`, such that the relevant configuration variables are no
+longer overridden.
---if-missing <action>::
---no-if-missing::
+`--if-missing=<action>`::
+`--no-if-missing`::
Specify what action will be performed when there is no other
- trailer with the same <key> in the input. A setting
- provided with '--if-missing' overrides the `trailer.ifMissing` and any
- applicable `trailer.<keyAlias>.ifMissing` configuration variables
- and applies to all '--trailer' options until the next occurrence of
- '--if-missing' or '--no-if-missing'. Upon encountering '--no-if-missing',
- clear the effect of any previous use of '--if-missing', such that the relevant
- configuration variables are no longer overridden. Possible actions are `doNothing`
- or `add`.
+ trailer with the same _<key>_ in the input. A setting
+ provided with `--if-missing` overrides the `trailer.ifMissing` and any
+ applicable `trailer.<key-alias>.ifMissing` configuration variables
+ and applies to all `--trailer` options until the next occurrence of
+ `--if-missing` or `--no-if-missing`. Possible actions are
+ `doNothing` or `add`.
++
+Use `--no-if-missing` to clear the effect of any previous use of
+`--if-missing`, such that the relevant configuration variables are no
+longer overridden.
---only-trailers::
- Output only the trailers, not any other parts of the input.
+`--only-trailers`::
+`--no-only-trailers`::
+ Output only the trailers, not any other parts of the
+ input. The default is `--no-only-trailers`.
---only-input::
+`--only-input`::
+`--no-only-input`::
Output only trailers that exist in the input; do not add any
- from the command-line or by applying `trailer.*` configuration
- variables.
+ from the command-line or by applying `trailer.<key-alias>` configuration
+ variables. The default is `--no-only-input`.
---unfold::
+`--unfold`::
+`--no-unfold`::
If a trailer has a value that runs over multiple lines (aka "folded"),
- reformat the value into a single line.
+ reformat the value into a single line. The default is `--no-unfold`.
---parse::
+`--parse`::
A convenience alias for `--only-trailers --only-input
--unfold`. This makes it easier to only see the trailers coming from the
input without influencing them with any command line options or
configuration variables, while also making the output machine-friendly with
- --unfold.
+ `--unfold`.
++
+There is no convenience alias to negate this alias.
---no-divider::
- Do not treat `---` as the end of the commit message. Use this
- when you know your input contains just the commit message itself
- (and not an email or the output of `git format-patch`).
+`--divider`::
+`--no-divider`::
+ Treat `---` as the end of the commit message. This is the default.
+ Use `--no-divider` when you know your input contains just the
+ commit message itself (and not an email or the output of
+ linkgit:git-format-patch[1]).
CONFIGURATION VARIABLES
-----------------------
@@ -193,7 +214,7 @@ include::config/trailer.adoc[]
EXAMPLES
--------
-* Configure a 'sign' trailer with a 'Signed-off-by' key, and then
+* Configure a `sign` trailer with a `Signed-off-by` key, and then
add two of these trailers to a commit message file:
+
------------
@@ -230,8 +251,8 @@ Signed-off-by: Bob <bob@example.com>
Acked-by: Alice <alice@example.com>
------------
-* Extract the last commit as a patch, and add a 'Cc' and a
- 'Reviewed-by' trailer to it:
+* Extract the last commit as a patch, and add a `Cc` and a
+ `Reviewed-by` trailer to it:
+
------------
$ git format-patch -1
@@ -239,9 +260,9 @@ $ git format-patch -1
$ git interpret-trailers --trailer 'Cc: Alice <alice@example.com>' --trailer 'Reviewed-by: Bob <bob@example.com>' 0001-foo.patch >0001-bar.patch
------------
-* Configure a 'sign' trailer with a command to automatically add a
- 'Signed-off-by: ' with the author information only if there is no
- 'Signed-off-by: ' already, and show how it works:
+* Configure a `sign` trailer with a command to automatically add a
+ "`Signed-off-by:`{nbsp}" with the author information only if there is no
+ "`Signed-off-by:`{nbsp}" already, and show how it works:
+
------------
$ cat msg1.txt
@@ -272,7 +293,7 @@ body text
Signed-off-by: Alice <alice@example.com>
------------
-* Configure a 'fix' trailer with a key that contains a '#' and no
+* Configure a `fix` trailer with a key that contains a `#` and no
space after this character, and show how it works:
+
------------
@@ -284,7 +305,7 @@ subject
Fix #42
------------
-* Configure a 'help' trailer with a cmd use a script `glog-find-author`
+* Configure a `help` trailer with a cmd that uses a script `glog-find-author`
which search specified author identity from git log in git repository
and show how it works:
+
@@ -308,7 +329,7 @@ Helped-by: Junio C Hamano <gitster@pobox.com>
Helped-by: Christian Couder <christian.couder@gmail.com>
------------
-* Configure a 'ref' trailer with a cmd use a script `glog-grep`
+* Configure a `ref` trailer with a cmd that uses a script `glog-grep`
to grep last relevant commit from git log in the git repository
and show how it works:
+
@@ -331,7 +352,7 @@ body text
Reference-to: 8bc9a0c769 (Add copyright notices., 2005-04-07)
------------
-* Configure a 'see' trailer with a command to show the subject of a
+* Configure a `see` trailer with a command to show the subject of a
commit that is related, and show how it works:
+
------------
@@ -359,8 +380,8 @@ See-also: fe3187489d69c4 (subject of related commit)
* Configure a commit template with some trailers with empty values
(using sed to show and keep the trailing spaces at the end of the
trailers), then configure a commit-msg hook that uses
- 'git interpret-trailers' to remove trailers with empty values and
- to add a 'git-version' trailer:
+  linkgit:git-interpret-trailers[1] to remove trailers with empty values
+  and to add a `git-version` trailer:
+
------------
$ cat temp.txt
diff --git a/Documentation/git-multi-pack-index.adoc b/Documentation/git-multi-pack-index.adoc
index 2f642697e9..6125683014 100644
--- a/Documentation/git-multi-pack-index.adoc
+++ b/Documentation/git-multi-pack-index.adoc
@@ -9,7 +9,14 @@ git-multi-pack-index - Write and verify multi-pack-indexes
SYNOPSIS
--------
[verse]
-'git multi-pack-index' [--object-dir=<dir>] [--[no-]bitmap] <sub-command>
+'git multi-pack-index' [<options>] write [--preferred-pack=<pack>]
+ [--[no-]bitmap] [--[no-]incremental] [--[no-]stdin-packs]
+ [--refs-snapshot=<path>]
+'git multi-pack-index' [<options>] compact [--[no-]incremental]
+ [--[no-]bitmap] <from> <to>
+'git multi-pack-index' [<options>] verify
+'git multi-pack-index' [<options>] expire
+'git multi-pack-index' [<options>] repack [--batch-size=<size>]
DESCRIPTION
-----------
@@ -18,6 +25,8 @@ Write or verify a multi-pack-index (MIDX) file.
OPTIONS
-------
+The following command-line options are applicable to all sub-commands:
+
--object-dir=<dir>::
Use given directory for the location of Git objects. We check
`<dir>/packs/multi-pack-index` for the current MIDX file, and
@@ -73,7 +82,21 @@ marker).
Write an incremental MIDX file containing only objects
and packs not present in an existing MIDX layer.
Migrates non-incremental MIDXs to incremental ones when
- necessary. Incompatible with `--bitmap`.
+ necessary.
+--
+
+compact::
+ Write a new MIDX layer containing only objects and packs present
+ in the range `<from>` to `<to>`, where both arguments are
+ checksums of existing layers in the MIDX chain.
++
+--
+ --incremental::
+ Write the result to a MIDX chain instead of writing a
+ stand-alone MIDX.
+
+ --[no-]bitmap::
+ Control whether or not a multi-pack bitmap is written.
--
verify::
diff --git a/Documentation/git-pack-objects.adoc b/Documentation/git-pack-objects.adoc
index 71b9682485..b78175fbe1 100644
--- a/Documentation/git-pack-objects.adoc
+++ b/Documentation/git-pack-objects.adoc
@@ -94,13 +94,24 @@ base-name::
included packs (those not beginning with `^`), excluding any
objects listed in the excluded packs (beginning with `^`).
+
-When `mode` is "follow", objects from packs not listed on stdin receive
-special treatment. Objects within unlisted packs will be included if
-those objects are (1) reachable from the included packs, and (2) not
-found in any excluded packs. This mode is useful, for example, to
-resurrect once-unreachable objects found in cruft packs to generate
-packs which are closed under reachability up to the boundary set by the
-excluded packs.
+When `mode` is "follow" packs may additionally be prefixed with `!`,
+indicating that they are excluded but not necessarily closed under
+reachability. In addition to objects in included packs, the resulting
+pack may include additional objects based on the following:
++
+--
+* If any packs are marked with `!`, then objects reachable from such
+ packs or included ones via objects outside of excluded-closed packs
+ will be included. In this case, all `^` packs are treated as closed
+ under reachability.
+* Otherwise (if there are no `!` packs), objects within unlisted packs
+ will be included if those objects are (1) reachable from the
+ included packs, and (2) not found in any excluded packs.
+--
++
+This mode is useful, for example, to resurrect once-unreachable
+objects found in cruft packs to generate packs which are closed under
+reachability up to the boundary set by the excluded packs.
+
Incompatible with `--revs`, or options that imply `--revs` (such as
`--all`), with the exception of `--unpacked`, which is compatible.
diff --git a/Documentation/git-stash.adoc b/Documentation/git-stash.adoc
index 235d57ddd8..b05c990ecd 100644
--- a/Documentation/git-stash.adoc
+++ b/Documentation/git-stash.adoc
@@ -14,10 +14,10 @@ git stash drop [-q | --quiet] [<stash>]
git stash pop [--index] [-q | --quiet] [<stash>]
git stash apply [--index] [-q | --quiet] [<stash>]
git stash branch <branchname> [<stash>]
-git stash [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
+git stash [push] [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
[-u | --include-untracked] [-a | --all] [(-m | --message) <message>]
[--pathspec-from-file=<file> [--pathspec-file-nul]]
- [--] [<pathspec>...]]
+ [--] [<pathspec>...]
git stash save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
[-u | --include-untracked] [-a | --all] [<message>]
git stash clear
@@ -60,10 +60,8 @@ COMMANDS
the description along with the stashed state.
+
For quickly making a snapshot, you can omit "push". In this mode,
-non-option arguments are not allowed to prevent a misspelled
-subcommand from making an unwanted stash entry. The two exceptions to this
-are `stash -p` which acts as alias for `stash push -p` and pathspec elements,
-which are allowed after a double hyphen `--` for disambiguation.
+pathspec elements are only allowed after a double hyphen `--`
+to prevent a misspelled subcommand from making an unwanted stash entry.
`save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-u | --include-untracked] [-a | --all] [-q | --quiet] [<message>]`::
diff --git a/Documentation/gitformat-pack.adoc b/Documentation/gitformat-pack.adoc
index 1b4db4aa61..3416edceab 100644
--- a/Documentation/gitformat-pack.adoc
+++ b/Documentation/gitformat-pack.adoc
@@ -374,7 +374,9 @@ HEADER:
The signature is: {'M', 'I', 'D', 'X'}
1-byte version number:
- Git only writes or recognizes version 1.
+ Git writes the version specified by the "midx.version"
+ configuration option, which defaults to 2. It recognizes
+ both versions 1 and 2.
1-byte Object Id Version
We infer the length of object IDs (OIDs) from this value:
@@ -413,7 +415,9 @@ CHUNK DATA:
strings. There is no extra padding between the filenames,
and they are listed in lexicographic order. The chunk itself
is padded at the end with between 0 and 3 NUL bytes to make the
- chunk size a multiple of 4 bytes.
+ chunk size a multiple of 4 bytes. Version 1 MIDXs are required to
+ list their packs in lexicographic order, but version 2 MIDXs may
+ list their packs in any arbitrary order.
Bitmapped Packfiles (ID: {'B', 'T', 'M', 'P'})
Stores a table of two 4-byte unsigned integers in network order.
diff --git a/Documentation/githooks.adoc b/Documentation/githooks.adoc
index 056553788d..ed045940d1 100644
--- a/Documentation/githooks.adoc
+++ b/Documentation/githooks.adoc
@@ -484,13 +484,16 @@ reference-transaction
~~~~~~~~~~~~~~~~~~~~~
This hook is invoked by any Git command that performs reference
-updates. It executes whenever a reference transaction is prepared,
-committed or aborted and may thus get called multiple times. The hook
-also supports symbolic reference updates.
+updates. It executes whenever a reference transaction is preparing,
+prepared, committed or aborted and may thus get called multiple times.
+The hook also supports symbolic reference updates.
The hook takes exactly one argument, which is the current state the
given reference transaction is in:
+ - "preparing": All reference updates have been queued to the
+ transaction but references are not yet locked on disk.
+
- "prepared": All reference updates have been queued to the
transaction and references were locked on disk.
@@ -511,16 +514,18 @@ ref and `<ref-name>` is the full name of the ref. When force updating
the reference regardless of its current value or when the reference is
to be created anew, `<old-value>` is the all-zeroes object name. To
distinguish these cases, you can inspect the current value of
-`<ref-name>` via `git rev-parse`.
+`<ref-name>` via `git rev-parse`. During the "preparing" state, symbolic
+references are not resolved: `<ref-name>` will reflect the symbolic reference
+itself rather than the object it points to.
For symbolic reference updates the `<old_value>` and `<new-value>`
fields could denote references instead of objects. A reference will be
denoted with a 'ref:' prefix, like `ref:<ref-target>`.
The exit status of the hook is ignored for any state except for the
-"prepared" state. In the "prepared" state, a non-zero exit status will
-cause the transaction to be aborted. The hook will not be called with
-"aborted" state in that case.
+"preparing" and "prepared" states. In these states, a non-zero exit
+status will cause the transaction to be aborted. The hook will not be
+called with "aborted" state in that case.
push-to-checkout
~~~~~~~~~~~~~~~~
diff --git a/Documentation/gitignore.adoc b/Documentation/gitignore.adoc
index 9fccab4ae8..a3d24e5c34 100644
--- a/Documentation/gitignore.adoc
+++ b/Documentation/gitignore.adoc
@@ -96,6 +96,11 @@ PATTERN FORMAT
particular `.gitignore` file itself. Otherwise the pattern may also
match at any level below the `.gitignore` level.
+ - Patterns read from exclude sources that are outside the working tree,
+   such as `$GIT_DIR/info/exclude` and `core.excludesFile`, are treated as
+   if they are specified at the root of the working tree, i.e. a leading
+   "/" in such patterns anchors the match at the root of the working tree.
+
- If there is a separator at the end of the pattern then the pattern
will only match directories, otherwise the pattern can match both
files and directories.
diff --git a/Documentation/line-range-options.adoc b/Documentation/line-range-options.adoc
index c44ba05320..ecb2c79fb9 100644
--- a/Documentation/line-range-options.adoc
+++ b/Documentation/line-range-options.adoc
@@ -12,4 +12,8 @@
(namely `--raw`, `--numstat`, `--shortstat`, `--dirstat`, `--summary`,
`--name-only`, `--name-status`, `--check`) are not currently implemented.
+
+Patch formatting options such as `--word-diff`, `--color-moved`,
+`--no-prefix`, and whitespace options (`-w`, `-b`) are supported,
+as are pickaxe options (`-S`, `-G`).
++
include::line-range-format.adoc[]
diff --git a/Documentation/pretty-formats.adoc b/Documentation/pretty-formats.adoc
index 5405e57a60..2ae0eb11a9 100644
--- a/Documentation/pretty-formats.adoc
+++ b/Documentation/pretty-formats.adoc
@@ -253,6 +253,10 @@ The placeholders are:
linkgit:git-rev-list[1])
+%d+:: ref names, like the --decorate option of linkgit:git-log[1]
+%D+:: ref names without the " (", ")" wrapping.
++%(count)+:: the number of a patch within a patch series. Used only in
+ `--commit-list-format` in `format-patch`
++%(total)+:: the total number of patches in a patch series. Used only in
+ `--commit-list-format` in `format-patch`
++%(decorate++`[:<option>,...]`++)++::
ref names with custom decorations. The `decorate` string may be followed by a
colon and zero or more comma-separated options. Option values may contain
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 44240e07b8..9c55beb496 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,6 +1,6 @@
#!/bin/sh
-DEF_VER=v2.53.GIT
+DEF_VER=v2.54.0-rc1
LF='
'
diff --git a/Makefile b/Makefile
index bf2228de9d..5d22394c2e 100644
--- a/Makefile
+++ b/Makefile
@@ -1005,8 +1005,8 @@ SPATCH_TEST_FLAGS =
# COMPUTE_HEADER_DEPENDENCIES=no this will be unset too.
SPATCH_USE_O_DEPENDENCIES = YesPlease
-# Set SPATCH_CONCAT_COCCI to concatenate the contrib/cocci/*.cocci
-# files into a single contrib/cocci/ALL.cocci before running
+# Set SPATCH_CONCAT_COCCI to concatenate the tools/coccinelle/*.cocci
+# files into a single tools/coccinelle/ALL.cocci before running
# "coccicheck".
#
# Pros:
@@ -1025,7 +1025,7 @@ SPATCH_USE_O_DEPENDENCIES = YesPlease
# generate a specific patch, e.g. this will always use strbuf.cocci,
# not ALL.cocci:
#
-# make contrib/coccinelle/strbuf.cocci.patch
+# make tools/coccinelle/strbuf.cocci.patch
SPATCH_CONCAT_COCCI = YesPlease
# Rebuild 'coccicheck' if $(SPATCH), its flags etc. change
@@ -1066,11 +1066,13 @@ SOURCES_CMD = ( \
'*.sh' \
':!*[tp][0-9][0-9][0-9][0-9]*' \
':!contrib' \
+ ':!tools' \
2>/dev/null || \
$(FIND) . \
\( -name .git -type d -prune \) \
-o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \
-o \( -name contrib -type d -prune \) \
+ -o \( -name tools -type d -prune \) \
-o \( -name build -type d -prune \) \
-o \( -name .build -type d -prune \) \
-o \( -name 'trash*' -type d -prune \) \
@@ -2673,6 +2675,7 @@ git$X: git.o GIT-LDFLAGS $(BUILTIN_OBJS) $(GITLIBS)
help.sp help.s help.o: command-list.h
builtin/bugreport.sp builtin/bugreport.s builtin/bugreport.o: hook-list.h
+builtin/hook.sp builtin/hook.s builtin/hook.o: hook-list.h
builtin/help.sp builtin/help.s builtin/help.o: config-list.h GIT-PREFIX
builtin/help.sp builtin/help.s builtin/help.o: EXTRA_CPPFLAGS = \
@@ -2697,21 +2700,21 @@ $(BUILT_INS): git$X
ln -s $< $@ 2>/dev/null || \
cp $< $@
-config-list.h: generate-configlist.sh
+config-list.h: tools/generate-configlist.sh
@mkdir -p .depend
- $(QUIET_GEN)$(SHELL_PATH) ./generate-configlist.sh . $@ .depend/config-list.h.d
+ $(QUIET_GEN)$(SHELL_PATH) ./tools/generate-configlist.sh . $@ .depend/config-list.h.d
-include .depend/config-list.h.d
-command-list.h: generate-cmdlist.sh command-list.txt
+command-list.h: tools/generate-cmdlist.sh command-list.txt
command-list.h: $(wildcard Documentation/git*.adoc)
- $(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh \
+ $(QUIET_GEN)$(SHELL_PATH) ./tools/generate-cmdlist.sh \
$(patsubst %,--exclude-program %,$(EXCLUDED_PROGRAMS)) \
. $@
-hook-list.h: generate-hooklist.sh Documentation/githooks.adoc
- $(QUIET_GEN)$(SHELL_PATH) ./generate-hooklist.sh . $@
+hook-list.h: tools/generate-hooklist.sh Documentation/githooks.adoc
+ $(QUIET_GEN)$(SHELL_PATH) ./tools/generate-hooklist.sh . $@
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):\
$(localedir_SQ):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
@@ -2724,8 +2727,8 @@ GIT-SCRIPT-DEFINES: FORCE
echo "$$FLAGS" >$@; \
fi
-$(SCRIPT_SH_GEN) $(SCRIPT_LIB) : % : %.sh generate-script.sh GIT-BUILD-OPTIONS GIT-SCRIPT-DEFINES
- $(QUIET_GEN)./generate-script.sh "$<" "$@+" ./GIT-BUILD-OPTIONS && \
+$(SCRIPT_SH_GEN) $(SCRIPT_LIB) : % : %.sh tools/generate-script.sh GIT-BUILD-OPTIONS GIT-SCRIPT-DEFINES
+ $(QUIET_GEN)./tools/generate-script.sh "$<" "$@+" ./GIT-BUILD-OPTIONS && \
mv $@+ $@
git.rc: git.rc.in GIT-VERSION-GEN GIT-VERSION-FILE
@@ -2765,8 +2768,8 @@ endif
PERL_DEFINES += $(gitexecdir) $(perllibdir) $(localedir)
-$(SCRIPT_PERL_GEN): % : %.perl generate-perl.sh GIT-PERL-DEFINES GIT-PERL-HEADER GIT-VERSION-FILE
- $(QUIET_GEN)$(SHELL_PATH) generate-perl.sh ./GIT-BUILD-OPTIONS ./GIT-VERSION-FILE GIT-PERL-HEADER "$<" "$@+" && \
+$(SCRIPT_PERL_GEN): % : %.perl tools/generate-perl.sh GIT-PERL-DEFINES GIT-PERL-HEADER GIT-VERSION-FILE
+ $(QUIET_GEN)$(SHELL_PATH) tools/generate-perl.sh ./GIT-BUILD-OPTIONS ./GIT-VERSION-FILE GIT-PERL-HEADER "$<" "$@+" && \
mv $@+ $@
PERL_DEFINES := $(subst $(space),:,$(PERL_DEFINES))
@@ -2794,8 +2797,8 @@ GIT-PERL-HEADER: $(PERL_HEADER_TEMPLATE) GIT-PERL-DEFINES Makefile
perllibdir:
@echo '$(perllibdir_SQ)'
-git-instaweb: git-instaweb.sh generate-script.sh GIT-BUILD-OPTIONS GIT-SCRIPT-DEFINES
- $(QUIET_GEN)./generate-script.sh "$<" "$@+" ./GIT-BUILD-OPTIONS && \
+git-instaweb: git-instaweb.sh tools/generate-script.sh GIT-BUILD-OPTIONS GIT-SCRIPT-DEFINES
+ $(QUIET_GEN)./tools/generate-script.sh "$<" "$@+" ./GIT-BUILD-OPTIONS && \
chmod +x $@+ && \
mv $@+ $@
else # NO_PERL
@@ -2812,9 +2815,9 @@ endif # NO_PERL
$(SCRIPT_PYTHON_GEN): GIT-BUILD-OPTIONS
ifndef NO_PYTHON
-$(SCRIPT_PYTHON_GEN): generate-python.sh
+$(SCRIPT_PYTHON_GEN): tools/generate-python.sh
$(SCRIPT_PYTHON_GEN): % : %.py
- $(QUIET_GEN)$(SHELL_PATH) generate-python.sh ./GIT-BUILD-OPTIONS "$<" "$@"
+ $(QUIET_GEN)$(SHELL_PATH) tools/generate-python.sh ./GIT-BUILD-OPTIONS "$<" "$@"
else # NO_PYTHON
$(SCRIPT_PYTHON_GEN): % : unimplemented.sh
$(QUIET_GEN) \
@@ -3234,9 +3237,9 @@ endif
NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS))
endif
-perl/build/lib/%.pm: perl/%.pm generate-perl.sh GIT-BUILD-OPTIONS GIT-VERSION-FILE GIT-PERL-DEFINES
+perl/build/lib/%.pm: perl/%.pm tools/generate-perl.sh GIT-BUILD-OPTIONS GIT-VERSION-FILE GIT-PERL-DEFINES
$(call mkdir_p_parent_template)
- $(QUIET_GEN)$(SHELL_PATH) generate-perl.sh ./GIT-BUILD-OPTIONS ./GIT-VERSION-FILE GIT-PERL-HEADER "$<" "$@"
+ $(QUIET_GEN)$(SHELL_PATH) tools/generate-perl.sh ./GIT-BUILD-OPTIONS ./GIT-VERSION-FILE GIT-PERL-HEADER "$<" "$@"
perl/build/man/man3/Git.3pm: perl/Git.pm
$(call mkdir_p_parent_template)
@@ -3465,15 +3468,15 @@ check:
exit 1; \
fi
-COCCI_GEN_ALL = .build/contrib/coccinelle/ALL.cocci
-COCCI_GLOB = $(wildcard contrib/coccinelle/*.cocci)
+COCCI_GEN_ALL = .build/tools/coccinelle/ALL.cocci
+COCCI_GLOB = $(wildcard tools/coccinelle/*.cocci)
COCCI_RULES_TRACKED = $(COCCI_GLOB:%=.build/%)
COCCI_RULES_TRACKED_NO_PENDING = $(filter-out %.pending.cocci,$(COCCI_RULES_TRACKED))
COCCI_RULES =
COCCI_RULES += $(COCCI_GEN_ALL)
COCCI_RULES += $(COCCI_RULES_TRACKED)
COCCI_NAMES =
-COCCI_NAMES += $(COCCI_RULES:.build/contrib/coccinelle/%.cocci=%)
+COCCI_NAMES += $(COCCI_RULES:.build/tools/coccinelle/%.cocci=%)
COCCICHECK_PENDING = $(filter %.pending.cocci,$(COCCI_RULES))
COCCICHECK = $(filter-out $(COCCICHECK_PENDING),$(COCCI_RULES))
@@ -3488,20 +3491,20 @@ COCCICHECK_PATCHES_PENDING_INTREE = $(COCCICHECK_PATCHES_PENDING:.build/%=%)
# on $(MAKECMDGOALS) that match these $(COCCI_RULES)
COCCI_RULES_GLOB =
COCCI_RULES_GLOB += cocci%
-COCCI_RULES_GLOB += .build/contrib/coccinelle/%
+COCCI_RULES_GLOB += .build/tools/coccinelle/%
COCCI_RULES_GLOB += $(COCCICHECK_PATCHES)
COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING)
COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_INTREE)
COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING_INTREE)
COCCI_GOALS = $(filter $(COCCI_RULES_GLOB),$(MAKECMDGOALS))
-COCCI_TEST_RES = $(wildcard contrib/coccinelle/tests/*.res)
+COCCI_TEST_RES = $(wildcard tools/coccinelle/tests/*.res)
$(COCCI_RULES_TRACKED): .build/% : %
$(call mkdir_p_parent_template)
$(QUIET_CP)cp $< $@
-.build/contrib/coccinelle/FOUND_H_SOURCES: $(FOUND_H_SOURCES)
+.build/tools/coccinelle/FOUND_H_SOURCES: $(FOUND_H_SOURCES)
$(call mkdir_p_parent_template)
$(QUIET_GEN) >$@
@@ -3515,12 +3518,12 @@ endif
define cocci-rule
## Rule for .build/$(1).patch/$(2); Params:
-# $(1) = e.g. ".build/contrib/coccinelle/free.cocci"
+# $(1) = e.g. ".build/tools/coccinelle/free.cocci"
# $(2) = e.g. "grep.c"
# $(3) = e.g. "grep.o"
-COCCI_$(1:.build/contrib/coccinelle/%.cocci=%) += $(1).d/$(2).patch
+COCCI_$(1:.build/tools/coccinelle/%.cocci=%) += $(1).d/$(2).patch
$(1).d/$(2).patch: GIT-SPATCH-DEFINES
-$(1).d/$(2).patch: $(if $(and $(SPATCH_USE_O_DEPENDENCIES),$(wildcard $(3))),$(3),.build/contrib/coccinelle/FOUND_H_SOURCES)
+$(1).d/$(2).patch: $(if $(and $(SPATCH_USE_O_DEPENDENCIES),$(wildcard $(3))),$(3),.build/tools/coccinelle/FOUND_H_SOURCES)
$(1).d/$(2).patch: $(1)
$(1).d/$(2).patch: $(1).d/%.patch : %
$$(call mkdir_p_parent_template)
@@ -3546,13 +3549,13 @@ endif
define spatch-rule
-.build/contrib/coccinelle/$(1).cocci.patch: $$(COCCI_$(1))
+.build/tools/coccinelle/$(1).cocci.patch: $$(COCCI_$(1))
$$(QUIET_SPATCH_CAT)cat $$^ >$$@ && \
if test -s $$@; \
then \
echo ' ' SPATCH result: $$@; \
fi
-contrib/coccinelle/$(1).cocci.patch: .build/contrib/coccinelle/$(1).cocci.patch
+tools/coccinelle/$(1).cocci.patch: .build/tools/coccinelle/$(1).cocci.patch
$$(QUIET_CP)cp $$< $$@
endef
@@ -3566,9 +3569,9 @@ $(COCCI_TEST_RES_GEN): GIT-SPATCH-DEFINES
$(COCCI_TEST_RES_GEN): .build/%.res : %.c
$(COCCI_TEST_RES_GEN): .build/%.res : %.res
ifdef SPATCH_CONCAT_COCCI
-$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : $(COCCI_GEN_ALL)
+$(COCCI_TEST_RES_GEN): .build/tools/coccinelle/tests/%.res : $(COCCI_GEN_ALL)
else
-$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinelle/%.cocci
+$(COCCI_TEST_RES_GEN): .build/tools/coccinelle/tests/%.res : tools/coccinelle/%.cocci
endif
$(call mkdir_p_parent_template)
$(QUIET_SPATCH_TEST)$(SPATCH) $(SPATCH_TEST_FLAGS) \
@@ -3584,14 +3587,14 @@ coccicheck-test: $(COCCI_TEST_RES_GEN)
coccicheck: coccicheck-test
ifdef SPATCH_CONCAT_COCCI
-COCCICHECK_PATCH_MUST_BE_EMPTY_FILES = contrib/coccinelle/ALL.cocci.patch
+COCCICHECK_PATCH_MUST_BE_EMPTY_FILES = tools/coccinelle/ALL.cocci.patch
else
COCCICHECK_PATCH_MUST_BE_EMPTY_FILES = $(COCCICHECK_PATCHES_INTREE)
endif
coccicheck: $(COCCICHECK_PATCH_MUST_BE_EMPTY_FILES)
! grep ^ $(COCCICHECK_PATCH_MUST_BE_EMPTY_FILES) /dev/null
-# See contrib/coccinelle/README
+# See tools/coccinelle/README
coccicheck-pending: coccicheck-test
coccicheck-pending: $(COCCICHECK_PATCHES_PENDING_INTREE)
@@ -3865,8 +3868,8 @@ profile-clean:
cocciclean:
$(RM) GIT-SPATCH-DEFINES
- $(RM) -r .build/contrib/coccinelle
- $(RM) contrib/coccinelle/*.cocci.patch
+ $(RM) -r .build/tools/coccinelle
+ $(RM) tools/coccinelle/*.cocci.patch
clean: profile-clean coverage-clean cocciclean
$(RM) -r .build $(UNIT_TEST_BIN)
@@ -3944,7 +3947,7 @@ check-docs::
### Make sure built-ins do not have dups and listed in git.c
#
check-builtins::
- ./check-builtins.sh
+ ./tools/check-builtins.sh
### Test suite coverage testing
#
diff --git a/add-patch.c b/add-patch.c
index 4e28e5c187..f27edcbe8d 100644
--- a/add-patch.c
+++ b/add-patch.c
@@ -558,8 +558,8 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
strvec_push(&args,
/* could be on an unborn branch */
!strcmp("HEAD", s->revision) &&
- repo_get_oid(the_repository, "HEAD", &oid) ?
- empty_tree_oid_hex(the_repository->hash_algo) : s->revision);
+ repo_get_oid(s->r, "HEAD", &oid) ?
+ empty_tree_oid_hex(s->r->hash_algo) : s->revision);
}
color_arg_index = args.nr;
/* Use `--no-color` explicitly, just in case `diff.color = always`. */
@@ -1271,7 +1271,7 @@ static int edit_hunk_manually(struct add_p_state *s, struct hunk *hunk)
"removed, then the edit is\n"
"aborted and the hunk is left unchanged.\n"));
- if (strbuf_edit_interactively(the_repository, &s->buf,
+ if (strbuf_edit_interactively(s->r, &s->buf,
"addp-hunk-edit.diff", NULL) < 0)
return -1;
@@ -1679,7 +1679,7 @@ static size_t patch_update_file(struct add_p_state *s,
if (file_diff->hunk_nr) {
if (rendered_hunk_index != hunk_index) {
if (use_pager) {
- setup_pager(the_repository);
+ setup_pager(s->r);
sigchain_push(SIGPIPE, SIG_IGN);
}
render_hunk(s, hunk, 0, colored, &s->buf);
diff --git a/apply.c b/apply.c
index b6dd1066a0..4aa1694cfa 100644
--- a/apply.c
+++ b/apply.c
@@ -42,6 +42,7 @@
struct gitdiff_data {
struct strbuf *root;
+ const char *patch_input_file;
int linenr;
int p_value;
};
@@ -900,7 +901,8 @@ static int parse_traditional_patch(struct apply_state *state,
}
}
if (!name)
- return error(_("unable to find filename in patch at line %d"), state->linenr);
+ return error(_("unable to find filename in patch at %s:%d"),
+ state->patch_input_file, state->linenr);
return 0;
}
@@ -937,20 +939,35 @@ static int gitdiff_verify_name(struct gitdiff_data *state,
if (*name) {
char *another;
- if (isnull)
+ if (isnull) {
+ if (state->patch_input_file)
+ return error(_("git apply: bad git-diff - expected /dev/null, got %s at %s:%d"),
+ *name, state->patch_input_file, state->linenr);
return error(_("git apply: bad git-diff - expected /dev/null, got %s on line %d"),
*name, state->linenr);
+ }
another = find_name(state->root, line, NULL, state->p_value, TERM_TAB);
if (!another || strcmp(another, *name)) {
free(another);
+ if (state->patch_input_file)
+ return error((side == DIFF_NEW_NAME) ?
+ _("git apply: bad git-diff - inconsistent new filename at %s:%d") :
+ _("git apply: bad git-diff - inconsistent old filename at %s:%d"),
+ state->patch_input_file, state->linenr);
return error((side == DIFF_NEW_NAME) ?
- _("git apply: bad git-diff - inconsistent new filename on line %d") :
- _("git apply: bad git-diff - inconsistent old filename on line %d"), state->linenr);
+ _("git apply: bad git-diff - inconsistent new filename on line %d") :
+ _("git apply: bad git-diff - inconsistent old filename on line %d"),
+ state->linenr);
}
free(another);
} else {
- if (!is_dev_null(line))
- return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr);
+ if (!is_dev_null(line)) {
+ if (state->patch_input_file)
+ return error(_("git apply: bad git-diff - expected /dev/null at %s:%d"),
+ state->patch_input_file, state->linenr);
+ return error(_("git apply: bad git-diff - expected /dev/null on line %d"),
+ state->linenr);
+ }
}
return 0;
@@ -974,12 +991,19 @@ static int gitdiff_newname(struct gitdiff_data *state,
DIFF_NEW_NAME);
}
-static int parse_mode_line(const char *line, int linenr, unsigned int *mode)
+static int parse_mode_line(const char *line,
+ const char *patch_input_file,
+ int linenr,
+ unsigned int *mode)
{
char *end;
*mode = strtoul(line, &end, 8);
- if (end == line || !isspace(*end))
+ if (end == line || !isspace(*end)) {
+ if (patch_input_file)
+ return error(_("invalid mode at %s:%d: %s"),
+ patch_input_file, linenr, line);
return error(_("invalid mode on line %d: %s"), linenr, line);
+ }
*mode = canon_mode(*mode);
return 0;
}
@@ -988,14 +1012,16 @@ static int gitdiff_oldmode(struct gitdiff_data *state,
const char *line,
struct patch *patch)
{
- return parse_mode_line(line, state->linenr, &patch->old_mode);
+ return parse_mode_line(line, state->patch_input_file, state->linenr,
+ &patch->old_mode);
}
static int gitdiff_newmode(struct gitdiff_data *state,
const char *line,
struct patch *patch)
{
- return parse_mode_line(line, state->linenr, &patch->new_mode);
+ return parse_mode_line(line, state->patch_input_file, state->linenr,
+ &patch->new_mode);
}
static int gitdiff_delete(struct gitdiff_data *state,
@@ -1314,6 +1340,7 @@ static int check_header_line(int linenr, struct patch *patch)
}
int parse_git_diff_header(struct strbuf *root,
+ const char *patch_input_file,
int *linenr,
int p_value,
const char *line,
@@ -1345,6 +1372,7 @@ int parse_git_diff_header(struct strbuf *root,
size -= len;
(*linenr)++;
parse_hdr_state.root = root;
+ parse_hdr_state.patch_input_file = patch_input_file;
parse_hdr_state.linenr = *linenr;
parse_hdr_state.p_value = p_value;
@@ -1382,6 +1410,7 @@ int parse_git_diff_header(struct strbuf *root,
int res;
if (len < oplen || memcmp(p->str, line, oplen))
continue;
+ parse_hdr_state.linenr = *linenr;
res = p->fn(&parse_hdr_state, line + oplen, patch);
if (res < 0)
return -1;
@@ -1396,12 +1425,20 @@ int parse_git_diff_header(struct strbuf *root,
done:
if (!patch->old_name && !patch->new_name) {
if (!patch->def_name) {
- error(Q_("git diff header lacks filename information when removing "
- "%d leading pathname component (line %d)",
- "git diff header lacks filename information when removing "
- "%d leading pathname components (line %d)",
- parse_hdr_state.p_value),
- parse_hdr_state.p_value, *linenr);
+ if (patch_input_file)
+ error(Q_("git diff header lacks filename information when removing "
+ "%d leading pathname component at %s:%d",
+ "git diff header lacks filename information when removing "
+ "%d leading pathname components at %s:%d",
+ parse_hdr_state.p_value),
+ parse_hdr_state.p_value, patch_input_file, *linenr);
+ else
+ error(Q_("git diff header lacks filename information when removing "
+ "%d leading pathname component (line %d)",
+ "git diff header lacks filename information when removing "
+ "%d leading pathname components (line %d)",
+ parse_hdr_state.p_value),
+ parse_hdr_state.p_value, *linenr);
return -128;
}
patch->old_name = xstrdup(patch->def_name);
@@ -1409,8 +1446,12 @@ done:
}
if ((!patch->new_name && !patch->is_delete) ||
(!patch->old_name && !patch->is_new)) {
- error(_("git diff header lacks filename information "
- "(line %d)"), *linenr);
+ if (patch_input_file)
+ error(_("git diff header lacks filename information at %s:%d"),
+ patch_input_file, *linenr);
+ else
+ error(_("git diff header lacks filename information (line %d)"),
+ *linenr);
return -128;
}
patch->is_toplevel_relative = 1;
@@ -1577,8 +1618,9 @@ static int find_header(struct apply_state *state,
struct fragment dummy;
if (parse_fragment_header(line, len, &dummy) < 0)
continue;
- error(_("patch fragment without header at line %d: %.*s"),
- state->linenr, (int)len-1, line);
+ error(_("patch fragment without header at %s:%d: %.*s"),
+ state->patch_input_file, state->linenr,
+ (int)len-1, line);
return -128;
}
@@ -1590,7 +1632,9 @@ static int find_header(struct apply_state *state,
* or mode change, so we handle that specially
*/
if (!memcmp("diff --git ", line, 11)) {
- int git_hdr_len = parse_git_diff_header(&state->root, &state->linenr,
+ int git_hdr_len = parse_git_diff_header(&state->root,
+ state->patch_input_file,
+ &state->linenr,
state->p_value, line, len,
size, patch);
if (git_hdr_len < 0)
@@ -1796,8 +1840,16 @@ static int parse_fragment(struct apply_state *state,
trailing++;
check_old_for_crlf(patch, line, len);
if (!state->apply_in_reverse &&
- state->ws_error_action == correct_ws_error)
- check_whitespace(state, line, len, patch->ws_rule);
+ state->ws_error_action == correct_ws_error) {
+ const char *test_line = line;
+ int test_len = len;
+ if (*line == '\n') {
+ test_line = " \n";
+ test_len = 2;
+ }
+ check_whitespace(state, test_line, test_len,
+ patch->ws_rule);
+ }
break;
case '-':
if (!state->apply_in_reverse)
@@ -1875,7 +1927,8 @@ static int parse_single_patch(struct apply_state *state,
len = parse_fragment(state, line, size, patch, fragment);
if (len <= 0) {
free(fragment);
- return error(_("corrupt patch at line %d"), state->linenr);
+ return error(_("corrupt patch at %s:%d"),
+ state->patch_input_file, state->linenr);
}
fragment->patch = line;
fragment->size = len;
@@ -2065,8 +2118,8 @@ static struct fragment *parse_binary_hunk(struct apply_state *state,
corrupt:
free(data);
*status_p = -1;
- error(_("corrupt binary patch at line %d: %.*s"),
- state->linenr-1, llen-1, buffer);
+ error(_("corrupt binary patch at %s:%d: %.*s"),
+ state->patch_input_file, state->linenr-1, llen-1, buffer);
return NULL;
}
@@ -2102,7 +2155,8 @@ static int parse_binary(struct apply_state *state,
forward = parse_binary_hunk(state, &buffer, &size, &status, &used);
if (!forward && !status)
/* there has to be one hunk (forward hunk) */
- return error(_("unrecognized binary patch at line %d"), state->linenr-1);
+ return error(_("unrecognized binary patch at %s:%d"),
+ state->patch_input_file, state->linenr-1);
if (status)
/* otherwise we already gave an error message */
return status;
@@ -2264,7 +2318,8 @@ static int parse_chunk(struct apply_state *state, char *buffer, unsigned long si
*/
if ((state->apply || state->check) &&
(!patch->is_binary && !metadata_changes(patch))) {
- error(_("patch with only garbage at line %d"), state->linenr);
+ error(_("patch with only garbage at %s:%d"),
+ state->patch_input_file, state->linenr);
return -128;
}
}
@@ -4825,6 +4880,7 @@ static int apply_patch(struct apply_state *state,
int flush_attributes = 0;
state->patch_input_file = filename;
+ state->linenr = 1;
if (read_patch_file(&buf, fd) < 0)
return -128;
offset = 0;
@@ -4981,7 +5037,8 @@ static int apply_option_parse_p(const struct option *opt,
BUG_ON_OPT_NEG(unset);
- state->p_value = atoi(arg);
+ if (strtol_i(arg, 10, &state->p_value) < 0 || state->p_value < 0)
+ die(_("option -p expects a non-negative integer, got '%s'"), arg);
state->p_value_known = 1;
return 0;
}
diff --git a/apply.h b/apply.h
index 90e887ec0e..5f2f03d3ed 100644
--- a/apply.h
+++ b/apply.h
@@ -167,6 +167,7 @@ int check_apply_state(struct apply_state *state, int force_apply);
* Returns -1 on failure, the length of the parsed header otherwise.
*/
int parse_git_diff_header(struct strbuf *root,
+ const char *patch_input_file,
int *linenr,
int p_value,
const char *line,
diff --git a/builtin/am.c b/builtin/am.c
index 9d0b51c651..fe6e087eee 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1188,7 +1188,7 @@ static void am_append_signoff(struct am_state *state)
{
struct strbuf sb = STRBUF_INIT;
- strbuf_attach(&sb, state->msg, state->msg_len, state->msg_len);
+ strbuf_attach(&sb, state->msg, state->msg_len, state->msg_len + 1);
append_signoff(&sb, 0, 0);
state->msg = strbuf_detach(&sb, &state->msg_len);
}
diff --git a/builtin/backfill.c b/builtin/backfill.c
index e9a33e81be..d794dd842f 100644
--- a/builtin/backfill.c
+++ b/builtin/backfill.c
@@ -35,6 +35,7 @@ struct backfill_context {
struct oid_array current_batch;
size_t min_batch_size;
int sparse;
+ struct rev_info revs;
};
static void backfill_context_clear(struct backfill_context *ctx)
@@ -79,7 +80,6 @@ static int fill_missing_blobs(const char *path UNUSED,
static int do_backfill(struct backfill_context *ctx)
{
- struct rev_info revs;
struct path_walk_info info = PATH_WALK_INFO_INIT;
int ret;
@@ -91,13 +91,14 @@ static int do_backfill(struct backfill_context *ctx)
}
}
- repo_init_revisions(ctx->repo, &revs, "");
- handle_revision_arg("HEAD", &revs, 0, 0);
+ /* Walk from HEAD if otherwise unspecified. */
+ if (!ctx->revs.pending.nr)
+ add_head_to_pending(&ctx->revs);
info.blobs = 1;
info.tags = info.commits = info.trees = 0;
- info.revs = &revs;
+ info.revs = &ctx->revs;
info.path_fn = fill_missing_blobs;
info.path_fn_data = ctx;
@@ -108,7 +109,6 @@ static int do_backfill(struct backfill_context *ctx)
download_batch(ctx);
path_walk_info_clear(&info);
- release_revisions(&revs);
return ret;
}
@@ -119,7 +119,8 @@ int cmd_backfill(int argc, const char **argv, const char *prefix, struct reposit
.repo = repo,
.current_batch = OID_ARRAY_INIT,
.min_batch_size = 50000,
- .sparse = 0,
+ .sparse = -1,
+ .revs = REV_INFO_INIT,
};
struct option options[] = {
OPT_UNSIGNED(0, "min-batch-size", &ctx.min_batch_size,
@@ -134,7 +135,15 @@ int cmd_backfill(int argc, const char **argv, const char *prefix, struct reposit
builtin_backfill_usage, options);
argc = parse_options(argc, argv, prefix, options, builtin_backfill_usage,
- 0);
+ PARSE_OPT_KEEP_UNKNOWN_OPT |
+ PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_DASHDASH);
+
+ repo_init_revisions(repo, &ctx.revs, prefix);
+ argc = setup_revisions(argc, argv, &ctx.revs, NULL);
+
+ if (argc > 1)
+ die(_("unrecognized argument: %s"), argv[1]);
repo_config(repo, git_default_config, NULL);
@@ -143,5 +152,6 @@ int cmd_backfill(int argc, const char **argv, const char *prefix, struct reposit
result = do_backfill(&ctx);
backfill_context_clear(&ctx);
+ release_revisions(&ctx.revs);
return result;
}
diff --git a/builtin/branch.c b/builtin/branch.c
index a1a43380d0..1572a4f9ef 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -228,7 +228,7 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
int ret = 0;
int remote_branch = 0;
struct strbuf bname = STRBUF_INIT;
- unsigned allowed_interpret;
+ enum interpret_branch_kind allowed_interpret;
struct string_list refs_to_delete = STRING_LIST_INIT_DUP;
struct string_list_item *item;
int branch_name_pos;
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index b6f12f41d6..d9fbad5358 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -161,7 +161,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
case 'e':
ret = !odb_has_object(the_repository->objects, &oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR);
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR);
goto cleanup;
case 'w':
@@ -848,6 +848,9 @@ static void batch_each_object(struct batch_options *opt,
.callback = callback,
.payload = _payload,
};
+ struct odb_for_each_object_options opts = {
+ .flags = flags,
+ };
struct bitmap_index *bitmap = NULL;
struct odb_source *source;
@@ -860,7 +863,7 @@ static void batch_each_object(struct batch_options *opt,
odb_prepare_alternates(the_repository->objects);
for (source = the_repository->objects->sources; source; source = source->next) {
int ret = odb_source_loose_for_each_object(source, NULL, batch_one_object_oi,
- &payload, flags);
+ &payload, &opts);
if (ret)
break;
}
@@ -884,7 +887,7 @@ static void batch_each_object(struct batch_options *opt,
for (source = the_repository->objects->sources; source; source = source->next) {
struct odb_source_files *files = odb_source_files_downcast(source);
int ret = packfile_store_for_each_object(files->packed, &oi,
- batch_one_object_oi, &payload, flags);
+ batch_one_object_oi, &payload, &opts);
if (ret)
break;
}
diff --git a/builtin/config.c b/builtin/config.c
index 7c4857be62..cf4ba0f7cc 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -838,6 +838,7 @@ static int get_urlmatch(const struct config_location_options *opts,
const char *var, const char *url)
{
int ret;
+ char *section;
char *section_tail;
struct config_display_options display_opts = *_display_opts;
struct string_list_item *item;
@@ -851,8 +852,8 @@ static int get_urlmatch(const struct config_location_options *opts,
if (!url_normalize(url, &config.url))
die("%s", config.url.err);
- config.section = xstrdup_tolower(var);
- section_tail = strchr(config.section, '.');
+ config.section = section = xstrdup_tolower(var);
+ section_tail = strchr(section, '.');
if (section_tail) {
*section_tail = '\0';
config.key = section_tail + 1;
@@ -886,7 +887,7 @@ static int get_urlmatch(const struct config_location_options *opts,
string_list_clear(&values, 1);
free(config.url.url);
- free((void *)config.section);
+ free(section);
return ret;
}
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index 13621b0d6a..2eb43a28da 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -64,7 +64,8 @@ static int parse_opt_sign_mode(const struct option *opt,
if (unset)
return 0;
- if (parse_sign_mode(arg, val, NULL))
+ if (parse_sign_mode(arg, val, NULL) || (*val == SIGN_STRIP_IF_INVALID) ||
+ (*val == SIGN_SIGN_IF_INVALID) || (*val == SIGN_ABORT_IF_INVALID))
return error(_("unknown %s mode: %s"), opt->long_name, arg);
return 0;
@@ -822,12 +823,6 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
die(_("encountered signed commit %s; use "
"--signed-commits=<mode> to handle it"),
oid_to_hex(&commit->object.oid));
- case SIGN_STRIP_IF_INVALID:
- die(_("'strip-if-invalid' is not a valid mode for "
- "git fast-export with --signed-commits=<mode>"));
- case SIGN_SIGN_IF_INVALID:
- die(_("'sign-if-invalid' is not a valid mode for "
- "git fast-export with --signed-commits=<mode>"));
default:
BUG("invalid signed_commit_mode value %d", signed_commit_mode);
}
@@ -970,12 +965,6 @@ static void handle_tag(const char *name, struct tag *tag)
die(_("encountered signed tag %s; use "
"--signed-tags=<mode> to handle it"),
oid_to_hex(&tag->object.oid));
- case SIGN_STRIP_IF_INVALID:
- die(_("'strip-if-invalid' is not a valid mode for "
- "git fast-export with --signed-tags=<mode>"));
- case SIGN_SIGN_IF_INVALID:
- die(_("'sign-if-invalid' is not a valid mode for "
- "git fast-export with --signed-tags=<mode>"));
default:
BUG("invalid signed_commit_mode value %d", signed_commit_mode);
}
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 9fc6c35b74..82bc6dcc00 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -191,6 +191,7 @@ static const char *global_prefix;
static enum sign_mode signed_tag_mode = SIGN_VERBATIM;
static enum sign_mode signed_commit_mode = SIGN_VERBATIM;
static const char *signed_commit_keyid;
+static const char *signed_tag_keyid;
/* Memory pools */
static struct mem_pool fi_mem_pool = {
@@ -2892,6 +2893,9 @@ static void handle_signature_if_invalid(struct strbuf *new_data,
ret = verify_commit_buffer(tmp_buf.buf, tmp_buf.len, &signature_check);
if (ret) {
+ if (mode == SIGN_ABORT_IF_INVALID)
+ die(_("aborting due to invalid signature"));
+
warn_invalid_signature(&signature_check, msg->buf, mode);
if (mode == SIGN_SIGN_IF_INVALID) {
@@ -2983,6 +2987,7 @@ static void parse_new_commit(const char *arg)
case SIGN_VERBATIM:
case SIGN_STRIP_IF_INVALID:
case SIGN_SIGN_IF_INVALID:
+ case SIGN_ABORT_IF_INVALID:
import_one_signature(&sig_sha1, &sig_sha256, v);
break;
@@ -3068,7 +3073,8 @@ static void parse_new_commit(const char *arg)
encoding);
if ((signed_commit_mode == SIGN_STRIP_IF_INVALID ||
- signed_commit_mode == SIGN_SIGN_IF_INVALID) &&
+ signed_commit_mode == SIGN_SIGN_IF_INVALID ||
+ signed_commit_mode == SIGN_ABORT_IF_INVALID) &&
(sig_sha1.hash_algo || sig_sha256.hash_algo))
handle_signature_if_invalid(&new_data, &sig_sha1, &sig_sha256,
&msg, signed_commit_mode);
@@ -3084,7 +3090,50 @@ static void parse_new_commit(const char *arg)
b->last_commit = object_count_by_type[OBJ_COMMIT];
}
-static void handle_tag_signature(struct strbuf *msg, const char *name)
+static void handle_tag_signature_if_invalid(struct strbuf *buf,
+ struct strbuf *msg,
+ size_t sig_offset)
+{
+ struct strbuf signature = STRBUF_INIT;
+ struct strbuf payload = STRBUF_INIT;
+ struct signature_check sigc = { 0 };
+
+ strbuf_addbuf(&payload, buf);
+ strbuf_addch(&payload, '\n');
+ strbuf_add(&payload, msg->buf, sig_offset);
+ strbuf_add(&signature, msg->buf + sig_offset, msg->len - sig_offset);
+
+ sigc.payload_type = SIGNATURE_PAYLOAD_TAG;
+ sigc.payload = strbuf_detach(&payload, &sigc.payload_len);
+
+ if (!check_signature(&sigc, signature.buf, signature.len))
+ goto out;
+
+ if (signed_tag_mode == SIGN_ABORT_IF_INVALID)
+ die(_("aborting due to invalid signature"));
+
+ strbuf_setlen(msg, sig_offset);
+
+ if (signed_tag_mode == SIGN_SIGN_IF_INVALID) {
+ strbuf_attach(&payload, sigc.payload, sigc.payload_len,
+ sigc.payload_len + 1);
+ sigc.payload = NULL;
+ strbuf_reset(&signature);
+
+ if (sign_buffer(&payload, &signature, signed_tag_keyid,
+ SIGN_BUFFER_USE_DEFAULT_KEY))
+ die(_("failed to sign tag object"));
+
+ strbuf_addbuf(msg, &signature);
+ }
+
+out:
+ signature_check_clear(&sigc);
+ strbuf_release(&signature);
+ strbuf_release(&payload);
+}
+
+static void handle_tag_signature(struct strbuf *buf, struct strbuf *msg, const char *name)
{
size_t sig_offset = parse_signed_buffer(msg->buf, msg->len);
@@ -3110,17 +3159,16 @@ static void handle_tag_signature(struct strbuf *msg, const char *name)
/* Truncate the buffer to remove the signature */
strbuf_setlen(msg, sig_offset);
break;
+ case SIGN_ABORT_IF_INVALID:
+ case SIGN_SIGN_IF_INVALID:
+ case SIGN_STRIP_IF_INVALID:
+ handle_tag_signature_if_invalid(buf, msg, sig_offset);
+ break;
/* Third, aborting modes */
case SIGN_ABORT:
die(_("encountered signed tag; use "
"--signed-tags=<mode> to handle it"));
- case SIGN_STRIP_IF_INVALID:
- die(_("'strip-if-invalid' is not a valid mode for "
- "git fast-import with --signed-tags=<mode>"));
- case SIGN_SIGN_IF_INVALID:
- die(_("'sign-if-invalid' is not a valid mode for "
- "git fast-import with --signed-tags=<mode>"));
default:
BUG("invalid signed_tag_mode value %d from tag '%s'",
signed_tag_mode, name);
@@ -3190,8 +3238,6 @@ static void parse_new_tag(const char *arg)
/* tag payload/message */
parse_data(&msg, 0, NULL);
- handle_tag_signature(&msg, t->name);
-
/* build the tag object */
strbuf_reset(&new_data);
@@ -3203,6 +3249,9 @@ static void parse_new_tag(const char *arg)
if (tagger)
strbuf_addf(&new_data,
"tagger %s\n", tagger);
+
+ handle_tag_signature(&new_data, &msg, t->name);
+
strbuf_addch(&new_data, '\n');
strbuf_addbuf(&new_data, &msg);
free(tagger);
@@ -3307,7 +3356,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
cat_blob_write("\n", 1);
if (oe && oe->pack_id == pack_id) {
last_blob.offset = oe->idx.offset;
- strbuf_attach(&last_blob.data, buf, size, size);
+ strbuf_attach(&last_blob.data, buf, size, size + 1);
last_blob.depth = oe->depth;
} else
free(buf);
@@ -3713,7 +3762,7 @@ static int parse_one_option(const char *option)
if (parse_sign_mode(option, &signed_commit_mode, &signed_commit_keyid))
usagef(_("unknown --signed-commits mode '%s'"), option);
} else if (skip_prefix(option, "signed-tags=", &option)) {
- if (parse_sign_mode(option, &signed_tag_mode, NULL))
+ if (parse_sign_mode(option, &signed_tag_mode, &signed_tag_keyid))
usagef(_("unknown --signed-tags mode '%s'"), option);
} else if (!strcmp(option, "quiet")) {
show_stats = 0;
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 4795b2a13c..a22c319467 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -946,7 +946,7 @@ static int update_local_ref(struct ref *ref,
int fast_forward = 0;
if (!odb_has_object(the_repository->objects, &ref->new_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
die(_("object %s not found"), oid_to_hex(&ref->new_oid));
if (oideq(&ref->old_oid, &ref->new_oid)) {
@@ -1396,7 +1396,7 @@ static int check_exist_and_connected(struct ref *ref_map)
*/
for (r = rm; r; r = r->next) {
if (!odb_has_object(the_repository->objects, &r->old_oid,
- HAS_OBJECT_RECHECK_PACKED))
+ ODB_HAS_OBJECT_RECHECK_PACKED))
return -1;
}
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 9bab32effe..248f8ff5a0 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
#include "builtin.h"
#include "gettext.h"
#include "hex.h"
@@ -42,8 +41,8 @@ static int check_full = 1;
static int connectivity_only;
static int check_strict;
static int keep_cache_objects;
-static struct fsck_options fsck_walk_options = FSCK_OPTIONS_DEFAULT;
-static struct fsck_options fsck_obj_options = FSCK_OPTIONS_DEFAULT;
+static struct fsck_options fsck_walk_options;
+static struct fsck_options fsck_obj_options;
static int errors_found;
static int write_lost_and_found;
static int verbose;
@@ -66,14 +65,14 @@ static const char *describe_object(const struct object_id *oid)
return fsck_describe_object(&fsck_walk_options, oid);
}
-static const char *printable_type(const struct object_id *oid,
+static const char *printable_type(struct repository *repo,
+ const struct object_id *oid,
enum object_type type)
{
const char *ret;
if (type == OBJ_NONE)
- type = odb_read_object_info(the_repository->objects,
- oid, NULL);
+ type = odb_read_object_info(repo->objects, oid, NULL);
ret = type_name(type);
if (!ret)
@@ -82,17 +81,17 @@ static const char *printable_type(const struct object_id *oid,
return ret;
}
-static int objerror(struct object *obj, const char *err)
+static int objerror(struct repository *repo, struct object *obj, const char *err)
{
errors_found |= ERROR_OBJECT;
/* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
fprintf_ln(stderr, _("error in %s %s: %s"),
- printable_type(&obj->oid, obj->type),
+ printable_type(repo, &obj->oid, obj->type),
describe_object(&obj->oid), err);
return -1;
}
-static int fsck_objects_error_func(struct fsck_options *o UNUSED,
+static int fsck_objects_error_func(struct fsck_options *o,
void *fsck_report,
enum fsck_msg_type msg_type,
enum fsck_msg_id msg_id UNUSED,
@@ -106,13 +105,13 @@ static int fsck_objects_error_func(struct fsck_options *o UNUSED,
case FSCK_WARN:
/* TRANSLATORS: e.g. warning in tree 01bfda: <more explanation> */
fprintf_ln(stderr, _("warning in %s %s: %s"),
- printable_type(oid, object_type),
+ printable_type(o->repo, oid, object_type),
describe_object(oid), message);
return 0;
case FSCK_ERROR:
/* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
fprintf_ln(stderr, _("error in %s %s: %s"),
- printable_type(oid, object_type),
+ printable_type(o->repo, oid, object_type),
describe_object(oid), message);
return 1;
default:
@@ -124,7 +123,7 @@ static int fsck_objects_error_func(struct fsck_options *o UNUSED,
static struct object_array pending;
static int mark_object(struct object *obj, enum object_type type,
- void *data, struct fsck_options *options UNUSED)
+ void *data, struct fsck_options *options)
{
struct object *parent = data;
@@ -136,7 +135,7 @@ static int mark_object(struct object *obj, enum object_type type,
if (!obj) {
/* ... these references to parent->fld are safe here */
printf_ln(_("broken link from %7s %s"),
- printable_type(&parent->oid, parent->type),
+ printable_type(options->repo, &parent->oid, parent->type),
describe_object(&parent->oid));
printf_ln(_("broken link from %7s %s"),
(type == OBJ_ANY ? _("unknown") : type_name(type)),
@@ -147,13 +146,13 @@ static int mark_object(struct object *obj, enum object_type type,
if (type != OBJ_ANY && obj->type != type)
/* ... and the reference to parent is safe here */
- objerror(parent, _("wrong object type in link"));
+ objerror(options->repo, parent, _("wrong object type in link"));
if (obj->flags & REACHABLE)
return 0;
obj->flags |= REACHABLE;
- if (is_promisor_object(the_repository, &obj->oid))
+ if (is_promisor_object(options->repo, &obj->oid))
/*
* Further recursion does not need to be performed on this
* object since it is a promisor object (so it does not need to
@@ -162,13 +161,13 @@ static int mark_object(struct object *obj, enum object_type type,
return 0;
if (!(obj->flags & HAS_OBJ)) {
- if (parent && !odb_has_object(the_repository->objects, &obj->oid,
- HAS_OBJECT_RECHECK_PACKED)) {
+ if (parent && !odb_has_object(options->repo->objects, &obj->oid,
+ ODB_HAS_OBJECT_RECHECK_PACKED)) {
printf_ln(_("broken link from %7s %s\n"
" to %7s %s"),
- printable_type(&parent->oid, parent->type),
+ printable_type(options->repo, &parent->oid, parent->type),
describe_object(&parent->oid),
- printable_type(&obj->oid, obj->type),
+ printable_type(options->repo, &obj->oid, obj->type),
describe_object(&obj->oid));
errors_found |= ERROR_REACHABLE;
}
@@ -181,7 +180,7 @@ static int mark_object(struct object *obj, enum object_type type,
static void mark_object_reachable(struct object *obj)
{
- mark_object(obj, OBJ_ANY, NULL, NULL);
+ mark_object(obj, OBJ_ANY, NULL, &fsck_walk_options);
}
static int traverse_one_object(struct object *obj)
@@ -195,13 +194,13 @@ static int traverse_one_object(struct object *obj)
return result;
}
-static int traverse_reachable(void)
+static int traverse_reachable(struct repository *repo)
{
struct progress *progress = NULL;
unsigned int nr = 0;
int result = 0;
if (show_progress)
- progress = start_delayed_progress(the_repository,
+ progress = start_delayed_progress(repo,
_("Checking connectivity"), 0);
while (pending.nr) {
result |= traverse_one_object(object_array_pop(&pending));
@@ -222,10 +221,11 @@ static int mark_used(struct object *obj, enum object_type type UNUSED,
static int mark_unreachable_referents(const struct object_id *oid,
struct object_info *oi UNUSED,
- void *data UNUSED)
+ void *data)
{
- struct fsck_options options = FSCK_OPTIONS_DEFAULT;
- struct object *obj = lookup_object(the_repository, oid);
+ struct repository *repo = data;
+ struct fsck_options options;
+ struct object *obj = lookup_object(data, oid);
if (!obj || !(obj->flags & HAS_OBJ))
return 0; /* not part of our original set */
@@ -237,12 +237,13 @@ static int mark_unreachable_referents(const struct object_id *oid,
* (and we want to avoid parsing blobs).
*/
if (obj->type == OBJ_NONE) {
- enum object_type type = odb_read_object_info(the_repository->objects,
+ enum object_type type = odb_read_object_info(repo->objects,
&obj->oid, NULL);
if (type > 0)
object_as_type(obj, type, 0);
}
+ fsck_options_init(&options, repo, FSCK_OPTIONS_DEFAULT);
options.walk = mark_used;
fsck_walk(obj, NULL, &options);
if (obj->type == OBJ_TREE)
@@ -254,7 +255,7 @@ static int mark_unreachable_referents(const struct object_id *oid,
/*
* Check a single reachable object
*/
-static void check_reachable_object(struct object *obj)
+static void check_reachable_object(struct repository *repo, struct object *obj)
{
/*
* We obviously want the object to be parsed,
@@ -262,12 +263,12 @@ static void check_reachable_object(struct object *obj)
* do a full fsck
*/
if (!(obj->flags & HAS_OBJ)) {
- if (is_promisor_object(the_repository, &obj->oid))
+ if (is_promisor_object(repo, &obj->oid))
return;
- if (has_object_pack(the_repository, &obj->oid))
+ if (has_object_pack(repo, &obj->oid))
return; /* it is in pack - forget about it */
printf_ln(_("missing %s %s"),
- printable_type(&obj->oid, obj->type),
+ printable_type(repo, &obj->oid, obj->type),
describe_object(&obj->oid));
errors_found |= ERROR_REACHABLE;
return;
@@ -277,7 +278,7 @@ static void check_reachable_object(struct object *obj)
/*
* Check a single unreachable object
*/
-static void check_unreachable_object(struct object *obj)
+static void check_unreachable_object(struct repository *repo, struct object *obj)
{
/*
* Missing unreachable object? Ignore it. It's not like
@@ -294,7 +295,7 @@ static void check_unreachable_object(struct object *obj)
*/
if (show_unreachable) {
printf_ln(_("unreachable %s %s"),
- printable_type(&obj->oid, obj->type),
+ printable_type(repo, &obj->oid, obj->type),
describe_object(&obj->oid));
return;
}
@@ -314,22 +315,22 @@ static void check_unreachable_object(struct object *obj)
if (!(obj->flags & USED)) {
if (show_dangling)
printf_ln(_("dangling %s %s"),
- printable_type(&obj->oid, obj->type),
+ printable_type(repo, &obj->oid, obj->type),
describe_object(&obj->oid));
if (write_lost_and_found) {
- char *filename = repo_git_path(the_repository, "lost-found/%s/%s",
+ char *filename = repo_git_path(repo, "lost-found/%s/%s",
obj->type == OBJ_COMMIT ? "commit" : "other",
describe_object(&obj->oid));
FILE *f;
- if (safe_create_leading_directories_const(the_repository, filename)) {
+ if (safe_create_leading_directories_const(repo, filename)) {
error(_("could not create lost-found"));
free(filename);
return;
}
f = xfopen(filename, "w");
if (obj->type == OBJ_BLOB) {
- if (odb_stream_blob_to_fd(the_repository->objects, fileno(f),
+ if (odb_stream_blob_to_fd(repo->objects, fileno(f),
&obj->oid, NULL, 1))
die_errno(_("could not write '%s'"), filename);
} else
@@ -349,23 +350,23 @@ static void check_unreachable_object(struct object *obj)
*/
}
-static void check_object(struct object *obj)
+static void check_object(struct repository *repo, struct object *obj)
{
if (verbose)
fprintf_ln(stderr, _("Checking %s"), describe_object(&obj->oid));
if (obj->flags & REACHABLE)
- check_reachable_object(obj);
+ check_reachable_object(repo, obj);
else
- check_unreachable_object(obj);
+ check_unreachable_object(repo, obj);
}
-static void check_connectivity(void)
+static void check_connectivity(struct repository *repo)
{
int i, max;
/* Traverse the pending reachable objects */
- traverse_reachable();
+ traverse_reachable(repo);
/*
* With --connectivity-only, we won't have actually opened and marked
@@ -383,24 +384,25 @@ static void check_connectivity(void)
* and ignore any that weren't present in our earlier
* traversal.
*/
- odb_for_each_object(the_repository->objects, NULL,
- mark_unreachable_referents, NULL, 0);
+ odb_for_each_object(repo->objects, NULL,
+ mark_unreachable_referents, repo, 0);
}
/* Look up all the requirements, warn about missing objects.. */
- max = get_max_object_index(the_repository);
+ max = get_max_object_index(repo);
if (verbose)
fprintf_ln(stderr, _("Checking connectivity (%d objects)"), max);
for (i = 0; i < max; i++) {
- struct object *obj = get_indexed_object(the_repository, i);
+ struct object *obj = get_indexed_object(repo, i);
if (obj)
- check_object(obj);
+ check_object(repo, obj);
}
}
-static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
+static int fsck_obj(struct repository *repo,
+ struct object *obj, void *buffer, unsigned long size)
{
int err;
@@ -410,11 +412,11 @@ static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
if (verbose)
fprintf_ln(stderr, _("Checking %s %s"),
- printable_type(&obj->oid, obj->type),
+ printable_type(repo, &obj->oid, obj->type),
describe_object(&obj->oid));
if (fsck_walk(obj, NULL, &fsck_obj_options))
- objerror(obj, _("broken links"));
+ objerror(repo, obj, _("broken links"));
err = fsck_object(obj, buffer, size, &fsck_obj_options);
if (err)
goto out;
@@ -432,7 +434,7 @@ static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
if (show_tags && tag->tagged) {
printf_ln(_("tagged %s %s (%s) in %s"),
- printable_type(&tag->tagged->oid, tag->tagged->type),
+ printable_type(repo, &tag->tagged->oid, tag->tagged->type),
describe_object(&tag->tagged->oid),
tag->tag,
describe_object(&tag->object.oid));
@@ -446,15 +448,16 @@ out:
}
static int fsck_obj_buffer(const struct object_id *oid, enum object_type type,
- unsigned long size, void *buffer, int *eaten)
+ unsigned long size, void *buffer, int *eaten, void *cb_data)
{
+ struct repository *repo = cb_data;
+ struct object *obj;
+
/*
* Note, buffer may be NULL if type is OBJ_BLOB. See
* verify_packfile(), data_valid variable for details.
*/
- struct object *obj;
- obj = parse_object_buffer(the_repository, oid, type, size, buffer,
- eaten);
+ obj = parse_object_buffer(repo, oid, type, size, buffer, eaten);
if (!obj) {
errors_found |= ERROR_OBJECT;
return error(_("%s: object corrupt or missing"),
@@ -462,18 +465,19 @@ static int fsck_obj_buffer(const struct object_id *oid, enum object_type type,
}
obj->flags &= ~(REACHABLE | SEEN);
obj->flags |= HAS_OBJ;
- return fsck_obj(obj, buffer, size);
+ return fsck_obj(repo, obj, buffer, size);
}
static int default_refs;
-static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
- timestamp_t timestamp)
+static void fsck_handle_reflog_oid(struct repository *repo,
+ const char *refname, struct object_id *oid,
+ timestamp_t timestamp)
{
struct object *obj;
if (!is_null_oid(oid)) {
- obj = lookup_object(the_repository, oid);
+ obj = lookup_object(repo, oid);
if (obj && (obj->flags & HAS_OBJ)) {
if (timestamp)
fsck_put_object_name(&fsck_walk_options, oid,
@@ -481,7 +485,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
refname, timestamp);
obj->flags |= USED;
mark_object_reachable(obj);
- } else if (!is_promisor_object(the_repository, oid)) {
+ } else if (!is_promisor_object(repo, oid)) {
error(_("%s: invalid reflog entry %s"),
refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
@@ -493,8 +497,10 @@ static int fsck_handle_reflog_ent(const char *refname,
struct object_id *ooid, struct object_id *noid,
const char *email UNUSED,
timestamp_t timestamp, int tz UNUSED,
- const char *message UNUSED, void *cb_data UNUSED)
+ const char *message UNUSED, void *cb_data)
{
+ struct repository *repo = cb_data;
+
if (now && timestamp > now)
return 0;
@@ -502,19 +508,20 @@ static int fsck_handle_reflog_ent(const char *refname,
fprintf_ln(stderr, _("Checking reflog %s->%s"),
oid_to_hex(ooid), oid_to_hex(noid));
- fsck_handle_reflog_oid(refname, ooid, 0);
- fsck_handle_reflog_oid(refname, noid, timestamp);
+ fsck_handle_reflog_oid(repo, refname, ooid, 0);
+ fsck_handle_reflog_oid(repo, refname, noid, timestamp);
return 0;
}
static int fsck_handle_reflog(const char *logname, void *cb_data)
{
struct strbuf refname = STRBUF_INIT;
+ struct worktree *wt = cb_data;
- strbuf_worktree_ref(cb_data, &refname, logname);
- refs_for_each_reflog_ent(get_main_ref_store(the_repository),
+ strbuf_worktree_ref(wt, &refname, logname);
+ refs_for_each_reflog_ent(get_main_ref_store(wt->repo),
refname.buf, fsck_handle_reflog_ent,
- NULL);
+ wt->repo);
strbuf_release(&refname);
return 0;
}
@@ -532,14 +539,20 @@ struct snapshot {
/* TODO: Consider also snapshotting the index of each worktree. */
};
+struct snapshot_ref_data {
+ struct repository *repo;
+ struct snapshot *snap;
+};
+
static int snapshot_ref(const struct reference *ref, void *cb_data)
{
- struct snapshot *snap = cb_data;
+ struct snapshot_ref_data *data = cb_data;
+ struct snapshot *snap = data->snap;
struct object *obj;
- obj = parse_object(the_repository, ref->oid);
+ obj = parse_object(data->repo, ref->oid);
if (!obj) {
- if (is_promisor_object(the_repository, ref->oid)) {
+ if (is_promisor_object(data->repo, ref->oid)) {
/*
* Increment default_refs anyway, because this is a
* valid ref.
@@ -567,11 +580,12 @@ static int snapshot_ref(const struct reference *ref, void *cb_data)
return 0;
}
-static int fsck_handle_ref(const struct reference *ref, void *cb_data UNUSED)
+static int fsck_handle_ref(const struct reference *ref, void *cb_data)
{
+ struct repository *repo = cb_data;
struct object *obj;
- obj = parse_object(the_repository, ref->oid);
+ obj = parse_object(repo, ref->oid);
obj->flags |= USED;
fsck_put_object_name(&fsck_walk_options,
ref->oid, "%s", ref->name);
@@ -580,11 +594,16 @@ static int fsck_handle_ref(const struct reference *ref, void *cb_data UNUSED)
return 0;
}
-static void snapshot_refs(struct snapshot *snap, int argc, const char **argv)
+static void snapshot_refs(struct repository *repo,
+ struct snapshot *snap, int argc, const char **argv)
{
struct refs_for_each_ref_options opts = {
.flags = REFS_FOR_EACH_INCLUDE_BROKEN,
};
+ struct snapshot_ref_data data = {
+ .repo = repo,
+ .snap = snap,
+ };
struct worktree **worktrees, **p;
const char *head_points_at;
struct object_id head_oid;
@@ -592,13 +611,13 @@ static void snapshot_refs(struct snapshot *snap, int argc, const char **argv)
for (int i = 0; i < argc; i++) {
const char *arg = argv[i];
struct object_id oid;
- if (!repo_get_oid(the_repository, arg, &oid)) {
+ if (!repo_get_oid(repo, arg, &oid)) {
struct reference ref = {
.name = arg,
.oid = &oid,
};
- snapshot_ref(&ref, snap);
+ snapshot_ref(&ref, &data);
continue;
}
error(_("invalid parameter: expected sha1, got '%s'"), arg);
@@ -610,8 +629,8 @@ static void snapshot_refs(struct snapshot *snap, int argc, const char **argv)
return;
}
- refs_for_each_ref_ext(get_main_ref_store(the_repository),
- snapshot_ref, snap, &opts);
+ refs_for_each_ref_ext(get_main_ref_store(repo),
+ snapshot_ref, &data, &opts);
worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
@@ -620,7 +639,7 @@ static void snapshot_refs(struct snapshot *snap, int argc, const char **argv)
strbuf_worktree_ref(wt, &refname, "HEAD");
- head_points_at = refs_resolve_ref_unsafe(get_main_ref_store(the_repository),
+ head_points_at = refs_resolve_ref_unsafe(get_main_ref_store(repo),
refname.buf, 0, &head_oid, NULL);
if (head_points_at && !is_null_oid(&head_oid)) {
@@ -629,7 +648,7 @@ static void snapshot_refs(struct snapshot *snap, int argc, const char **argv)
.oid = &head_oid,
};
- snapshot_ref(&ref, snap);
+ snapshot_ref(&ref, &data);
}
strbuf_release(&refname);
@@ -653,7 +672,7 @@ static void free_snapshot_refs(struct snapshot *snap)
free(snap->ref);
}
-static void process_refs(struct snapshot *snap)
+static void process_refs(struct repository *repo, struct snapshot *snap)
{
struct worktree **worktrees, **p;
@@ -662,7 +681,7 @@ static void process_refs(struct snapshot *snap)
.name = snap->ref[i].refname,
.oid = &snap->ref[i].oid,
};
- fsck_handle_ref(&ref, NULL);
+ fsck_handle_ref(&ref, repo);
}
if (include_reflogs) {
@@ -694,27 +713,28 @@ static void process_refs(struct snapshot *snap)
}
}
-struct for_each_loose_cb
-{
+struct for_each_loose_cb {
+ struct repository *repo;
struct progress *progress;
};
static int fsck_loose(const struct object_id *oid, const char *path,
- void *data UNUSED)
+ void *cb_data)
{
+ struct for_each_loose_cb *data = cb_data;
struct object *obj;
enum object_type type = OBJ_NONE;
unsigned long size;
void *contents = NULL;
int eaten;
struct object_info oi = OBJECT_INFO_INIT;
- struct object_id real_oid = *null_oid(the_hash_algo);
+ struct object_id real_oid = *null_oid(data->repo->hash_algo);
int err = 0;
oi.sizep = &size;
oi.typep = &type;
- if (read_loose_object(the_repository, path, oid, &real_oid, &contents, &oi) < 0) {
+ if (read_loose_object(data->repo, path, oid, &real_oid, &contents, &oi) < 0) {
if (contents && !oideq(&real_oid, oid))
err = error(_("%s: hash-path mismatch, found at: %s"),
oid_to_hex(&real_oid), path);
@@ -731,7 +751,7 @@ static int fsck_loose(const struct object_id *oid, const char *path,
if (!contents && type != OBJ_BLOB)
BUG("read_loose_object streamed a non-blob");
- obj = parse_object_buffer(the_repository, oid, type, size,
+ obj = parse_object_buffer(data->repo, oid, type, size,
contents, &eaten);
if (!obj) {
@@ -745,7 +765,7 @@ static int fsck_loose(const struct object_id *oid, const char *path,
obj->flags &= ~(REACHABLE | SEEN);
obj->flags |= HAS_OBJ;
- if (fsck_obj(obj, contents, size))
+ if (fsck_obj(data->repo, obj, contents, size))
errors_found |= ERROR_OBJECT;
if (!eaten)
@@ -769,10 +789,11 @@ static int fsck_subdir(unsigned int nr, const char *path UNUSED, void *data)
return 0;
}
-static void fsck_source(struct odb_source *source)
+static void fsck_source(struct repository *repo, struct odb_source *source)
{
struct progress *progress = NULL;
struct for_each_loose_cb cb_data = {
+ .repo = source->odb->repo,
.progress = progress,
};
@@ -780,7 +801,7 @@ static void fsck_source(struct odb_source *source)
fprintf_ln(stderr, _("Checking object directory"));
if (show_progress)
- progress = start_progress(the_repository,
+ progress = start_progress(repo,
_("Checking object directories"), 256);
for_each_loose_file_in_source(source, fsck_loose,
@@ -789,7 +810,7 @@ static void fsck_source(struct odb_source *source)
stop_progress(&progress);
}
-static int fsck_cache_tree(struct cache_tree *it, const char *index_path)
+static int fsck_cache_tree(struct repository *repo, struct cache_tree *it, const char *index_path)
{
int i;
int err = 0;
@@ -798,7 +819,7 @@ static int fsck_cache_tree(struct cache_tree *it, const char *index_path)
fprintf_ln(stderr, _("Checking cache tree of %s"), index_path);
if (0 <= it->entry_count) {
- struct object *obj = parse_object(the_repository, &it->oid);
+ struct object *obj = parse_object(repo, &it->oid);
if (!obj) {
error(_("%s: invalid sha1 pointer in cache-tree of %s"),
oid_to_hex(&it->oid), index_path);
@@ -809,10 +830,10 @@ static int fsck_cache_tree(struct cache_tree *it, const char *index_path)
fsck_put_object_name(&fsck_walk_options, &it->oid, ":");
mark_object_reachable(obj);
if (obj->type != OBJ_TREE)
- err |= objerror(obj, _("non-tree in cache-tree"));
+ err |= objerror(repo, obj, _("non-tree in cache-tree"));
}
for (i = 0; i < it->subtree_nr; i++)
- err |= fsck_cache_tree(it->down[i]->cache_tree, index_path);
+ err |= fsck_cache_tree(repo, it->down[i]->cache_tree, index_path);
return err;
}
@@ -838,7 +859,7 @@ static int fsck_resolve_undo(struct index_state *istate,
if (!ru->mode[i] || !S_ISREG(ru->mode[i]))
continue;
- obj = parse_object(the_repository, &ru->oid[i]);
+ obj = parse_object(istate->repo, &ru->oid[i]);
if (!obj) {
error(_("%s: invalid sha1 pointer in resolve-undo of %s"),
oid_to_hex(&ru->oid[i]),
@@ -870,7 +891,7 @@ static void fsck_index(struct index_state *istate, const char *index_path,
mode = istate->cache[i]->ce_mode;
if (S_ISGITLINK(mode))
continue;
- blob = lookup_blob(the_repository,
+ blob = lookup_blob(istate->repo,
&istate->cache[i]->oid);
if (!blob)
continue;
@@ -883,15 +904,16 @@ static void fsck_index(struct index_state *istate, const char *index_path,
mark_object_reachable(obj);
}
if (istate->cache_tree)
- fsck_cache_tree(istate->cache_tree, index_path);
+ fsck_cache_tree(istate->repo, istate->cache_tree, index_path);
fsck_resolve_undo(istate, index_path);
}
static int mark_object_for_connectivity(const struct object_id *oid,
struct object_info *oi UNUSED,
- void *cb_data UNUSED)
+ void *cb_data)
{
- struct object *obj = lookup_unknown_object(the_repository, oid);
+ struct repository *repo = cb_data;
+ struct object *obj = lookup_unknown_object(repo, oid);
obj->flags |= HAS_OBJ;
return 0;
}
@@ -906,7 +928,7 @@ static int check_pack_rev_indexes(struct repository *r, int show_progress)
if (show_progress) {
repo_for_each_pack(r, p)
pack_count++;
- progress = start_delayed_progress(the_repository,
+ progress = start_delayed_progress(r,
"Verifying reverse pack-indexes", pack_count);
pack_count = 0;
}
@@ -986,7 +1008,7 @@ static struct option fsck_opts[] = {
int cmd_fsck(int argc,
const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
struct odb_source *source;
struct snapshot snap = {
@@ -1004,7 +1026,10 @@ int cmd_fsck(int argc,
argc = parse_options(argc, argv, prefix, fsck_opts, fsck_usage, 0);
+ fsck_options_init(&fsck_walk_options, repo, FSCK_OPTIONS_DEFAULT);
fsck_walk_options.walk = mark_object;
+
+ fsck_options_init(&fsck_obj_options, repo, FSCK_OPTIONS_DEFAULT);
fsck_obj_options.walk = mark_used;
fsck_obj_options.error_func = fsck_objects_error_func;
if (check_strict)
@@ -1023,11 +1048,11 @@ int cmd_fsck(int argc,
if (name_objects)
fsck_enable_object_names(&fsck_walk_options);
- repo_config(the_repository, git_fsck_config, &fsck_obj_options);
- prepare_repo_settings(the_repository);
+ repo_config(repo, git_fsck_config, &fsck_obj_options);
+ prepare_repo_settings(repo);
if (check_references)
- fsck_refs(the_repository);
+ fsck_refs(repo);
/*
* Take a snapshot of the refs before walking objects to avoid looking
@@ -1035,18 +1060,18 @@ int cmd_fsck(int argc,
* objects. We can still walk over new objects that are added during the
* execution of fsck but won't miss any objects that were reachable.
*/
- snapshot_refs(&snap, argc, argv);
+ snapshot_refs(repo, &snap, argc, argv);
/* Ensure we get a "fresh" view of the odb */
- odb_reprepare(the_repository->objects);
+ odb_reprepare(repo->objects);
if (connectivity_only) {
- odb_for_each_object(the_repository->objects, NULL,
- mark_object_for_connectivity, NULL, 0);
+ odb_for_each_object(repo->objects, NULL,
+ mark_object_for_connectivity, repo, 0);
} else {
- odb_prepare_alternates(the_repository->objects);
- for (source = the_repository->objects->sources; source; source = source->next)
- fsck_source(source);
+ odb_prepare_alternates(repo->objects);
+ for (source = repo->objects->sources; source; source = source->next)
+ fsck_source(repo, source);
if (check_full) {
struct packed_git *p;
@@ -1054,20 +1079,20 @@ int cmd_fsck(int argc,
struct progress *progress = NULL;
if (show_progress) {
- repo_for_each_pack(the_repository, p) {
+ repo_for_each_pack(repo, p) {
if (open_pack_index(p))
continue;
total += p->num_objects;
}
- progress = start_progress(the_repository,
+ progress = start_progress(repo,
_("Checking objects"), total);
}
- repo_for_each_pack(the_repository, p) {
+ repo_for_each_pack(repo, p) {
/* verify gives error messages itself */
- if (verify_pack(the_repository,
- p, fsck_obj_buffer,
+ if (verify_pack(repo,
+ p, fsck_obj_buffer, repo,
progress, count))
errors_found |= ERROR_PACK;
count += p->num_objects;
@@ -1080,7 +1105,7 @@ int cmd_fsck(int argc,
}
/* Process the snapshotted refs and the reflogs. */
- process_refs(&snap);
+ process_refs(repo, &snap);
/* If not given any explicit objects, process index files too. */
if (!argc)
@@ -1100,7 +1125,7 @@ int cmd_fsck(int argc,
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
struct index_state istate =
- INDEX_STATE_INIT(the_repository);
+ INDEX_STATE_INIT(repo);
char *path, *wt_gitdir;
/*
@@ -1121,17 +1146,17 @@ int cmd_fsck(int argc,
free_worktrees(worktrees);
}
- errors_found |= check_pack_rev_indexes(the_repository, show_progress);
- if (verify_bitmap_files(the_repository))
+ errors_found |= check_pack_rev_indexes(repo, show_progress);
+ if (verify_bitmap_files(repo))
errors_found |= ERROR_BITMAP;
- check_connectivity();
+ check_connectivity(repo);
- if (the_repository->settings.core_commit_graph) {
+ if (repo->settings.core_commit_graph) {
struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
- odb_prepare_alternates(the_repository->objects);
- for (source = the_repository->objects->sources; source; source = source->next) {
+ odb_prepare_alternates(repo->objects);
+ for (source = repo->objects->sources; source; source = source->next) {
child_process_init(&commit_graph_verify);
commit_graph_verify.git_cmd = 1;
strvec_pushl(&commit_graph_verify.args, "commit-graph",
@@ -1145,11 +1170,11 @@ int cmd_fsck(int argc,
}
}
- if (the_repository->settings.core_multi_pack_index) {
+ if (repo->settings.core_multi_pack_index) {
struct child_process midx_verify = CHILD_PROCESS_INIT;
- odb_prepare_alternates(the_repository->objects);
- for (source = the_repository->objects->sources; source; source = source->next) {
+ odb_prepare_alternates(repo->objects);
+ for (source = repo->objects->sources; source; source = source->next) {
child_process_init(&midx_verify);
midx_verify.git_cmd = 1;
strvec_pushl(&midx_verify.args, "multi-pack-index",
diff --git a/builtin/gc.c b/builtin/gc.c
index 5d8d358f7a..3a71e314c9 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -467,37 +467,19 @@ out:
static int too_many_loose_objects(int limit)
{
/*
- * Quickly check if a "gc" is needed, by estimating how
- * many loose objects there are. Because SHA-1 is evenly
- * distributed, we can check only one and get a reasonable
- * estimate.
+ * This is weird, but stems from legacy behaviour: the GC auto
+ * threshold was always essentially interpreted as if it was rounded up
+ * to the next multiple of 256, so we retain this behaviour for now.
*/
- DIR *dir;
- struct dirent *ent;
- int auto_threshold;
- int num_loose = 0;
- int needed = 0;
- const unsigned hexsz_loose = the_hash_algo->hexsz - 2;
- char *path;
+ int auto_threshold = DIV_ROUND_UP(limit, 256) * 256;
+ unsigned long loose_count;
- path = repo_git_path(the_repository, "objects/17");
- dir = opendir(path);
- free(path);
- if (!dir)
+ if (odb_source_loose_count_objects(the_repository->objects->sources,
+ ODB_COUNT_OBJECTS_APPROXIMATE,
+ &loose_count) < 0)
return 0;
- auto_threshold = DIV_ROUND_UP(limit, 256);
- while ((ent = readdir(dir)) != NULL) {
- if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
- ent->d_name[hexsz_loose] != '\0')
- continue;
- if (++num_loose > auto_threshold) {
- needed = 1;
- break;
- }
- }
- closedir(dir);
- return needed;
+ return loose_count > auto_threshold;
}
static struct packed_git *find_base_packs(struct string_list *packs,
@@ -592,9 +574,13 @@ static uint64_t total_ram(void)
static uint64_t estimate_repack_memory(struct gc_config *cfg,
struct packed_git *pack)
{
- unsigned long nr_objects = repo_approximate_object_count(the_repository);
+ unsigned long nr_objects;
size_t os_cache, heap;
+ if (odb_count_objects(the_repository->objects,
+ ODB_COUNT_OBJECTS_APPROXIMATE, &nr_objects) < 0)
+ return 0;
+
if (!pack || !nr_objects)
return 0;
diff --git a/builtin/history.c b/builtin/history.c
index 568dc75ee7..9526938085 100644
--- a/builtin/history.c
+++ b/builtin/history.c
@@ -437,8 +437,8 @@ static int cmd_history_reword(int argc,
enum ref_action action = REF_ACTION_DEFAULT;
int dry_run = 0;
struct option options[] = {
- OPT_CALLBACK_F(0, "update-refs", &action, N_("<action>"),
- N_("control which refs should be updated (branches|head)"),
+ OPT_CALLBACK_F(0, "update-refs", &action, "(branches|head)",
+ N_("control which refs should be updated"),
PARSE_OPT_NONEG, parse_ref_action),
OPT_BOOL('n', "dry-run", &dry_run,
N_("perform a dry-run without updating any refs")),
@@ -666,8 +666,8 @@ static int cmd_history_split(int argc,
enum ref_action action = REF_ACTION_DEFAULT;
int dry_run = 0;
struct option options[] = {
- OPT_CALLBACK_F(0, "update-refs", &action, N_("<refs>"),
- N_("control ref update behavior (branches|head|print)"),
+ OPT_CALLBACK_F(0, "update-refs", &action, "(branches|head)",
+ N_("control ref update behavior"),
PARSE_OPT_NONEG, parse_ref_action),
OPT_BOOL('n', "dry-run", &dry_run,
N_("perform a dry-run without updating any refs")),
diff --git a/builtin/hook.c b/builtin/hook.c
index 83020dfb4f..c0585587e5 100644
--- a/builtin/hook.c
+++ b/builtin/hook.c
@@ -4,14 +4,22 @@
#include "environment.h"
#include "gettext.h"
#include "hook.h"
+#include "hook-list.h"
#include "parse-options.h"
-#include "strvec.h"
-#include "abspath.h"
#define BUILTIN_HOOK_RUN_USAGE \
- N_("git hook run [--ignore-missing] [--to-stdin=<path>] <hook-name> [-- <hook-args>]")
+ N_("git hook run [--allow-unknown-hook-name] [--ignore-missing] [--to-stdin=<path>] <hook-name> [-- <hook-args>]")
#define BUILTIN_HOOK_LIST_USAGE \
- N_("git hook list [-z] <hook-name>")
+ N_("git hook list [--allow-unknown-hook-name] [-z] [--show-scope] <hook-name>")
+
+static int is_known_hook(const char *name)
+{
+ const char **p;
+ for (p = hook_name_list; *p; p++)
+ if (!strcmp(*p, name))
+ return 1;
+ return 0;
+}
static const char * const builtin_hook_usage[] = {
BUILTIN_HOOK_RUN_USAGE,
@@ -35,11 +43,17 @@ static int list(int argc, const char **argv, const char *prefix,
struct string_list_item *item;
const char *hookname = NULL;
int line_terminator = '\n';
+ int show_scope = 0;
+ int allow_unknown = 0;
int ret = 0;
struct option list_options[] = {
OPT_SET_INT('z', NULL, &line_terminator,
N_("use NUL as line terminator"), '\0'),
+ OPT_BOOL(0, "show-scope", &show_scope,
+ N_("show the config scope that defined each hook")),
+ OPT_BOOL(0, "allow-unknown-hook-name", &allow_unknown,
+ N_("allow running a hook with a non-native hook name")),
OPT_END(),
};
@@ -51,15 +65,22 @@ static int list(int argc, const char **argv, const char *prefix,
* arguments later they probably should be caught by parse_options.
*/
if (argc != 1)
- usage_msg_opt(_("You must specify a hook event name to list."),
+ usage_msg_opt(_("you must specify a hook event name to list"),
builtin_hook_list_usage, list_options);
hookname = argv[0];
+ if (!allow_unknown && !is_known_hook(hookname)) {
+ error(_("unknown hook event '%s';\n"
+ "use --allow-unknown-hook-name to allow non-native hook names"),
+ hookname);
+ return 1;
+ }
+
head = list_hooks(repo, hookname, NULL);
if (!head->nr) {
- warning(_("No hooks found for event '%s'"), hookname);
+ warning(_("no hooks found for event '%s'"), hookname);
ret = 1; /* no hooks found */
goto cleanup;
}
@@ -71,16 +92,27 @@ static int list(int argc, const char **argv, const char *prefix,
case HOOK_TRADITIONAL:
printf("%s%c", _("hook from hookdir"), line_terminator);
break;
- case HOOK_CONFIGURED:
- printf("%s%c", h->u.configured.friendly_name, line_terminator);
+ case HOOK_CONFIGURED: {
+ const char *name = h->u.configured.friendly_name;
+ const char *scope = show_scope ?
+ config_scope_name(h->u.configured.scope) : NULL;
+ if (scope)
+ printf("%s\t%s%s%c", scope,
+ h->u.configured.disabled ? "disabled\t" : "",
+ name, line_terminator);
+ else
+ printf("%s%s%c",
+ h->u.configured.disabled ? "disabled\t" : "",
+ name, line_terminator);
break;
+ }
default:
BUG("unknown hook kind");
}
}
cleanup:
- hook_list_clear(head, NULL);
+ string_list_clear_func(head, hook_free);
free(head);
return ret;
}
@@ -91,8 +123,11 @@ static int run(int argc, const char **argv, const char *prefix,
int i;
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
int ignore_missing = 0;
+ int allow_unknown = 0;
const char *hook_name;
struct option run_options[] = {
+ OPT_BOOL(0, "allow-unknown-hook-name", &allow_unknown,
+ N_("allow running a hook with a non-native hook name")),
OPT_BOOL(0, "ignore-missing", &ignore_missing,
N_("silently ignore missing requested <hook-name>")),
OPT_STRING(0, "to-stdin", &opt.path_to_stdin, N_("path"),
@@ -124,6 +159,14 @@ static int run(int argc, const char **argv, const char *prefix,
repo_config(the_repository, git_default_config, NULL);
hook_name = argv[0];
+
+ if (!allow_unknown && !is_known_hook(hook_name)) {
+ error(_("unknown hook event '%s';\n"
+ "use --allow-unknown-hook-name to allow non-native hook names"),
+ hook_name);
+ return 1;
+ }
+
if (!ignore_missing)
opt.error_if_missing = 1;
ret = run_hooks_opt(the_repository, hook_name, &opt);
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index d1e47279a8..ca7784dc2c 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -136,7 +136,7 @@ static int nr_threads;
static int from_stdin;
static int strict;
static int do_fsck_object;
-static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
+static struct fsck_options fsck_options;
static int verbose;
static const char *progress_title;
static int show_resolving_progress;
@@ -891,7 +891,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
if (startup_info->have_repository) {
read_lock();
collision_test_needed = odb_has_object(the_repository->objects, oid,
- HAS_OBJECT_FETCH_PROMISOR);
+ ODB_HAS_OBJECT_FETCH_PROMISOR);
read_unlock();
}
@@ -1908,6 +1908,8 @@ int cmd_index_pack(int argc,
show_usage_if_asked(argc, argv, index_pack_usage);
disable_replace_refs();
+
+ fsck_options_init(&fsck_options, the_repository, FSCK_OPTIONS_MISSING_GITMODULES);
fsck_options.walk = mark_link;
reset_pack_idx_option(&opts);
diff --git a/builtin/interpret-trailers.c b/builtin/interpret-trailers.c
index acaf42b2d9..e7e86e9523 100644
--- a/builtin/interpret-trailers.c
+++ b/builtin/interpret-trailers.c
@@ -156,7 +156,7 @@ int cmd_interpret_trailers(int argc,
N_("action if trailer is missing"), option_parse_if_missing),
OPT_BOOL(0, "only-trailers", &opts.only_trailers, N_("output only the trailers")),
- OPT_BOOL(0, "only-input", &opts.only_input, N_("do not apply trailer.* configuration variables")),
+ OPT_BOOL(0, "only-input", &opts.only_input, N_("do not apply trailer.<key-alias> configuration variables")),
OPT_BOOL(0, "unfold", &opts.unfold, N_("reformat multiline trailer values as single-line values")),
OPT_CALLBACK_F(0, "parse", &opts, NULL, N_("alias for --only-trailers --only-input --unfold"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG, parse_opt_parse),
diff --git a/builtin/log.c b/builtin/log.c
index 89e8b8f011..8c0939dd42 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -40,6 +40,7 @@
#include "progress.h"
#include "commit-slab.h"
#include "advice.h"
+#include "utf8.h"
#include "commit-reach.h"
#include "range-diff.h"
@@ -886,6 +887,7 @@ struct format_config {
char *signature;
char *signature_file;
enum cover_setting config_cover_letter;
+ char *fmt_cover_letter_commit_list;
char *config_output_directory;
enum cover_from_description cover_from_description_mode;
int show_notes;
@@ -930,6 +932,7 @@ static void format_config_release(struct format_config *cfg)
string_list_clear(&cfg->extra_cc, 0);
strbuf_release(&cfg->sprefix);
free(cfg->fmt_patch_suffix);
+ free(cfg->fmt_cover_letter_commit_list);
}
static enum cover_from_description parse_cover_from_description(const char *arg)
@@ -1052,6 +1055,10 @@ static int git_format_config(const char *var, const char *value,
cfg->config_cover_letter = git_config_bool(var, value) ? COVER_ON : COVER_OFF;
return 0;
}
+ if (!strcmp(var, "format.commitlistformat")) {
+ FREE_AND_NULL(cfg->fmt_cover_letter_commit_list);
+ return git_config_string(&cfg->fmt_cover_letter_commit_list, var, value);
+ }
if (!strcmp(var, "format.outputdirectory")) {
FREE_AND_NULL(cfg->config_output_directory);
return git_config_string(&cfg->config_output_directory, var, value);
@@ -1335,13 +1342,54 @@ static void get_notes_args(struct strvec *arg, struct rev_info *rev)
}
}
+static void generate_shortlog_cover_letter(struct shortlog *log,
+ struct rev_info *rev,
+ struct commit **list,
+ int nr)
+{
+ shortlog_init(log);
+ log->wrap_lines = 1;
+ log->wrap = MAIL_DEFAULT_WRAP;
+ log->in1 = 2;
+ log->in2 = 4;
+ log->file = rev->diffopt.file;
+ log->groups = SHORTLOG_GROUP_AUTHOR;
+ shortlog_finish_setup(log);
+ for (int i = 0; i < nr; i++)
+ shortlog_add_commit(log, list[i]);
+
+ shortlog_output(log);
+}
+
+static void generate_commit_list_cover(FILE *cover_file, const char *format,
+ struct commit **list, int n)
+{
+ struct strbuf commit_line = STRBUF_INIT;
+ struct pretty_print_context ctx = {0};
+ struct rev_info rev = REV_INFO_INIT;
+
+ rev.total = n;
+ ctx.rev = &rev;
+ for (int i = 1; i <= n; i++) {
+ rev.nr = i;
+ repo_format_commit_message(the_repository, list[n - i], format,
+ &commit_line, &ctx);
+ fprintf(cover_file, "%s\n", commit_line.buf);
+ strbuf_reset(&commit_line);
+ }
+ fprintf(cover_file, "\n");
+
+ strbuf_release(&commit_line);
+}
+
static void make_cover_letter(struct rev_info *rev, int use_separate_file,
struct commit *origin,
int nr, struct commit **list,
const char *description_file,
const char *branch_name,
int quiet,
- const struct format_config *cfg)
+ const struct format_config *cfg,
+ const char *format)
{
const char *from;
struct shortlog log;
@@ -1388,18 +1436,17 @@ static void make_cover_letter(struct rev_info *rev, int use_separate_file,
free(pp.after_subject);
strbuf_release(&sb);
- shortlog_init(&log);
- log.wrap_lines = 1;
- log.wrap = MAIL_DEFAULT_WRAP;
- log.in1 = 2;
- log.in2 = 4;
- log.file = rev->diffopt.file;
- log.groups = SHORTLOG_GROUP_AUTHOR;
- shortlog_finish_setup(&log);
- for (i = 0; i < nr; i++)
- shortlog_add_commit(&log, list[i]);
-
- shortlog_output(&log);
+ if (skip_prefix(format, "log:", &format))
+ generate_commit_list_cover(rev->diffopt.file, format, list, nr);
+ else if (!strcmp(format, "shortlog"))
+ generate_shortlog_cover_letter(&log, rev, list, nr);
+ else if (!strcmp(format, "modern"))
+ generate_commit_list_cover(rev->diffopt.file, "%w(72)[%(count)/%(total)] %s",
+ list, nr);
+ else if (strchr(format, '%'))
+ generate_commit_list_cover(rev->diffopt.file, format, list, nr);
+ else
+ die(_("'%s' is not a valid format string"), format);
/* We can only do diffstat with a unique reference point */
if (origin)
@@ -1917,6 +1964,7 @@ int cmd_format_patch(int argc,
int just_numbers = 0;
int ignore_if_in_upstream = 0;
int cover_letter = -1;
+ const char *cover_letter_fmt = NULL;
int boundary_count = 0;
int no_binary_diff = 0;
int zero_commit = 0;
@@ -1963,6 +2011,8 @@ int cmd_format_patch(int argc,
N_("print patches to standard out")),
OPT_BOOL(0, "cover-letter", &cover_letter,
N_("generate a cover letter")),
+ OPT_STRING(0, "commit-list-format", &cover_letter_fmt, N_("format-spec"),
+ N_("format spec used for the commit list in the cover letter")),
OPT_BOOL(0, "numbered-files", &just_numbers,
N_("use simple number sequence for output file names")),
OPT_STRING(0, "suffix", &fmt_patch_suffix, N_("sfx"),
@@ -2300,6 +2350,15 @@ int cmd_format_patch(int argc,
/* nothing to do */
goto done;
total = list.nr;
+
+ if (!cover_letter_fmt) {
+ cover_letter_fmt = cfg.fmt_cover_letter_commit_list;
+ if (!cover_letter_fmt)
+ cover_letter_fmt = "shortlog";
+ } else if (cover_letter == -1) {
+ cover_letter = 1;
+ }
+
if (cover_letter == -1) {
if (cfg.config_cover_letter == COVER_AUTO)
cover_letter = (total > 1);
@@ -2386,12 +2445,14 @@ int cmd_format_patch(int argc,
}
rev.numbered_files = just_numbers;
rev.patch_suffix = fmt_patch_suffix;
+
if (cover_letter) {
if (cfg.thread)
gen_message_id(&rev, "cover");
make_cover_letter(&rev, !!output_directory,
origin, list.nr, list.items,
- description_file, branch_name, quiet, &cfg);
+ description_file, branch_name, quiet, &cfg,
+ cover_letter_fmt);
print_bases(&bases, rev.diffopt.file);
print_signature(signature, rev.diffopt.file);
total++;
diff --git a/builtin/merge-file.c b/builtin/merge-file.c
index 084fdfec58..59a9792208 100644
--- a/builtin/merge-file.c
+++ b/builtin/merge-file.c
@@ -108,7 +108,8 @@ int cmd_merge_file(int argc,
return error_errno("failed to redirect stderr to /dev/null");
}
- if (object_id)
+ if (!repo && object_id)
+ /* emit the correct "not a git repo" error in this case */
setup_git_directory();
for (i = 0; i < 3; i++) {
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 7cf6e1230a..f40264a878 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -16,7 +16,7 @@ static char const * const builtin_mktag_usage[] = {
};
static int option_strict = 1;
-static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+static struct fsck_options fsck_options;
static int mktag_fsck_error_func(struct fsck_options *o UNUSED,
void *fsck_report UNUSED,
@@ -75,7 +75,7 @@ static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type)
int cmd_mktag(int argc,
const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
static struct option builtin_mktag_options[] = {
OPT_BOOL(0, "strict", &option_strict,
@@ -94,6 +94,7 @@ int cmd_mktag(int argc,
if (strbuf_read(&buf, 0, 0) < 0)
die_errno(_("could not read from stdin"));
+ fsck_options_init(&fsck_options, repo, FSCK_OPTIONS_STRICT);
fsck_options.error_func = mktag_fsck_error_func;
fsck_set_msg_type_from_ids(&fsck_options, FSCK_MSG_EXTRA_HEADER_ENTRY,
FSCK_WARN);
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 5f364aa816..0f72d96c02 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -9,12 +9,18 @@
#include "strbuf.h"
#include "trace2.h"
#include "odb.h"
+#include "odb/source.h"
#include "replace-object.h"
#include "repository.h"
#define BUILTIN_MIDX_WRITE_USAGE \
- N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>]" \
- "[--refs-snapshot=<path>]")
+ N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>]\n" \
+ " [--[no-]bitmap] [--[no-]incremental] [--[no-]stdin-packs]\n" \
+ " [--refs-snapshot=<path>]")
+
+#define BUILTIN_MIDX_COMPACT_USAGE \
+ N_("git multi-pack-index [<options>] compact [--[no-]incremental]\n" \
+ " [--[no-]bitmap] <from> <to>")
#define BUILTIN_MIDX_VERIFY_USAGE \
N_("git multi-pack-index [<options>] verify")
@@ -29,6 +35,10 @@ static char const * const builtin_multi_pack_index_write_usage[] = {
BUILTIN_MIDX_WRITE_USAGE,
NULL
};
+static char const * const builtin_multi_pack_index_compact_usage[] = {
+ BUILTIN_MIDX_COMPACT_USAGE,
+ NULL
+};
static char const * const builtin_multi_pack_index_verify_usage[] = {
BUILTIN_MIDX_VERIFY_USAGE,
NULL
@@ -43,6 +53,7 @@ static char const * const builtin_multi_pack_index_repack_usage[] = {
};
static char const * const builtin_multi_pack_index_usage[] = {
BUILTIN_MIDX_WRITE_USAGE,
+ BUILTIN_MIDX_COMPACT_USAGE,
BUILTIN_MIDX_VERIFY_USAGE,
BUILTIN_MIDX_EXPIRE_USAGE,
BUILTIN_MIDX_REPACK_USAGE,
@@ -84,6 +95,8 @@ static struct option common_opts[] = {
N_("directory"),
N_("object directory containing set of packfile and pack-index pairs"),
parse_object_dir),
+ OPT_BIT(0, "progress", &opts.flags, N_("force progress reporting"),
+ MIDX_PROGRESS),
OPT_END(),
};
@@ -138,8 +151,6 @@ static int cmd_multi_pack_index_write(int argc, const char **argv,
N_("pack for reuse when computing a multi-pack bitmap")),
OPT_BIT(0, "bitmap", &opts.flags, N_("write multi-pack bitmap"),
MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX),
- OPT_BIT(0, "progress", &opts.flags,
- N_("force progress reporting"), MIDX_PROGRESS),
OPT_BIT(0, "incremental", &opts.flags,
N_("write a new incremental MIDX"), MIDX_WRITE_INCREMENTAL),
OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
@@ -194,14 +205,78 @@ static int cmd_multi_pack_index_write(int argc, const char **argv,
return ret;
}
+static int cmd_multi_pack_index_compact(int argc, const char **argv,
+ const char *prefix,
+ struct repository *repo)
+{
+ struct multi_pack_index *m, *cur;
+ struct multi_pack_index *from_midx = NULL;
+ struct multi_pack_index *to_midx = NULL;
+ struct odb_source *source;
+ int ret;
+
+ struct option *options;
+ static struct option builtin_multi_pack_index_compact_options[] = {
+ OPT_BIT(0, "bitmap", &opts.flags, N_("write multi-pack bitmap"),
+ MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX),
+ OPT_BIT(0, "incremental", &opts.flags,
+ N_("write a new incremental MIDX"), MIDX_WRITE_INCREMENTAL),
+ OPT_END(),
+ };
+
+ repo_config(repo, git_multi_pack_index_write_config, NULL);
+
+ options = add_common_options(builtin_multi_pack_index_compact_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
+ argc = parse_options(argc, argv, prefix,
+ options, builtin_multi_pack_index_compact_usage,
+ 0);
+
+ if (argc != 2)
+ usage_with_options(builtin_multi_pack_index_compact_usage,
+ options);
+ source = handle_object_dir_option(the_repository);
+
+ FREE_AND_NULL(options);
+
+ m = get_multi_pack_index(source);
+
+ for (cur = m; cur && !(from_midx && to_midx); cur = cur->base_midx) {
+ const char *midx_csum = midx_get_checksum_hex(cur);
+
+ if (!from_midx && !strcmp(midx_csum, argv[0]))
+ from_midx = cur;
+ if (!to_midx && !strcmp(midx_csum, argv[1]))
+ to_midx = cur;
+ }
+
+ if (!from_midx)
+ die(_("could not find MIDX: %s"), argv[0]);
+ if (!to_midx)
+ die(_("could not find MIDX: %s"), argv[1]);
+ if (from_midx == to_midx)
+ die(_("MIDX compaction endpoints must be unique"));
+
+ for (m = from_midx; m; m = m->base_midx) {
+ if (m == to_midx)
+ die(_("MIDX %s must be an ancestor of %s"), argv[0], argv[1]);
+ }
+
+ ret = write_midx_file_compact(source, from_midx, to_midx, opts.flags);
+
+ return ret;
+}
+
static int cmd_multi_pack_index_verify(int argc, const char **argv,
const char *prefix,
struct repository *repo UNUSED)
{
struct option *options;
static struct option builtin_multi_pack_index_verify_options[] = {
- OPT_BIT(0, "progress", &opts.flags,
- N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
struct odb_source *source;
@@ -231,8 +306,6 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv,
{
struct option *options;
static struct option builtin_multi_pack_index_expire_options[] = {
- OPT_BIT(0, "progress", &opts.flags,
- N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
struct odb_source *source;
@@ -264,8 +337,6 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv,
static struct option builtin_multi_pack_index_repack_options[] = {
OPT_UNSIGNED(0, "batch-size", &opts.batch_size,
N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
- OPT_BIT(0, "progress", &opts.flags,
- N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
struct odb_source *source;
@@ -300,6 +371,7 @@ int cmd_multi_pack_index(int argc,
struct option builtin_multi_pack_index_options[] = {
OPT_SUBCOMMAND("repack", &fn, cmd_multi_pack_index_repack),
OPT_SUBCOMMAND("write", &fn, cmd_multi_pack_index_write),
+ OPT_SUBCOMMAND("compact", &fn, cmd_multi_pack_index_compact),
OPT_SUBCOMMAND("verify", &fn, cmd_multi_pack_index_verify),
OPT_SUBCOMMAND("expire", &fn, cmd_multi_pack_index_expire),
OPT_END(),
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index 6188cf98ce..d6594ada53 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -12,7 +12,6 @@
#include "object-name.h"
#include "pager.h"
#include "parse-options.h"
-#include "prio-queue.h"
#include "hash-lookup.h"
#include "commit-slab.h"
#include "commit-graph.h"
@@ -178,7 +177,7 @@ static void name_rev(struct commit *start_commit,
const char *tip_name, timestamp_t taggerdate,
int from_tag, int deref, struct mem_pool *string_pool)
{
- struct prio_queue queue;
+ struct commit_stack stack = COMMIT_STACK_INIT;
struct commit *commit;
struct commit_stack parents_to_queue = COMMIT_STACK_INIT;
struct rev_name *start_name;
@@ -197,10 +196,9 @@ static void name_rev(struct commit *start_commit,
else
start_name->tip_name = mem_pool_strdup(string_pool, tip_name);
- memset(&queue, 0, sizeof(queue)); /* Use the prio_queue as LIFO */
- prio_queue_put(&queue, start_commit);
+ commit_stack_push(&stack, start_commit);
- while ((commit = prio_queue_get(&queue))) {
+ while ((commit = commit_stack_pop(&stack))) {
struct rev_name *name = get_commit_rev_name(commit);
struct commit_list *parents;
int parent_number = 1;
@@ -241,13 +239,13 @@ static void name_rev(struct commit *start_commit,
}
}
- /* The first parent must come out first from the prio_queue */
+ /* The first parent must come out first from the stack */
while (parents_to_queue.nr)
- prio_queue_put(&queue,
- commit_stack_pop(&parents_to_queue));
+ commit_stack_push(&stack,
+ commit_stack_pop(&parents_to_queue));
}
- clear_prio_queue(&queue);
+ commit_stack_clear(&stack);
commit_stack_clear(&parents_to_queue);
}
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index da1087930c..dd2480a73d 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -28,6 +28,7 @@
#include "reachable.h"
#include "oid-array.h"
#include "strvec.h"
+#include "strmap.h"
#include "list.h"
#include "packfile.h"
#include "object-file.h"
@@ -217,6 +218,7 @@ static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep_on_disk;
static int ignore_packed_keep_in_core;
+static int ignore_packed_keep_in_core_open;
static int ignore_packed_keep_in_core_has_cruft;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
@@ -1632,7 +1634,8 @@ static int want_found_object(const struct object_id *oid, int exclude,
/*
* Then handle .keep first, as we have a fast(er) path there.
*/
- if (ignore_packed_keep_on_disk || ignore_packed_keep_in_core) {
+ if (ignore_packed_keep_on_disk || ignore_packed_keep_in_core ||
+ ignore_packed_keep_in_core_open) {
/*
* Set the flags for the kept-pack cache to be the ones we want
* to ignore.
@@ -1646,6 +1649,8 @@ static int want_found_object(const struct object_id *oid, int exclude,
flags |= KEPT_PACK_ON_DISK;
if (ignore_packed_keep_in_core)
flags |= KEPT_PACK_IN_CORE;
+ if (ignore_packed_keep_in_core_open)
+ flags |= KEPT_PACK_IN_CORE_OPEN;
/*
* If the object is in a pack that we want to ignore, *and* we
@@ -1657,6 +1662,8 @@ static int want_found_object(const struct object_id *oid, int exclude,
return 0;
if (ignore_packed_keep_in_core && p->pack_keep_in_core)
return 0;
+ if (ignore_packed_keep_in_core_open && p->pack_keep_in_core_open)
+ return 0;
if (has_object_kept_pack(p->repo, oid, flags))
return 0;
} else {
@@ -3756,6 +3763,7 @@ static int add_object_entry_from_pack(const struct object_id *oid,
void *_data)
{
off_t ofs;
+ struct object_info oi = OBJECT_INFO_INIT;
enum object_type type = OBJ_NONE;
display_progress(progress_state, ++nr_seen);
@@ -3763,29 +3771,34 @@ static int add_object_entry_from_pack(const struct object_id *oid,
if (have_duplicate_entry(oid, 0))
return 0;
- ofs = nth_packed_object_offset(p, pos);
- if (!want_object_in_pack(oid, 0, &p, &ofs))
- return 0;
-
- if (p) {
- struct object_info oi = OBJECT_INFO_INIT;
+ stdin_packs_found_nr++;
- oi.typep = &type;
- if (packed_object_info(p, ofs, &oi) < 0) {
- die(_("could not get type of object %s in pack %s"),
- oid_to_hex(oid), p->pack_name);
- } else if (type == OBJ_COMMIT) {
- struct rev_info *revs = _data;
- /*
- * commits in included packs are used as starting points for the
- * subsequent revision walk
- */
- add_pending_oid(revs, NULL, oid, 0);
- }
+ ofs = nth_packed_object_offset(p, pos);
- stdin_packs_found_nr++;
+ oi.typep = &type;
+ if (packed_object_info(p, ofs, &oi) < 0) {
+ die(_("could not get type of object %s in pack %s"),
+ oid_to_hex(oid), p->pack_name);
+ } else if (type == OBJ_COMMIT) {
+ struct rev_info *revs = _data;
+ /*
+ * commits in included packs are used as starting points
+ * for the subsequent revision walk
+ *
+ * Note that we do want to walk through commits that are
+ * present in excluded-open ('!') packs to pick up any
+ * objects reachable from them not present in the
+ * excluded-closed ('^') packs.
+ *
+ * However, we'll only add those objects to the packing
+ * list after checking `want_object_in_pack()` below.
+ */
+ add_pending_oid(revs, NULL, oid, 0);
}
+ if (!want_object_in_pack(oid, 0, &p, &ofs))
+ return 0;
+
create_object_entry(oid, type, 0, 0, 0, p, ofs);
return 0;
@@ -3835,107 +3848,197 @@ static void show_commit_pack_hint(struct commit *commit, void *data)
}
+/*
+ * stdin_pack_info_kind specifies how a pack specified over stdin
+ * should be treated when pack-objects is invoked with --stdin-packs.
+ *
+ * - STDIN_PACK_INCLUDE: objects in any packs with this flag bit set
+ * should be included in the output pack, unless they appear in an
+ * excluded pack.
+ *
+ * - STDIN_PACK_EXCLUDE_CLOSED: objects in any packs with this flag
+ * bit set should be excluded from the output pack.
+ *
+ * - STDIN_PACK_EXCLUDE_OPEN: objects in any packs with this flag
+ * bit set should be excluded from the output pack, but are not
+ * guaranteed to be closed under reachability.
+ *
+ * Objects in packs whose 'kind' bits include STDIN_PACK_INCLUDE or
+ * STDIN_PACK_EXCLUDE_OPEN are used as traversal tips when invoked
+ * with --stdin-packs=follow.
+ */
+enum stdin_pack_info_kind {
+ STDIN_PACK_INCLUDE = (1<<0),
+ STDIN_PACK_EXCLUDE_CLOSED = (1<<1),
+ STDIN_PACK_EXCLUDE_OPEN = (1<<2),
+};
+
+struct stdin_pack_info {
+ struct packed_git *p;
+ enum stdin_pack_info_kind kind;
+};
+
static int pack_mtime_cmp(const void *_a, const void *_b)
{
- struct packed_git *a = ((const struct string_list_item*)_a)->util;
- struct packed_git *b = ((const struct string_list_item*)_b)->util;
+ struct stdin_pack_info *a = ((const struct string_list_item*)_a)->util;
+ struct stdin_pack_info *b = ((const struct string_list_item*)_b)->util;
/*
* order packs by descending mtime so that objects are laid out
* roughly as newest-to-oldest
*/
- if (a->mtime < b->mtime)
+ if (a->p->mtime < b->p->mtime)
return 1;
- else if (b->mtime < a->mtime)
+ else if (b->p->mtime < a->p->mtime)
return -1;
else
return 0;
}
-static void read_packs_list_from_stdin(struct rev_info *revs)
+static int stdin_packs_include_check_obj(struct object *obj, void *data UNUSED)
+{
+ return !has_object_kept_pack(to_pack.repo, &obj->oid,
+ KEPT_PACK_IN_CORE);
+}
+
+static int stdin_packs_include_check(struct commit *commit, void *data)
+{
+ return stdin_packs_include_check_obj((struct object *)commit, data);
+}
+
+static void stdin_packs_add_pack_entries(struct strmap *packs,
+ struct rev_info *revs)
+{
+ struct string_list keys = STRING_LIST_INIT_NODUP;
+ struct string_list_item *item;
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+
+ strmap_for_each_entry(packs, &iter, entry) {
+ struct stdin_pack_info *info = entry->value;
+ if (!info->p)
+ die(_("could not find pack '%s'"), entry->key);
+
+ string_list_append(&keys, entry->key)->util = info;
+ }
+
+ /*
+ * Order packs by ascending mtime; use QSORT directly to access the
+ * string_list_item's ->util pointer, which string_list_sort() does not
+ * provide.
+ */
+ QSORT(keys.items, keys.nr, pack_mtime_cmp);
+
+ for_each_string_list_item(item, &keys) {
+ struct stdin_pack_info *info = item->util;
+
+ if (info->kind & STDIN_PACK_EXCLUDE_OPEN) {
+ /*
+ * When open-excluded packs ("!") are present, stop
+ * the parent walk at closed-excluded ("^") packs.
+ * Objects behind a "^" boundary are guaranteed to
+ * have closure and should not be rescued.
+ */
+ revs->include_check = stdin_packs_include_check;
+ revs->include_check_obj = stdin_packs_include_check_obj;
+ }
+
+ if ((info->kind & STDIN_PACK_INCLUDE) ||
+ (info->kind & STDIN_PACK_EXCLUDE_OPEN))
+ for_each_object_in_pack(info->p,
+ add_object_entry_from_pack,
+ revs,
+ ODB_FOR_EACH_OBJECT_PACK_ORDER);
+ }
+
+ string_list_clear(&keys, 0);
+}
+
+static void stdin_packs_read_input(struct rev_info *revs,
+ enum stdin_packs_mode mode)
{
struct strbuf buf = STRBUF_INIT;
- struct string_list include_packs = STRING_LIST_INIT_DUP;
- struct string_list exclude_packs = STRING_LIST_INIT_DUP;
- struct string_list_item *item = NULL;
+ struct strmap packs = STRMAP_INIT;
struct packed_git *p;
while (strbuf_getline(&buf, stdin) != EOF) {
- if (!buf.len)
+ struct stdin_pack_info *info;
+ enum stdin_pack_info_kind kind = STDIN_PACK_INCLUDE;
+ const char *key = buf.buf;
+
+ if (!*key)
continue;
+ else if (*key == '^')
+ kind = STDIN_PACK_EXCLUDE_CLOSED;
+ else if (*key == '!' && mode == STDIN_PACKS_MODE_FOLLOW)
+ kind = STDIN_PACK_EXCLUDE_OPEN;
- if (*buf.buf == '^')
- string_list_append(&exclude_packs, buf.buf + 1);
- else
- string_list_append(&include_packs, buf.buf);
+ if (kind != STDIN_PACK_INCLUDE)
+ key++;
+
+ info = strmap_get(&packs, key);
+ if (!info) {
+ CALLOC_ARRAY(info, 1);
+ strmap_put(&packs, key, info);
+ }
+
+ info->kind |= kind;
strbuf_reset(&buf);
}
- string_list_sort_u(&include_packs, 0);
- string_list_sort_u(&exclude_packs, 0);
-
repo_for_each_pack(the_repository, p) {
- const char *pack_name = pack_basename(p);
+ struct stdin_pack_info *info;
- if ((item = string_list_lookup(&include_packs, pack_name))) {
+ info = strmap_get(&packs, pack_basename(p));
+ if (!info)
+ continue;
+
+ if (info->kind & STDIN_PACK_INCLUDE) {
if (exclude_promisor_objects && p->pack_promisor)
die(_("packfile %s is a promisor but --exclude-promisor-objects was given"), p->pack_name);
- item->util = p;
- }
- if ((item = string_list_lookup(&exclude_packs, pack_name)))
- item->util = p;
- }
- /*
- * Arguments we got on stdin may not even be packs. First
- * check that to avoid segfaulting later on in
- * e.g. pack_mtime_cmp(), excluded packs are handled below.
- *
- * Since we first parsed our STDIN and then sorted the input
- * lines the pack we error on will be whatever line happens to
- * sort first. This is lazy, it's enough that we report one
- * bad case here, we don't need to report the first/last one,
- * or all of them.
- */
- for_each_string_list_item(item, &include_packs) {
- struct packed_git *p = item->util;
- if (!p)
- die(_("could not find pack '%s'"), item->string);
- if (!is_pack_valid(p))
- die(_("packfile %s cannot be accessed"), p->pack_name);
- }
+ /*
+ * Arguments we got on stdin may not even be
+ * packs. First check that to avoid segfaulting
+ * later on in e.g. pack_mtime_cmp(), excluded
+ * packs are handled below.
+ */
+ if (!is_pack_valid(p))
+ die(_("packfile %s cannot be accessed"), p->pack_name);
+ }
- /*
- * Then, handle all of the excluded packs, marking them as
- * kept in-core so that later calls to add_object_entry()
- * discards any objects that are also found in excluded packs.
- */
- for_each_string_list_item(item, &exclude_packs) {
- struct packed_git *p = item->util;
- if (!p)
- die(_("could not find pack '%s'"), item->string);
- p->pack_keep_in_core = 1;
- }
+ if (info->kind & STDIN_PACK_EXCLUDE_CLOSED) {
+ /*
+ * Marking excluded packs as kept in-core so
+ * that later calls to add_object_entry()
+ * discards any objects that are also found in
+ * excluded packs.
+ */
+ p->pack_keep_in_core = 1;
+ }
- /*
- * Order packs by ascending mtime; use QSORT directly to access the
- * string_list_item's ->util pointer, which string_list_sort() does not
- * provide.
- */
- QSORT(include_packs.items, include_packs.nr, pack_mtime_cmp);
+ if (info->kind & STDIN_PACK_EXCLUDE_OPEN) {
+ /*
+ * Marking excluded open packs as kept in-core
+ * (open) for the same reason as we marked
+ * exclude closed packs as kept in-core.
+ *
+ * Use a separate flag here to ensure we don't
+ * halt our traversal at these packs, since they
+ * are not guaranteed to have closure.
+ *
+ */
+ p->pack_keep_in_core_open = 1;
+ }
- for_each_string_list_item(item, &include_packs) {
- struct packed_git *p = item->util;
- for_each_object_in_pack(p,
- add_object_entry_from_pack,
- revs,
- ODB_FOR_EACH_OBJECT_PACK_ORDER);
+ info->p = p;
}
+ stdin_packs_add_pack_entries(&packs, revs);
+
strbuf_release(&buf);
- string_list_clear(&include_packs, 0);
- string_list_clear(&exclude_packs, 0);
+ strmap_clear(&packs, 1);
}
static void add_unreachable_loose_objects(struct rev_info *revs);
@@ -3972,7 +4075,15 @@ static void read_stdin_packs(enum stdin_packs_mode mode, int rev_list_unpacked)
/* avoids adding objects in excluded packs */
ignore_packed_keep_in_core = 1;
- read_packs_list_from_stdin(&revs);
+ if (mode == STDIN_PACKS_MODE_FOLLOW) {
+ /*
+ * In '--stdin-packs=follow' mode, additionally ignore
+ * objects in excluded-open packs to prevent them from
+ * appearing in the resulting pack.
+ */
+ ignore_packed_keep_in_core_open = 1;
+ }
+ stdin_packs_read_input(&revs, mode);
if (rev_list_unpacked)
add_unreachable_loose_objects(&revs);
@@ -3983,6 +4094,8 @@ static void read_stdin_packs(enum stdin_packs_mode mode, int rev_list_unpacked)
show_object_pack_hint,
&mode);
+ release_revisions(&revs);
+
trace2_data_intmax("pack-objects", the_repository, "stdin_packs_found",
stdin_packs_found_nr);
trace2_data_intmax("pack-objects", the_repository, "stdin_packs_hints",
@@ -4359,6 +4472,12 @@ static void add_objects_in_unpacked_packs(void)
{
struct odb_source *source;
time_t mtime;
+ struct odb_for_each_object_options opts = {
+ .flags = ODB_FOR_EACH_OBJECT_PACK_ORDER |
+ ODB_FOR_EACH_OBJECT_LOCAL_ONLY |
+ ODB_FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS |
+ ODB_FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS,
+ };
struct object_info oi = {
.mtimep = &mtime,
};
@@ -4371,11 +4490,7 @@ static void add_objects_in_unpacked_packs(void)
continue;
if (packfile_store_for_each_object(files->packed, &oi,
- add_object_in_unpacked_pack, NULL,
- ODB_FOR_EACH_OBJECT_PACK_ORDER |
- ODB_FOR_EACH_OBJECT_LOCAL_ONLY |
- ODB_FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS |
- ODB_FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS))
+ add_object_in_unpacked_pack, NULL, &opts))
die(_("cannot open pack index"));
}
}
diff --git a/builtin/rebase.c b/builtin/rebase.c
index a1c7d78196..fa4f5d9306 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -182,8 +182,7 @@ static struct replay_opts get_replay_opts(const struct rebase_options *opts)
replay.signoff = opts->signoff;
- for (size_t i = 0; i < opts->trailer_args.nr; i++)
- strvec_push(&replay.trailer_args, opts->trailer_args.v[i]);
+ strvec_pushv(&replay.trailer_args, opts->trailer_args.v);
replay.allow_ff = !(opts->flags & REBASE_FORCE);
if (opts->allow_rerere_autoupdate)
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index e34edff406..dada55884a 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -3,46 +3,45 @@
#include "builtin.h"
#include "abspath.h"
-
+#include "commit.h"
+#include "commit-reach.h"
#include "config.h"
+#include "connect.h"
+#include "connected.h"
#include "environment.h"
+#include "exec-cmd.h"
+#include "fsck.h"
#include "gettext.h"
+#include "gpg-interface.h"
#include "hex.h"
-#include "lockfile.h"
-#include "pack.h"
-#include "refs.h"
-#include "pkt-line.h"
-#include "sideband.h"
-#include "run-command.h"
#include "hook.h"
-#include "exec-cmd.h"
-#include "commit.h"
+#include "lockfile.h"
#include "object.h"
-#include "remote.h"
-#include "connect.h"
-#include "string-list.h"
-#include "oid-array.h"
-#include "connected.h"
-#include "strvec.h"
-#include "version.h"
-#include "gpg-interface.h"
-#include "sigchain.h"
-#include "fsck.h"
-#include "tmp-objdir.h"
-#include "oidset.h"
-#include "packfile.h"
#include "object-file.h"
#include "object-name.h"
#include "odb.h"
+#include "oid-array.h"
+#include "oidset.h"
+#include "pack.h"
+#include "packfile.h"
+#include "parse-options.h"
+#include "pkt-line.h"
#include "protocol.h"
-#include "commit-reach.h"
+#include "refs.h"
+#include "remote.h"
+#include "run-command.h"
#include "server-info.h"
+#include "setup.h"
+#include "shallow.h"
+#include "sideband.h"
+#include "sigchain.h"
+#include "string-list.h"
+#include "strvec.h"
+#include "tmp-objdir.h"
#include "trace.h"
#include "trace2.h"
+#include "version.h"
#include "worktree.h"
-#include "shallow.h"
-#include "setup.h"
-#include "parse-options.h"
static const char * const receive_pack_usage[] = {
N_("git receive-pack <git-dir>"),
@@ -904,11 +903,14 @@ static int feed_receive_hook_cb(int hook_stdin_fd, void *pp_cb UNUSED, void *pp_
static void *receive_hook_feed_state_alloc(void *feed_pipe_ctx)
{
struct receive_hook_feed_state *init_state = feed_pipe_ctx;
- struct receive_hook_feed_state *data = xcalloc(1, sizeof(*data));
+ struct receive_hook_feed_state *data;
+
+ CALLOC_ARRAY(data, 1);
data->report = init_state->report;
data->cmd = init_state->cmd;
data->skip_broken = init_state->skip_broken;
strbuf_init(&data->buf, 0);
+
return data;
}
@@ -928,7 +930,11 @@ static int run_receive_hook(struct command *commands,
{
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
struct command *iter = commands;
- struct receive_hook_feed_state feed_init_state = { 0 };
+ struct receive_hook_feed_state feed_init_state = {
+ .cmd = commands,
+ .skip_broken = skip_broken,
+ .buf = STRBUF_INIT,
+ };
struct async sideband_async;
int sideband_async_started = 0;
int saved_stderr = -1;
@@ -961,8 +967,6 @@ static int run_receive_hook(struct command *commands,
prepare_sideband_async(&sideband_async, &saved_stderr, &sideband_async_started);
/* set up stdin callback */
- feed_init_state.cmd = commands;
- feed_init_state.skip_broken = skip_broken;
opt.feed_pipe_ctx = &feed_init_state;
opt.feed_pipe = feed_receive_hook_cb;
opt.feed_pipe_cb_data_alloc = receive_hook_feed_state_alloc;
@@ -1380,32 +1384,16 @@ static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
return 0;
}
-/*
- * NEEDSWORK: we should consolidate various implementations of "are we
- * on an unborn branch?" test into one, and make the unified one more
- * robust. !get_sha1() based check used here and elsewhere would not
- * allow us to tell an unborn branch from corrupt ref, for example.
- * For the purpose of fixing "deploy-to-update does not work when
- * pushing into an empty repository" issue, this should suffice for
- * now.
- */
-static int head_has_history(void)
-{
- struct object_id oid;
-
- return !repo_get_oid(the_repository, "HEAD", &oid);
-}
-
static const char *push_to_deploy(unsigned char *sha1,
struct strvec *env,
- const char *work_tree)
+ const struct worktree *worktree)
{
struct child_process child = CHILD_PROCESS_INIT;
strvec_pushl(&child.args, "update-index", "-q", "--ignore-submodules",
"--refresh", NULL);
strvec_pushv(&child.env, env->v);
- child.dir = work_tree;
+ child.dir = worktree->path;
child.no_stdin = 1;
child.stdout_to_stderr = 1;
child.git_cmd = 1;
@@ -1417,7 +1405,7 @@ static const char *push_to_deploy(unsigned char *sha1,
strvec_pushl(&child.args, "diff-files", "--quiet",
"--ignore-submodules", "--", NULL);
strvec_pushv(&child.env, env->v);
- child.dir = work_tree;
+ child.dir = worktree->path;
child.no_stdin = 1;
child.stdout_to_stderr = 1;
child.git_cmd = 1;
@@ -1427,9 +1415,16 @@ static const char *push_to_deploy(unsigned char *sha1,
child_process_init(&child);
strvec_pushl(&child.args, "diff-index", "--quiet", "--cached",
"--ignore-submodules",
- /* diff-index with either HEAD or an empty tree */
- head_has_history() ? "HEAD" : empty_tree_oid_hex(the_repository->hash_algo),
- "--", NULL);
+ /*
+ * diff-index with either HEAD or an empty tree
+ *
+ * NEEDSWORK: is_null_oid() cannot know whether it's an
+ * unborn HEAD or a corrupt ref. It works for now because
+ * it's only needed to know if we are comparing HEAD or an
+ * empty tree.
+ */
+ !is_null_oid(&worktree->head_oid) ? "HEAD" :
+ empty_tree_oid_hex(the_repository->hash_algo), "--", NULL);
strvec_pushv(&child.env, env->v);
child.no_stdin = 1;
child.no_stdout = 1;
@@ -1442,7 +1437,7 @@ static const char *push_to_deploy(unsigned char *sha1,
strvec_pushl(&child.args, "read-tree", "-u", "-m", hash_to_hex(sha1),
NULL);
strvec_pushv(&child.env, env->v);
- child.dir = work_tree;
+ child.dir = worktree->path;
child.no_stdin = 1;
child.no_stdout = 1;
child.stdout_to_stderr = 0;
@@ -1490,7 +1485,7 @@ static const char *update_worktree(unsigned char *sha1, const struct worktree *w
retval = push_to_checkout(sha1, &invoked_hook, &env, worktree->path);
if (!invoked_hook)
- retval = push_to_deploy(sha1, &env, worktree->path);
+ retval = push_to_deploy(sha1, &env, worktree);
strvec_clear(&env);
free(git_dir);
@@ -1546,7 +1541,7 @@ static const char *update(struct command *cmd, struct shallow_info *si)
if (!is_null_oid(new_oid) &&
!odb_has_object(the_repository->objects, new_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR)) {
error("unpack should have generated %s, "
"but I can't find it!", oid_to_hex(new_oid));
ret = "bad pack";
diff --git a/builtin/refs.c b/builtin/refs.c
index 3064f888b2..e3125bc61b 100644
--- a/builtin/refs.c
+++ b/builtin/refs.c
@@ -78,9 +78,9 @@ out:
}
static int cmd_refs_verify(int argc, const char **argv, const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
- struct fsck_options fsck_refs_options = FSCK_REFS_OPTIONS_DEFAULT;
+ struct fsck_options fsck_refs_options;
struct worktree **worktrees;
const char * const verify_usage[] = {
REFS_VERIFY_USAGE,
@@ -93,6 +93,8 @@ static int cmd_refs_verify(int argc, const char **argv, const char *prefix,
};
int ret = 0;
+ fsck_options_init(&fsck_refs_options, repo, FSCK_OPTIONS_REFS);
+
argc = parse_options(argc, argv, prefix, options, verify_usage, 0);
if (argc)
usage(_("'git refs verify' takes no arguments"));
diff --git a/builtin/remote.c b/builtin/remote.c
index 0fddaa1773..de989ea3ba 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -473,7 +473,7 @@ static int get_push_ref_states(const struct ref *remote_refs,
else if (is_null_oid(&ref->old_oid))
info->status = PUSH_STATUS_CREATE;
else if (odb_has_object(the_repository->objects, &ref->old_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) &&
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR) &&
ref_newer(&ref->new_oid, &ref->old_oid))
info->status = PUSH_STATUS_FASTFORWARD;
else
diff --git a/builtin/repack.c b/builtin/repack.c
index f6bb04bef7..4c5a82c2c8 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -369,8 +369,23 @@ int cmd_repack(int argc,
*/
for (i = 0; i < geometry.split; i++)
fprintf(in, "%s\n", pack_basename(geometry.pack[i]));
- for (i = geometry.split; i < geometry.pack_nr; i++)
- fprintf(in, "^%s\n", pack_basename(geometry.pack[i]));
+ for (i = geometry.split; i < geometry.pack_nr; i++) {
+ const char *basename = pack_basename(geometry.pack[i]);
+ char marker = '^';
+
+ if (!midx_must_contain_cruft &&
+ !string_list_has_string(&existing.midx_packs,
+ basename)) {
+ /*
+ * Assume non-MIDX'd packs are not
+ * necessarily closed under
+ * reachability.
+ */
+ marker = '!';
+ }
+
+ fprintf(in, "%c%s\n", marker, basename);
+ }
fclose(in);
}
diff --git a/builtin/repo.c b/builtin/repo.c
index 55f9b9095c..71a5c1c29c 100644
--- a/builtin/repo.c
+++ b/builtin/repo.c
@@ -20,11 +20,27 @@
#include "tree-walk.h"
#include "utf8.h"
+#define REPO_INFO_USAGE \
+ "git repo info [--format=(lines|nul) | -z] [--all | <key>...]", \
+ "git repo info --keys [--format=(lines|nul) | -z]"
+
+#define REPO_STRUCTURE_USAGE \
+ "git repo structure [--format=(table|lines|nul) | -z]"
+
static const char *const repo_usage[] = {
- "git repo info [--format=(lines|nul) | -z] [--all | <key>...]",
- "git repo info --keys [--format=(lines|nul) | -z]",
- "git repo structure [--format=(table|lines|nul) | -z]",
- NULL
+ REPO_INFO_USAGE,
+ REPO_STRUCTURE_USAGE,
+ NULL,
+};
+
+static const char *const repo_info_usage[] = {
+ REPO_INFO_USAGE,
+ NULL,
+};
+
+static const char *const repo_structure_usage[] = {
+ REPO_STRUCTURE_USAGE,
+ NULL,
};
typedef int get_value_fn(struct repository *repo, struct strbuf *buf);
@@ -214,7 +230,7 @@ static int cmd_repo_info(int argc, const char **argv, const char *prefix,
OPT_END()
};
- argc = parse_options(argc, argv, prefix, options, repo_usage, 0);
+ argc = parse_options(argc, argv, prefix, options, repo_info_usage, 0);
if (show_keys && (all_keys || argc))
die(_("--keys cannot be used with a <key> or --all"));
@@ -879,7 +895,7 @@ static int cmd_repo_structure(int argc, const char **argv, const char *prefix,
OPT_END()
};
- argc = parse_options(argc, argv, prefix, options, repo_usage, 0);
+ argc = parse_options(argc, argv, prefix, options, repo_structure_usage, 0);
if (argc)
usage(_("too many arguments"));
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 01a62800e8..218b5f34d6 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -267,21 +267,20 @@ static int show_file(const char *arg, int output_prefix)
static int try_difference(const char *arg)
{
- char *dotdot;
+ const char *dotdot;
struct object_id start_oid;
struct object_id end_oid;
const char *end;
const char *start;
+ char *to_free;
int symmetric;
static const char head_by_default[] = "HEAD";
if (!(dotdot = strstr(arg, "..")))
return 0;
+ start = to_free = xmemdupz(arg, dotdot - arg);
end = dotdot + 2;
- start = arg;
symmetric = (*end == '.');
-
- *dotdot = 0;
end += symmetric;
if (!*end)
@@ -295,7 +294,7 @@ static int try_difference(const char *arg)
* Just ".."? That is not a range but the
* pathspec for the parent directory.
*/
- *dotdot = '.';
+ free(to_free);
return 0;
}
@@ -308,7 +307,7 @@ static int try_difference(const char *arg)
a = lookup_commit_reference(the_repository, &start_oid);
b = lookup_commit_reference(the_repository, &end_oid);
if (!a || !b) {
- *dotdot = '.';
+ free(to_free);
return 0;
}
if (repo_get_merge_bases(the_repository, a, b, &exclude) < 0)
@@ -318,16 +317,16 @@ static int try_difference(const char *arg)
show_rev(REVERSED, &commit->object.oid, NULL);
}
}
- *dotdot = '.';
+ free(to_free);
return 1;
}
- *dotdot = '.';
+ free(to_free);
return 0;
}
static int try_parent_shorthands(const char *arg)
{
- char *dotdot;
+ const char *mark;
struct object_id oid;
struct commit *commit;
struct commit_list *parents;
@@ -335,38 +334,39 @@ static int try_parent_shorthands(const char *arg)
int include_rev = 0;
int include_parents = 0;
int exclude_parent = 0;
+ char *to_free;
- if ((dotdot = strstr(arg, "^!"))) {
+ if ((mark = strstr(arg, "^!"))) {
include_rev = 1;
- if (dotdot[2])
+ if (mark[2])
return 0;
- } else if ((dotdot = strstr(arg, "^@"))) {
+ } else if ((mark = strstr(arg, "^@"))) {
include_parents = 1;
- if (dotdot[2])
+ if (mark[2])
return 0;
- } else if ((dotdot = strstr(arg, "^-"))) {
+ } else if ((mark = strstr(arg, "^-"))) {
include_rev = 1;
exclude_parent = 1;
- if (dotdot[2]) {
+ if (mark[2]) {
char *end;
- exclude_parent = strtoul(dotdot + 2, &end, 10);
+ exclude_parent = strtoul(mark + 2, &end, 10);
if (*end != '\0' || !exclude_parent)
return 0;
}
} else
return 0;
- *dotdot = 0;
+ arg = to_free = xmemdupz(arg, mark - arg);
if (repo_get_oid_committish(the_repository, arg, &oid) ||
!(commit = lookup_commit_reference(the_repository, &oid))) {
- *dotdot = '^';
+ free(to_free);
return 0;
}
if (exclude_parent &&
exclude_parent > commit_list_count(commit->parents)) {
- *dotdot = '^';
+ free(to_free);
return 0;
}
@@ -387,7 +387,7 @@ static int try_parent_shorthands(const char *arg)
free(name);
}
- *dotdot = '^';
+ free(to_free);
return 1;
}
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
index 5d31acea7c..d508441632 100644
--- a/builtin/show-ref.c
+++ b/builtin/show-ref.c
@@ -37,7 +37,7 @@ static void show_one(const struct show_one_options *opts,
struct object_id peeled;
if (!odb_has_object(the_repository->objects, ref->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
die("git show-ref: bad ref %s (%s)", ref->name,
oid_to_hex(ref->oid));
diff --git a/builtin/stash.c b/builtin/stash.c
index 95c5005b0b..0d27b2fb1f 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -50,10 +50,10 @@
#define BUILTIN_STASH_STORE_USAGE \
N_("git stash store [(-m | --message) <message>] [-q | --quiet] <commit>")
#define BUILTIN_STASH_PUSH_USAGE \
- N_("git stash [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
+ N_("git stash [push] [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
" [-u | --include-untracked] [-a | --all] [(-m | --message) <message>]\n" \
" [--pathspec-from-file=<file> [--pathspec-file-nul]]\n" \
- " [--] [<pathspec>...]]")
+ " [--] [<pathspec>...]")
#define BUILTIN_STASH_SAVE_USAGE \
N_("git stash save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
" [-u | --include-untracked] [-a | --all] [<message>]")
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index ff8b05d1ba..2f589e3b37 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -29,6 +29,7 @@
#include "object-file.h"
#include "object-name.h"
#include "odb.h"
+#include "odb/source.h"
#include "advice.h"
#include "branch.h"
#include "list-objects-filter-options.h"
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 6fc64e9e4b..e01cf6e360 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -29,7 +29,7 @@ static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
static struct git_hash_ctx ctx;
-static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+static struct fsck_options fsck_options;
static struct progress *progress;
/*
@@ -449,7 +449,7 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
if (!delta_data)
return;
if (odb_has_object(the_repository->objects, &base_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
; /* Ok we have this one */
else if (resolve_against_held(nr, &base_oid,
delta_data, delta_size))
@@ -613,7 +613,7 @@ static void unpack_all(void)
int cmd_unpack_objects(int argc,
const char **argv,
const char *prefix UNUSED,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
int i;
struct object_id oid;
@@ -627,6 +627,8 @@ int cmd_unpack_objects(int argc,
show_usage_if_asked(argc, argv, unpack_usage);
+ fsck_options_init(&fsck_options, repo, FSCK_OPTIONS_STRICT);
+
for (i = 1 ; i < argc; i++) {
const char *arg = argv[i];
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 4035b1cb06..4fd6f7575f 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -692,25 +692,8 @@ static int can_use_local_refs(const struct add_opts *opts)
if (refs_head_ref(get_main_ref_store(the_repository), first_valid_ref, NULL)) {
return 1;
} else if (refs_for_each_branch_ref(get_main_ref_store(the_repository), first_valid_ref, NULL)) {
- if (!opts->quiet) {
- struct strbuf path = STRBUF_INIT;
- struct strbuf contents = STRBUF_INIT;
- char *wt_gitdir = get_worktree_git_dir(NULL);
-
- strbuf_add_real_path(&path, wt_gitdir);
- strbuf_addstr(&path, "/HEAD");
- strbuf_read_file(&contents, path.buf, 64);
- strbuf_stripspace(&contents, NULL);
- strbuf_strip_suffix(&contents, "\n");
-
- warning(_("HEAD points to an invalid (or orphaned) reference.\n"
- "HEAD path: '%s'\n"
- "HEAD contents: '%s'"),
- path.buf, contents.buf);
- strbuf_release(&path);
- strbuf_release(&contents);
- free(wt_gitdir);
- }
+ if (!opts->quiet)
+ warning(_("HEAD points to an invalid (or orphaned) reference.\n"));
return 1;
}
return 0;
diff --git a/cache-tree.c b/cache-tree.c
index 60bcc07c3b..7881b42aa2 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -239,7 +239,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
return 0;
if (it->entry_count < 0 ||
odb_has_object(the_repository->objects, &it->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return 0;
for (i = 0; i < it->subtree_nr; i++) {
if (!cache_tree_fully_valid(it->down[i]->cache_tree))
@@ -292,7 +292,7 @@ static int update_one(struct cache_tree *it,
if (0 <= it->entry_count &&
odb_has_object(the_repository->objects, &it->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return it->entry_count;
/*
@@ -400,7 +400,7 @@ static int update_one(struct cache_tree *it,
if (is_null_oid(oid) ||
(!ce_missing_ok &&
!odb_has_object(the_repository->objects, oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))) {
strbuf_release(&buffer);
if (expected_missing)
return -1;
@@ -448,7 +448,7 @@ static int update_one(struct cache_tree *it,
struct object_id oid;
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
OBJ_TREE, &oid);
- if (odb_has_object(the_repository->objects, &oid, HAS_OBJECT_RECHECK_PACKED))
+ if (odb_has_object(the_repository->objects, &oid, ODB_HAS_OBJECT_RECHECK_PACKED))
oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
@@ -456,7 +456,7 @@ static int update_one(struct cache_tree *it,
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
OBJ_TREE, &it->oid);
} else if (odb_write_object_ext(the_repository->objects, buffer.buf, buffer.len, OBJ_TREE,
- &it->oid, NULL, flags & WRITE_TREE_SILENT ? WRITE_OBJECT_SILENT : 0)) {
+ &it->oid, NULL, flags & WRITE_TREE_SILENT ? ODB_WRITE_OBJECT_SILENT : 0)) {
strbuf_release(&buffer);
return -1;
}
@@ -488,12 +488,12 @@ int cache_tree_update(struct index_state *istate, int flags)
prefetch_cache_entries(istate, must_check_existence);
trace_performance_enter();
- trace2_region_enter("cache_tree", "update", the_repository);
+ trace2_region_enter("cache_tree", "update", istate->repo);
transaction = odb_transaction_begin(the_repository->objects);
i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
"", 0, &skip, flags);
odb_transaction_commit(transaction);
- trace2_region_leave("cache_tree", "update", the_repository);
+ trace2_region_leave("cache_tree", "update", istate->repo);
trace_performance_leave("cache_tree_update");
if (i < 0)
return i;
diff --git a/cbtree.c b/cbtree.c
index cf8cf75b89..4ab794bddc 100644
--- a/cbtree.c
+++ b/cbtree.c
@@ -96,26 +96,28 @@ struct cb_node *cb_lookup(struct cb_tree *t, const uint8_t *k, size_t klen)
return p && !memcmp(p->k, k, klen) ? p : NULL;
}
-static enum cb_next cb_descend(struct cb_node *p, cb_iter fn, void *arg)
+static int cb_descend(struct cb_node *p, cb_iter fn, void *arg)
{
if (1 & (uintptr_t)p) {
struct cb_node *q = cb_node_of(p);
- enum cb_next n = cb_descend(q->child[0], fn, arg);
-
- return n == CB_BREAK ? n : cb_descend(q->child[1], fn, arg);
+ int ret = cb_descend(q->child[0], fn, arg);
+ if (ret)
+ return ret;
+ return cb_descend(q->child[1], fn, arg);
} else {
return fn(p, arg);
}
}
-void cb_each(struct cb_tree *t, const uint8_t *kpfx, size_t klen,
- cb_iter fn, void *arg)
+int cb_each(struct cb_tree *t, const uint8_t *kpfx, size_t klen,
+ cb_iter fn, void *arg)
{
struct cb_node *p = t->root;
struct cb_node *top = p;
size_t i = 0;
- if (!p) return; /* empty tree */
+ if (!p)
+ return 0; /* empty tree */
/* Walk tree, maintaining top pointer */
while (1 & (uintptr_t)p) {
@@ -130,7 +132,8 @@ void cb_each(struct cb_tree *t, const uint8_t *kpfx, size_t klen,
for (i = 0; i < klen; i++) {
if (p->k[i] != kpfx[i])
- return; /* "best" match failed */
+ return 0; /* "best" match failed */
}
- cb_descend(top, fn, arg);
+
+ return cb_descend(top, fn, arg);
}
diff --git a/cbtree.h b/cbtree.h
index 43193abdda..c374b1b3db 100644
--- a/cbtree.h
+++ b/cbtree.h
@@ -30,11 +30,6 @@ struct cb_tree {
struct cb_node *root;
};
-enum cb_next {
- CB_CONTINUE = 0,
- CB_BREAK = 1
-};
-
#define CBTREE_INIT { 0 }
static inline void cb_init(struct cb_tree *t)
@@ -46,9 +41,15 @@ static inline void cb_init(struct cb_tree *t)
struct cb_node *cb_lookup(struct cb_tree *, const uint8_t *k, size_t klen);
struct cb_node *cb_insert(struct cb_tree *, struct cb_node *, size_t klen);
-typedef enum cb_next (*cb_iter)(struct cb_node *, void *arg);
+/*
+ * Callback invoked by `cb_each()` for each node in the critbit tree. A return
+ * value of 0 will cause the iteration to continue, a non-zero return code will
+ * cause iteration to abort. The error code will be relayed back from
+ * `cb_each()` in that case.
+ */
+typedef int (*cb_iter)(struct cb_node *, void *arg);
-void cb_each(struct cb_tree *, const uint8_t *kpfx, size_t klen,
- cb_iter, void *arg);
+int cb_each(struct cb_tree *, const uint8_t *kpfx, size_t klen,
+ cb_iter, void *arg);
#endif /* CBTREE_H */
diff --git a/ci/run-static-analysis.sh b/ci/run-static-analysis.sh
index 9e9c72681d..ba67e80b4d 100755
--- a/ci/run-static-analysis.sh
+++ b/ci/run-static-analysis.sh
@@ -10,7 +10,7 @@ make coccicheck
set +x
fail=
-for cocci_patch in contrib/coccinelle/*.patch
+for cocci_patch in tools/coccinelle/*.patch
do
if test -s "$cocci_patch"
then
diff --git a/commit-graph.c b/commit-graph.c
index f8e24145a5..9abe62bd5a 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -1319,6 +1319,37 @@ static int write_graph_chunk_data(struct hashfile *f,
return 0;
}
+/*
+ * Compute the generation offset between the commit date and its generation.
+ * This is what's ultimately stored as generation number in the commit graph.
+ *
+ * Note that the computation of the commit date is more involved than you might
+ * think. Instead of using the full commit date, we're in fact masking bits so
+ * that only the 34 lowest bits are considered. This results from the fact that
+ * commit graphs only ever store 34 bits of the commit date
+ * itself.
+ *
+ * This means that if we have a commit date that exceeds 34 bits we'll end up
+ * in situations where depending on whether the commit has been parsed from the
+ * object database or the commit graph we'll have different dates, where the
+ * ones parsed from the object database would have full 64 bit precision.
+ *
+ * But ultimately, we only ever want the offset to be relative to what we
+ * actually end up storing on disk, and hence we have to mask all the other
+ * bits.
+ */
+static timestamp_t compute_generation_offset(struct commit *c)
+{
+ timestamp_t masked_date;
+
+ if (sizeof(timestamp_t) > 4)
+ masked_date = c->date & (((timestamp_t) 1 << 34) - 1);
+ else
+ masked_date = c->date;
+
+ return commit_graph_data_at(c)->generation - masked_date;
+}
+
static int write_graph_chunk_generation_data(struct hashfile *f,
void *data)
{
@@ -1329,7 +1360,7 @@ static int write_graph_chunk_generation_data(struct hashfile *f,
struct commit *c = ctx->commits.items[i];
timestamp_t offset;
repo_parse_commit(ctx->r, c);
- offset = commit_graph_data_at(c)->generation - c->date;
+ offset = compute_generation_offset(c);
display_progress(ctx->progress, ++ctx->progress_cnt);
if (offset > GENERATION_NUMBER_V2_OFFSET_MAX) {
@@ -1350,7 +1381,7 @@ static int write_graph_chunk_generation_data_overflow(struct hashfile *f,
int i;
for (i = 0; i < ctx->commits.nr; i++) {
struct commit *c = ctx->commits.items[i];
- timestamp_t offset = commit_graph_data_at(c)->generation - c->date;
+ timestamp_t offset = compute_generation_offset(c);
display_progress(ctx->progress, ++ctx->progress_cnt);
if (offset > GENERATION_NUMBER_V2_OFFSET_MAX) {
@@ -1741,7 +1772,7 @@ static void compute_generation_numbers(struct write_commit_graph_context *ctx)
for (i = 0; i < ctx->commits.nr; i++) {
struct commit *c = ctx->commits.items[i];
- timestamp_t offset = commit_graph_data_at(c)->generation - c->date;
+ timestamp_t offset = compute_generation_offset(c);
if (offset > GENERATION_NUMBER_V2_OFFSET_MAX)
ctx->num_generation_data_overflows++;
}
@@ -1969,6 +2000,9 @@ static void fill_oids_from_all_packs(struct write_commit_graph_context *ctx)
{
struct odb_source *source;
enum object_type type;
+ struct odb_for_each_object_options opts = {
+ .flags = ODB_FOR_EACH_OBJECT_PACK_ORDER,
+ };
struct object_info oi = {
.typep = &type,
};
@@ -1983,7 +2017,7 @@ static void fill_oids_from_all_packs(struct write_commit_graph_context *ctx)
for (source = ctx->r->objects->sources; source; source = source->next) {
struct odb_source_files *files = odb_source_files_downcast(source);
packfile_store_for_each_object(files->packed, &oi, add_packed_commits_oi,
- ctx, ODB_FOR_EACH_OBJECT_PACK_ORDER);
+ ctx, &opts);
}
if (ctx->progress_done < ctx->approx_nr_objects)
@@ -2607,7 +2641,8 @@ int write_commit_graph(struct odb_source *source,
replace = ctx.opts->split_flags & COMMIT_GRAPH_SPLIT_REPLACE;
}
- ctx.approx_nr_objects = repo_approximate_object_count(r);
+ if (odb_count_objects(r->objects, ODB_COUNT_OBJECTS_APPROXIMATE, &ctx.approx_nr_objects) < 0)
+ ctx.approx_nr_objects = 0;
if (ctx.append && g) {
for (i = 0; i < g->num_commits; i++) {
diff --git a/commit-reach.c b/commit-reach.c
index 9604bbdcce..d3a9b3ed6f 100644
--- a/commit-reach.c
+++ b/commit-reach.c
@@ -1117,10 +1117,8 @@ void ahead_behind(struct repository *r,
/* STALE is used here, PARENT2 is used by insert_no_dup(). */
repo_clear_commit_marks(r, PARENT2 | STALE);
- while (prio_queue_peek(&queue)) {
- struct commit *c = prio_queue_get(&queue);
- free_bit_array(c);
- }
+ for (size_t i = 0; i < queue.nr; i++)
+ free_bit_array(queue.array[i].data);
clear_bit_arrays(&bit_arrays);
clear_prio_queue(&queue);
}
diff --git a/compat/mingw.c b/compat/mingw.c
index c667a2dcda..2023c16db6 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -1394,6 +1394,9 @@ revert_attrs:
size_t mingw_strftime(char *s, size_t max,
const char *format, const struct tm *tm)
{
+#ifdef _UCRT
+ size_t ret = strftime(s, max, format, tm);
+#else
/* a pointer to the original strftime in case we can't find the UCRT version */
static size_t (*fallback)(char *, size_t, const char *, const struct tm *) = strftime;
size_t ret;
@@ -1404,6 +1407,7 @@ size_t mingw_strftime(char *s, size_t max,
ret = strftime(s, max, format, tm);
else
ret = fallback(s, max, format, tm);
+#endif
if (!ret && errno == EINVAL)
die("invalid strftime format: '%s'", format);
@@ -2460,7 +2464,7 @@ repeat:
if (supports_file_rename_info_ex) {
/*
* Our minimum required Windows version is still set to Windows
- * Vista. We thus have to declare required infrastructure for
+ * 8.1. We thus have to declare required infrastructure for
* FileRenameInfoEx ourselves until we bump _WIN32_WINNT to
* 0x0A00. Furthermore, we have to handle cases where the
* FileRenameInfoEx call isn't supported yet.
diff --git a/compat/nedmalloc/malloc.c.h b/compat/nedmalloc/malloc.c.h
index 814845d4b3..e0c567586c 100644
--- a/compat/nedmalloc/malloc.c.h
+++ b/compat/nedmalloc/malloc.c.h
@@ -500,7 +500,7 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x403
+#define _WIN32_WINNT 0x603
#endif
#include <windows.h>
#define HAVE_MMAP 1
diff --git a/compat/poll/poll.c b/compat/poll/poll.c
index a2becd16cd..ea362b4a8e 100644
--- a/compat/poll/poll.c
+++ b/compat/poll/poll.c
@@ -20,7 +20,7 @@
#define DISABLE_SIGN_COMPARE_WARNINGS
-/* To bump the minimum Windows version to Windows Vista */
+/* To bump the minimum Windows version to Windows 8.1 */
#include "git-compat-util.h"
/* Tell gcc not to warn about the (nfd < 0) tests, below. */
@@ -41,7 +41,7 @@
#if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__
# define WIN32_NATIVE
# if defined (_MSC_VER) && !defined(_WIN32_WINNT)
-# define _WIN32_WINNT 0x0502
+# define _WIN32_WINNT 0x0603
# endif
# include <winsock2.h>
# include <windows.h>
diff --git a/compat/posix.h b/compat/posix.h
index 3c611d2736..94699a03fa 100644
--- a/compat/posix.h
+++ b/compat/posix.h
@@ -76,7 +76,7 @@
#if defined(WIN32) && !defined(__CYGWIN__) /* Both MinGW and MSVC */
# if !defined(_WIN32_WINNT)
-# define _WIN32_WINNT 0x0600
+# define _WIN32_WINNT 0x0603
# endif
#define WIN32_LEAN_AND_MEAN /* stops windows.h including winsock.h */
#include <winsock2.h>
diff --git a/compat/regcomp_enhanced.c b/compat/regcomp_enhanced.c
index 84193ce53b..29c74eee99 100644
--- a/compat/regcomp_enhanced.c
+++ b/compat/regcomp_enhanced.c
@@ -3,6 +3,11 @@
int git_regcomp(regex_t *preg, const char *pattern, int cflags)
{
+ /*
+ * If you are on macOS with clang and fail to compile this line,
+ * https://lore.kernel.org/git/458ad3c1-96df-4575-ee42-e6eb754f25f6@gmx.de/
+ * might be relevant.
+ */
if (!(cflags & REG_EXTENDED))
cflags |= REG_ENHANCED;
return regcomp(preg, pattern, cflags);
diff --git a/compat/win32/flush.c b/compat/win32/flush.c
index 291f90ea94..7244ff69ac 100644
--- a/compat/win32/flush.c
+++ b/compat/win32/flush.c
@@ -6,7 +6,9 @@ int win32_fsync_no_flush(int fd)
{
IO_STATUS_BLOCK io_status;
+#ifndef FLUSH_FLAGS_FILE_DATA_ONLY
#define FLUSH_FLAGS_FILE_DATA_ONLY 1
+#endif
DECLARE_PROC_ADDR(ntdll.dll, NTSTATUS, NTAPI, NtFlushBuffersFileEx,
HANDLE FileHandle, ULONG Flags, PVOID Parameters, ULONG ParameterSize,
diff --git a/compat/winansi.c b/compat/winansi.c
index ac2ffb7869..3ce1900939 100644
--- a/compat/winansi.c
+++ b/compat/winansi.c
@@ -32,47 +32,18 @@ static int non_ascii_used = 0;
static HANDLE hthread, hread, hwrite;
static HANDLE hconsole1, hconsole2;
-#ifdef __MINGW32__
-#if !defined(__MINGW64_VERSION_MAJOR) || __MINGW64_VERSION_MAJOR < 5
-typedef struct _CONSOLE_FONT_INFOEX {
- ULONG cbSize;
- DWORD nFont;
- COORD dwFontSize;
- UINT FontFamily;
- UINT FontWeight;
- WCHAR FaceName[LF_FACESIZE];
-} CONSOLE_FONT_INFOEX, *PCONSOLE_FONT_INFOEX;
-#endif
-#endif
-
static void warn_if_raster_font(void)
{
DWORD fontFamily = 0;
- DECLARE_PROC_ADDR(kernel32.dll, BOOL, WINAPI,
- GetCurrentConsoleFontEx, HANDLE, BOOL,
- PCONSOLE_FONT_INFOEX);
+ CONSOLE_FONT_INFOEX cfi;
/* don't bother if output was ascii only */
if (!non_ascii_used)
return;
- /* GetCurrentConsoleFontEx is available since Vista */
- if (INIT_PROC_ADDR(GetCurrentConsoleFontEx)) {
- CONSOLE_FONT_INFOEX cfi;
- cfi.cbSize = sizeof(cfi);
- if (GetCurrentConsoleFontEx(console, 0, &cfi))
- fontFamily = cfi.FontFamily;
- } else {
- /* pre-Vista: check default console font in registry */
- HKEY hkey;
- if (ERROR_SUCCESS == RegOpenKeyExA(HKEY_CURRENT_USER, "Console",
- 0, KEY_READ, &hkey)) {
- DWORD size = sizeof(fontFamily);
- RegQueryValueExA(hkey, "FontFamily", NULL, NULL,
- (LPVOID) &fontFamily, &size);
- RegCloseKey(hkey);
- }
- }
+ cfi.cbSize = sizeof(cfi);
+ if (GetCurrentConsoleFontEx(console, 0, &cfi))
+ fontFamily = cfi.FontFamily;
if (!(fontFamily & TMPF_TRUETYPE)) {
const wchar_t *msg = L"\nWarning: Your console font probably "
diff --git a/config.mak.dev b/config.mak.dev
index e86b6e1b34..c8dcf78779 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -1,5 +1,5 @@
ifndef COMPILER_FEATURES
-COMPILER_FEATURES := $(shell ./detect-compiler $(CC))
+COMPILER_FEATURES := $(shell ./tools/detect-compiler $(CC))
endif
ifeq ($(filter no-error,$(DEVOPTS)),)
diff --git a/connect.c b/connect.c
index a02583a102..fcd35c5539 100644
--- a/connect.c
+++ b/connect.c
@@ -1054,6 +1054,8 @@ static struct child_process *git_proxy_connect(int fd[2], char *host)
strvec_push(&proxy->args, port);
proxy->in = -1;
proxy->out = -1;
+ proxy->clean_on_exit = 1;
+ proxy->wait_after_clean = 1;
if (start_command(proxy))
die(_("cannot start proxy %s"), git_proxy_command);
fd[0] = proxy->out; /* read from proxy stdout */
@@ -1515,6 +1517,8 @@ struct child_process *git_connect(int fd[2], const char *url,
}
strvec_push(&conn->args, cmd.buf);
+ conn->clean_on_exit = 1;
+ conn->wait_after_clean = 1;
if (start_command(conn))
die(_("unable to fork"));
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index c6cfb874ef..d7a087e584 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -376,7 +376,7 @@ endif()
#function checks
set(function_checks
strcasestr memmem strlcpy strtoimax strtoumax strtoull
- setenv mkdtemp poll pread memmem)
+ setenv mkdtemp poll pread memmem writev)
#unsetenv,hstrerror are incompatible with windows build
if(NOT WIN32)
@@ -421,6 +421,10 @@ if(NOT HAVE_MEMMEM)
list(APPEND compat_SOURCES compat/memmem.c)
endif()
+if(NOT HAVE_WRITEV)
+ list(APPEND compat_SOURCES compat/writev.c)
+endif()
+
if(NOT WIN32)
if(NOT HAVE_UNSETENV)
list(APPEND compat_SOURCES compat/unsetenv.c)
@@ -636,7 +640,7 @@ set(EXCLUSION_PROGS_CACHE ${EXCLUSION_PROGS} CACHE STRING "Programs not built" F
if(NOT EXISTS ${CMAKE_BINARY_DIR}/command-list.h OR NOT EXCLUSION_PROGS_CACHE STREQUAL EXCLUSION_PROGS)
list(REMOVE_ITEM EXCLUSION_PROGS empty)
message("Generating command-list.h")
- execute_process(COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/generate-cmdlist.sh"
+ execute_process(COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/tools/generate-cmdlist.sh"
${EXCLUSION_PROGS}
"${CMAKE_SOURCE_DIR}"
"${CMAKE_BINARY_DIR}/command-list.h")
@@ -644,14 +648,14 @@ endif()
if(NOT EXISTS ${CMAKE_BINARY_DIR}/config-list.h)
message("Generating config-list.h")
- execute_process(COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/generate-configlist.sh"
+ execute_process(COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/tools/generate-configlist.sh"
"${CMAKE_SOURCE_DIR}"
"${CMAKE_BINARY_DIR}/config-list.h")
endif()
if(NOT EXISTS ${CMAKE_BINARY_DIR}/hook-list.h)
message("Generating hook-list.h")
- execute_process(COMMAND "${SH_EXE}" ${CMAKE_SOURCE_DIR}/generate-hooklist.sh
+ execute_process(COMMAND "${SH_EXE}" ${CMAKE_SOURCE_DIR}/tools/generate-hooklist.sh
"${CMAKE_SOURCE_DIR}"
"${CMAKE_BINARY_DIR}/hook-list.h")
endif()
@@ -832,11 +836,11 @@ foreach(script ${git_shell_scripts})
endif()
add_custom_command(OUTPUT "${CMAKE_BINARY_DIR}/${shell_gen_path}"
- COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/generate-script.sh"
+ COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/tools/generate-script.sh"
"${CMAKE_SOURCE_DIR}/${script}.sh"
"${CMAKE_BINARY_DIR}/${shell_gen_path}"
"${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS"
- DEPENDS "${CMAKE_SOURCE_DIR}/generate-script.sh"
+ DEPENDS "${CMAKE_SOURCE_DIR}/tools/generate-script.sh"
"${CMAKE_SOURCE_DIR}/${script}.sh"
VERBATIM)
list(APPEND shell_gen ${CMAKE_BINARY_DIR}/${shell_gen_path})
@@ -875,13 +879,13 @@ foreach(script ${git_perl_scripts} ${perl_modules})
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/${perl_gen_dir}")
add_custom_command(OUTPUT "${CMAKE_BINARY_DIR}/${perl_gen_path}"
- COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/generate-perl.sh"
+ COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/tools/generate-perl.sh"
"${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS"
"${CMAKE_BINARY_DIR}/GIT-VERSION-FILE"
"${CMAKE_BINARY_DIR}/GIT-PERL-HEADER"
"${CMAKE_SOURCE_DIR}/${script}"
"${CMAKE_BINARY_DIR}/${perl_gen_path}"
- DEPENDS "${CMAKE_SOURCE_DIR}/generate-perl.sh"
+ DEPENDS "${CMAKE_SOURCE_DIR}/tools/generate-perl.sh"
"${CMAKE_SOURCE_DIR}/${script}"
"${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS"
"${CMAKE_BINARY_DIR}/GIT-VERSION-FILE"
@@ -892,11 +896,11 @@ add_custom_target(perl-gen ALL DEPENDS ${perl_gen})
# Python script
add_custom_command(OUTPUT "${CMAKE_BINARY_DIR}/git-p4"
- COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/generate-python.sh"
+ COMMAND "${SH_EXE}" "${CMAKE_SOURCE_DIR}/tools/generate-python.sh"
"${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS"
"${CMAKE_SOURCE_DIR}/git-p4.py"
"${CMAKE_BINARY_DIR}/git-p4"
- DEPENDS "${CMAKE_SOURCE_DIR}/generate-python.sh"
+ DEPENDS "${CMAKE_SOURCE_DIR}/tools/generate-python.sh"
"${CMAKE_SOURCE_DIR}/git-p4.py"
"${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS"
VERBATIM)
diff --git a/contrib/diff-highlight/DiffHighlight.pm b/contrib/diff-highlight/DiffHighlight.pm
index 3d061bc0b7..abe457882e 100644
--- a/contrib/diff-highlight/DiffHighlight.pm
+++ b/contrib/diff-highlight/DiffHighlight.pm
@@ -1,6 +1,6 @@
package DiffHighlight;
-require v5.26;
+require v5.008;
use warnings FATAL => 'all';
use strict;
@@ -9,18 +9,11 @@ use File::Spec;
my $NULL = File::Spec->devnull();
-# Highlight by reversing foreground and background. You could do
-# other things like bold or underline if you prefer.
-my @OLD_HIGHLIGHT = (
- color_config('color.diff-highlight.oldnormal'),
- color_config('color.diff-highlight.oldhighlight', "\x1b[7m"),
- color_config('color.diff-highlight.oldreset', "\x1b[27m")
-);
-my @NEW_HIGHLIGHT = (
- color_config('color.diff-highlight.newnormal', $OLD_HIGHLIGHT[0]),
- color_config('color.diff-highlight.newhighlight', $OLD_HIGHLIGHT[1]),
- color_config('color.diff-highlight.newreset', $OLD_HIGHLIGHT[2])
-);
+# The color theme is initially set to nothing here to allow outside callers
+# to set the colors for their application. If nothing is sent in we use
+# colors from git config in load_color_config().
+our @OLD_HIGHLIGHT = ();
+our @NEW_HIGHLIGHT = ();
my $RESET = "\x1b[m";
my $COLOR = qr/\x1b\[[0-9;]*m/;
@@ -138,9 +131,21 @@ sub highlight_stdin {
# of it being used in other settings. Let's handle our own
# fallback, which means we will work even if git can't be run.
sub color_config {
+ our $cached_config;
my ($key, $default) = @_;
- my $s = `git config --get-color $key 2>$NULL`;
- return length($s) ? $s : $default;
+
+ if (!defined $cached_config) {
+ $cached_config = {};
+ my $data = `git config --type=color --get-regexp '^color\.diff-highlight\.' 2>$NULL`;
+ for my $line (split /\n/, $data) {
+ my ($key, $color) = split ' ', $line, 2;
+ $key =~ s/^color\.diff-highlight\.// or next;
+ $cached_config->{$key} = $color;
+ }
+ }
+
+ my $s = $cached_config->{$key};
+ return defined($s) ? $s : $default;
}
sub show_hunk {
@@ -170,6 +175,29 @@ sub show_hunk {
$line_cb->(@queue);
}
+sub load_color_config {
+ # If the colors were NOT set from outside this module we load them on-demand
+ # from the git config. Note that only one of elements 0 and 2 in each
+ # array is used (depending on whether you are doing set/unset on an
+ # attribute, or specifying normal vs highlighted coloring). So we use
+ # element 1 as our check for whether colors were passed in; it should
+ # always be set if you want highlighting to do anything.
+ if (!defined $OLD_HIGHLIGHT[1]) {
+ @OLD_HIGHLIGHT = (
+ color_config('oldnormal'),
+ color_config('oldhighlight', "\x1b[7m"),
+ color_config('oldreset', "\x1b[27m")
+ );
+ }
+ if (!defined $NEW_HIGHLIGHT[1]) {
+ @NEW_HIGHLIGHT = (
+ color_config('newnormal', $OLD_HIGHLIGHT[0]),
+ color_config('newhighlight', $OLD_HIGHLIGHT[1]),
+ color_config('newreset', $OLD_HIGHLIGHT[2])
+ );
+ };
+}
+
sub highlight_pair {
my @a = split_line(shift);
my @b = split_line(shift);
@@ -218,6 +246,7 @@ sub highlight_pair {
}
if (is_pair_interesting(\@a, $pa, $sa, \@b, $pb, $sb)) {
+ load_color_config();
return highlight_line(\@a, $pa, $sa, \@OLD_HIGHLIGHT),
highlight_line(\@b, $pb, $sb, \@NEW_HIGHLIGHT);
}
@@ -273,6 +302,18 @@ sub highlight_line {
# or suffix (disregarding boring bits like whitespace and colorization).
sub is_pair_interesting {
my ($a, $pa, $sa, $b, $pb, $sb) = @_;
+
+ # We hit this case if the prefix consumed the entire line, meaning
+ # that two lines are identical. This generally shouldn't happen,
+ # since it implies the diff isn't minimal (you could shrink the hunk by
+ # making this a context line). But you can see it when the line
+ # content is the same, but the trailing newline is dropped, like:
+ #
+ # -foo
+ # +foo
+ # \No newline at end of file
+ return 0 if $pa == @$a || $pb == @$b;
+
my $prefix_a = join('', @$a[0..($pa-1)]);
my $prefix_b = join('', @$b[0..($pb-1)]);
my $suffix_a = join('', @$a[($sa+1)..$#$a]);
diff --git a/contrib/diff-highlight/README b/contrib/diff-highlight/README
index 1db4440e68..ed8d876a18 100644
--- a/contrib/diff-highlight/README
+++ b/contrib/diff-highlight/README
@@ -39,10 +39,21 @@ visually distracting. Non-diff lines and existing diff coloration is
preserved; the intent is that the output should look exactly the same as
the input, except for the occasional highlight.
+Build/Install
+-------------
+
+You can build the `diff-highlight` script by running `make` from within
+the diff-highlight directory. There is no `make install` target; you can
+copy the built script to your $PATH.
+
+You can run diff-highlight's internal tests by running `make test`. Note
+that you must also build Git itself first (by running `make` from the
+top-level of the project).
+
Use
---
-You can try out the diff-highlight program with:
+You can try out the built diff-highlight program with:
---------------------------------------------
git log -p --color | /path/to/diff-highlight
@@ -127,6 +138,12 @@ Your script may set up one or more of the following variables:
processing a logical chunk of input). The default function flushes
stdout.
+ - @DiffHighlight::OLD_HIGHLIGHT and @DiffHighlight::NEW_HIGHLIGHT - these
+ arrays specify the normal, highlighted, and reset colors (in that order)
+ for old/new lines. If unset, values will be retrieved by calling `git
+ config` (see "Color Config" above). Note that these should be the literal
+ color bytes (starting with an ANSI escape code), not color names.
+
The script may then feed lines, one at a time, to DiffHighlight::handle_line().
When lines are done processing, they will be fed to $line_cb. Note that
DiffHighlight may queue up many input lines (to analyze a whole hunk)
diff --git a/contrib/diff-highlight/t/t9400-diff-highlight.sh b/contrib/diff-highlight/t/t9400-diff-highlight.sh
index dee296739c..b38fe2196a 100755
--- a/contrib/diff-highlight/t/t9400-diff-highlight.sh
+++ b/contrib/diff-highlight/t/t9400-diff-highlight.sh
@@ -7,9 +7,6 @@ TEST_OUTPUT_DIRECTORY=$(pwd)
TEST_DIRECTORY="$CURR_DIR"/../../../t
DIFF_HIGHLIGHT="$CURR_DIR"/../diff-highlight
-CW="$(printf "\033[7m")" # white
-CR="$(printf "\033[27m")" # reset
-
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. "$TEST_DIRECTORY"/test-lib.sh
@@ -41,8 +38,10 @@ dh_test () {
git show >commit.raw
} >/dev/null &&
- "$DIFF_HIGHLIGHT" <diff.raw | test_strip_patch_header >diff.act &&
- "$DIFF_HIGHLIGHT" <commit.raw | test_strip_patch_header >commit.act &&
+ "$DIFF_HIGHLIGHT" <diff.raw >diff.hi &&
+ test_strip_patch_header <diff.hi | test_decode_color >diff.act &&
+ "$DIFF_HIGHLIGHT" <commit.raw >commit.hi &&
+ test_strip_patch_header <commit.hi | test_decode_color >commit.act &&
test_cmp patch.exp diff.act &&
test_cmp patch.exp commit.act
}
@@ -124,8 +123,8 @@ test_expect_success 'diff-highlight highlights the beginning of a line' '
dh_test a b <<-EOF
@@ -1,3 +1,3 @@
aaa
- -${CW}b${CR}bb
- +${CW}0${CR}bb
+ -<REVERSE>b<NOREVERSE>bb
+ +<REVERSE>0<NOREVERSE>bb
ccc
EOF
'
@@ -146,8 +145,8 @@ test_expect_success 'diff-highlight highlights the end of a line' '
dh_test a b <<-EOF
@@ -1,3 +1,3 @@
aaa
- -bb${CW}b${CR}
- +bb${CW}0${CR}
+ -bb<REVERSE>b<NOREVERSE>
+ +bb<REVERSE>0<NOREVERSE>
ccc
EOF
'
@@ -168,8 +167,8 @@ test_expect_success 'diff-highlight highlights the middle of a line' '
dh_test a b <<-EOF
@@ -1,3 +1,3 @@
aaa
- -b${CW}b${CR}b
- +b${CW}0${CR}b
+ -b<REVERSE>b<NOREVERSE>b
+ +b<REVERSE>0<NOREVERSE>b
ccc
EOF
'
@@ -211,8 +210,8 @@ test_expect_failure 'diff-highlight highlights mismatched hunk size' '
dh_test a b <<-EOF
@@ -1,3 +1,3 @@
aaa
- -b${CW}b${CR}b
- +b${CW}0${CR}b
+ -b<REVERSE>b<NOREVERSE>b
+ +b<REVERSE>0<NOREVERSE>b
+ccc
EOF
'
@@ -230,8 +229,8 @@ test_expect_success 'diff-highlight treats multibyte utf-8 as a unit' '
echo "unic${o_stroke}de" >b &&
dh_test a b <<-EOF
@@ -1 +1 @@
- -unic${CW}${o_accent}${CR}de
- +unic${CW}${o_stroke}${CR}de
+ -unic<REVERSE>${o_accent}<NOREVERSE>de
+ +unic<REVERSE>${o_stroke}<NOREVERSE>de
EOF
'
@@ -248,8 +247,8 @@ test_expect_failure 'diff-highlight treats combining code points as a unit' '
echo "unico${combine_circum}de" >b &&
dh_test a b <<-EOF
@@ -1 +1 @@
- -unic${CW}o${combine_accent}${CR}de
- +unic${CW}o${combine_circum}${CR}de
+ -unic<REVERSE>o${combine_accent}<NOREVERSE>de
+ +unic<REVERSE>o${combine_circum}<NOREVERSE>de
EOF
'
@@ -331,13 +330,52 @@ test_expect_success 'diff-highlight handles --graph with leading dash' '
+++ b/file
@@ -1,3 +1,3 @@
before
- -the ${CW}old${CR} line
- +the ${CW}new${CR} line
+ -the <REVERSE>old<NOREVERSE> line
+ +the <REVERSE>new<NOREVERSE> line
-leading dash
EOF
git log --graph -p -1 | "$DIFF_HIGHLIGHT" >actual.raw &&
- trim_graph <actual.raw | sed -n "/^---/,\$p" >actual &&
+ trim_graph <actual.raw | sed -n "/^---/,\$p" | test_decode_color >actual &&
test_cmp expect actual
'
+test_expect_success 'highlight diff that removes final newline' '
+ printf "content\n" >a &&
+ printf "content" >b &&
+ dh_test a b <<-\EOF
+ @@ -1 +1 @@
+ -content
+ +content
+ \ No newline at end of file
+ EOF
+'
+
+test_expect_success 'configure set/reset colors' '
+ test_config color.diff-highlight.oldhighlight bold &&
+ test_config color.diff-highlight.oldreset nobold &&
+ test_config color.diff-highlight.newhighlight italic &&
+ test_config color.diff-highlight.newreset noitalic &&
+ echo "prefix a suffix" >a &&
+ echo "prefix b suffix" >b &&
+ dh_test a b <<-\EOF
+ @@ -1 +1 @@
+ -prefix <BOLD>a<NORMAL_INTENSITY> suffix
+ +prefix <ITALIC>b<NOITALIC> suffix
+ EOF
+'
+
+test_expect_success 'configure normal/highlight colors' '
+ test_config color.diff-highlight.oldnormal red &&
+ test_config color.diff-highlight.oldhighlight magenta &&
+ test_config color.diff-highlight.newnormal green &&
+ test_config color.diff-highlight.newhighlight yellow &&
+ echo "prefix a suffix" >a &&
+ echo "prefix b suffix" >b &&
+ dh_test a b <<-\EOF
+ @@ -1 +1 @@
+ <RED>-prefix <RESET><MAGENTA>a<RESET><RED> suffix<RESET>
+ <GREEN>+prefix <RESET><YELLOW>b<RESET><GREEN> suffix<RESET>
+ EOF
+'
+
test_done
diff --git a/contrib/meson.build b/contrib/meson.build
index a88c5dfe09..569c23ee76 100644
--- a/contrib/meson.build
+++ b/contrib/meson.build
@@ -2,5 +2,4 @@ foreach feature : get_option('contrib')
subdir(feature)
endforeach
-subdir('coccinelle')
subdir('credential')
diff --git a/contrib/subtree/meson.build b/contrib/subtree/meson.build
index 161435abeb..804c315894 100644
--- a/contrib/subtree/meson.build
+++ b/contrib/subtree/meson.build
@@ -3,7 +3,7 @@ git_subtree = custom_target(
output: 'git-subtree',
command: [
shell,
- meson.project_source_root() / 'generate-script.sh',
+ meson.project_source_root() / 'tools/generate-script.sh',
'@INPUT@',
'@OUTPUT@',
meson.project_build_root() / 'GIT-BUILD-OPTIONS',
diff --git a/diff.c b/diff.c
index e87847fa4b..397e38b41c 100644
--- a/diff.c
+++ b/diff.c
@@ -608,6 +608,52 @@ struct emit_callback {
struct strbuf *header;
};
+/*
+ * State for the line-range callback wrappers that sit between
+ * xdi_diff_outf() and fn_out_consume(). xdiff produces a normal,
+ * unfiltered diff; the wrappers intercept each hunk header and line,
+ * track post-image position, and forward only lines that fall within
+ * the requested ranges. Contiguous in-range lines are collected into
+ * range hunks and flushed with a synthetic @@ header so that
+ * fn_out_consume() sees well-formed unified-diff fragments.
+ *
+ * Removal lines ('-') cannot be classified by post-image position, so
+ * they are buffered in pending_rm until the next '+' or ' ' line
+ * reveals whether they precede an in-range line (flush into range hunk) or
+ * an out-of-range line (discard).
+ */
+struct line_range_callback {
+ xdiff_emit_line_fn orig_line_fn;
+ void *orig_cb_data;
+ const struct range_set *ranges; /* 0-based [start, end) */
+ unsigned int cur_range; /* index into the range_set */
+
+ /* Post/pre-image line counters (1-based, set from hunk headers) */
+ long lno_post;
+ long lno_pre;
+
+ /*
+ * Function name from most recent xdiff hunk header;
+ * size matches struct func_line.buf in xdiff/xemit.c.
+ */
+ char func[80];
+ long funclen;
+
+ /* Range hunk being accumulated for the current range */
+ struct strbuf rhunk;
+ long rhunk_old_begin, rhunk_old_count;
+ long rhunk_new_begin, rhunk_new_count;
+ int rhunk_active;
+ int rhunk_has_changes; /* any '+' or '-' lines? */
+
+ /* Removal lines not yet known to be in-range */
+ struct strbuf pending_rm;
+ int pending_rm_count;
+ long pending_rm_pre_begin; /* pre-image line of first pending */
+
+ int ret; /* latched error from orig_line_fn */
+};
+
static int count_lines(const char *data, int size)
{
int count, ch, completely_empty = 1, nl_just_seen = 0;
@@ -2493,6 +2539,188 @@ static int quick_consume(void *priv, char *line UNUSED, unsigned long len UNUSED
return 1;
}
+static void discard_pending_rm(struct line_range_callback *s)
+{
+ strbuf_reset(&s->pending_rm);
+ s->pending_rm_count = 0;
+}
+
+static void flush_rhunk(struct line_range_callback *s)
+{
+ struct strbuf hdr = STRBUF_INIT;
+ const char *p, *end;
+
+ if (!s->rhunk_active || s->ret)
+ return;
+
+ /* Drain any pending removal lines into the range hunk */
+ if (s->pending_rm_count) {
+ strbuf_addbuf(&s->rhunk, &s->pending_rm);
+ s->rhunk_old_count += s->pending_rm_count;
+ s->rhunk_has_changes = 1;
+ discard_pending_rm(s);
+ }
+
+ /*
+ * Suppress context-only hunks: they contain no actual changes
+ * and would just be noise. This can happen when the inflated
+ * ctxlen causes xdiff to emit context covering a range that
+ * has no changes in this commit.
+ */
+ if (!s->rhunk_has_changes) {
+ s->rhunk_active = 0;
+ strbuf_reset(&s->rhunk);
+ return;
+ }
+
+ strbuf_addf(&hdr, "@@ -%ld,%ld +%ld,%ld @@",
+ s->rhunk_old_begin, s->rhunk_old_count,
+ s->rhunk_new_begin, s->rhunk_new_count);
+ if (s->funclen > 0) {
+ strbuf_addch(&hdr, ' ');
+ strbuf_add(&hdr, s->func, s->funclen);
+ }
+ strbuf_addch(&hdr, '\n');
+
+ s->ret = s->orig_line_fn(s->orig_cb_data, hdr.buf, hdr.len);
+ strbuf_release(&hdr);
+
+ /*
+ * Replay buffered lines one at a time through fn_out_consume.
+ * The cast discards const because xdiff_emit_line_fn takes
+ * char *, though fn_out_consume does not modify the buffer.
+ */
+ p = s->rhunk.buf;
+ end = p + s->rhunk.len;
+ while (!s->ret && p < end) {
+ const char *eol = memchr(p, '\n', end - p);
+ unsigned long line_len = eol ? (unsigned long)(eol - p + 1)
+ : (unsigned long)(end - p);
+ s->ret = s->orig_line_fn(s->orig_cb_data, (char *)p, line_len);
+ p += line_len;
+ }
+
+ s->rhunk_active = 0;
+ strbuf_reset(&s->rhunk);
+}
+
+static void line_range_hunk_fn(void *data,
+ long old_begin, long old_nr UNUSED,
+ long new_begin, long new_nr UNUSED,
+ const char *func, long funclen)
+{
+ struct line_range_callback *s = data;
+
+ /*
+ * When count > 0, begin is 1-based. When count == 0, begin is
+ * adjusted down by 1 by xdl_emit_hunk_hdr(), but no lines of
+ * that type will arrive, so the value is unused.
+ *
+ * Any pending removal lines from the previous xdiff hunk are
+ * intentionally left in pending_rm: the line callback will
+ * flush or discard them when the next content line reveals
+ * whether the removals precede in-range content.
+ */
+ s->lno_post = new_begin;
+ s->lno_pre = old_begin;
+
+ if (funclen > 0) {
+ if (funclen > (long)sizeof(s->func))
+ funclen = sizeof(s->func);
+ memcpy(s->func, func, funclen);
+ }
+ s->funclen = funclen;
+}
+
+static int line_range_line_fn(void *priv, char *line, unsigned long len)
+{
+ struct line_range_callback *s = priv;
+ const struct range *cur;
+ long lno_0, cur_pre;
+
+ if (s->ret)
+ return s->ret;
+
+ if (line[0] == '-') {
+ if (!s->pending_rm_count)
+ s->pending_rm_pre_begin = s->lno_pre;
+ s->lno_pre++;
+ strbuf_add(&s->pending_rm, line, len);
+ s->pending_rm_count++;
+ return s->ret;
+ }
+
+ if (line[0] == '\\') {
+ if (s->pending_rm_count)
+ strbuf_add(&s->pending_rm, line, len);
+ else if (s->rhunk_active)
+ strbuf_add(&s->rhunk, line, len);
+ /* otherwise outside tracked range; drop silently */
+ return s->ret;
+ }
+
+ if (line[0] != '+' && line[0] != ' ')
+ BUG("unexpected diff line type '%c'", line[0]);
+
+ lno_0 = s->lno_post - 1;
+ cur_pre = s->lno_pre; /* save before advancing for context lines */
+ s->lno_post++;
+ if (line[0] == ' ')
+ s->lno_pre++;
+
+ /* Advance past ranges we've passed */
+ while (s->cur_range < s->ranges->nr &&
+ lno_0 >= s->ranges->ranges[s->cur_range].end) {
+ if (s->rhunk_active)
+ flush_rhunk(s);
+ discard_pending_rm(s);
+ s->cur_range++;
+ }
+
+ /* Past all ranges */
+ if (s->cur_range >= s->ranges->nr) {
+ discard_pending_rm(s);
+ return s->ret;
+ }
+
+ cur = &s->ranges->ranges[s->cur_range];
+
+ /* Before current range */
+ if (lno_0 < cur->start) {
+ discard_pending_rm(s);
+ return s->ret;
+ }
+
+ /* In range so start a new range hunk if needed */
+ if (!s->rhunk_active) {
+ s->rhunk_active = 1;
+ s->rhunk_has_changes = 0;
+ s->rhunk_new_begin = lno_0 + 1;
+ s->rhunk_old_begin = s->pending_rm_count
+ ? s->pending_rm_pre_begin : cur_pre;
+ s->rhunk_old_count = 0;
+ s->rhunk_new_count = 0;
+ strbuf_reset(&s->rhunk);
+ }
+
+ /* Flush pending removals into range hunk */
+ if (s->pending_rm_count) {
+ strbuf_addbuf(&s->rhunk, &s->pending_rm);
+ s->rhunk_old_count += s->pending_rm_count;
+ s->rhunk_has_changes = 1;
+ discard_pending_rm(s);
+ }
+
+ strbuf_add(&s->rhunk, line, len);
+ s->rhunk_new_count++;
+ if (line[0] == '+')
+ s->rhunk_has_changes = 1;
+ else
+ s->rhunk_old_count++;
+
+ return s->ret;
+}
+
static void pprint_rename(struct strbuf *name, const char *a, const char *b)
{
const char *old_name = a;
@@ -3592,7 +3820,8 @@ static void builtin_diff(const char *name_a,
const char *xfrm_msg,
int must_show_header,
struct diff_options *o,
- int complete_rewrite)
+ int complete_rewrite,
+ const struct range_set *line_ranges)
{
mmfile_t mf1, mf2;
const char *lbl[2];
@@ -3833,6 +4062,52 @@ static void builtin_diff(const char *name_a,
*/
xdi_diff_outf(&mf1, &mf2, NULL, quick_consume,
&ecbdata, &xpp, &xecfg);
+ } else if (line_ranges) {
+ struct line_range_callback lr_state;
+ unsigned int i;
+ long max_span = 0;
+
+ memset(&lr_state, 0, sizeof(lr_state));
+ lr_state.orig_line_fn = fn_out_consume;
+ lr_state.orig_cb_data = &ecbdata;
+ lr_state.ranges = line_ranges;
+ strbuf_init(&lr_state.rhunk, 0);
+ strbuf_init(&lr_state.pending_rm, 0);
+
+ /*
+ * Inflate ctxlen so that all changes within
+ * any single range are merged into one xdiff
+ * hunk and the inter-change context is emitted.
+ * The callback clips back to range boundaries.
+ *
+ * The optimal ctxlen depends on where changes
+ * fall within the range, which is only known
+ * after xdiff runs; the max range span is the
+ * upper bound that guarantees correctness in a
+ * single pass.
+ */
+ for (i = 0; i < line_ranges->nr; i++) {
+ long span = line_ranges->ranges[i].end -
+ line_ranges->ranges[i].start;
+ if (span > max_span)
+ max_span = span;
+ }
+ if (max_span > xecfg.ctxlen)
+ xecfg.ctxlen = max_span;
+
+ if (xdi_diff_outf(&mf1, &mf2,
+ line_range_hunk_fn,
+ line_range_line_fn,
+ &lr_state, &xpp, &xecfg))
+ die("unable to generate diff for %s",
+ one->path);
+
+ flush_rhunk(&lr_state);
+ if (lr_state.ret)
+ die("unable to generate diff for %s",
+ one->path);
+ strbuf_release(&lr_state.rhunk);
+ strbuf_release(&lr_state.pending_rm);
} else if (xdi_diff_outf(&mf1, &mf2, NULL, fn_out_consume,
&ecbdata, &xpp, &xecfg))
die("unable to generate diff for %s", one->path);
@@ -4674,7 +4949,7 @@ static void run_diff_cmd(const struct external_diff *pgm,
builtin_diff(name, other ? other : name,
one, two, xfrm_msg, must_show_header,
- o, complete_rewrite);
+ o, complete_rewrite, p->line_ranges);
if (p->status == DIFF_STATUS_COPIED ||
p->status == DIFF_STATUS_RENAMED)
o->found_changes = 1;
diff --git a/diffcore.h b/diffcore.h
index 9c0a0e7aaf..d75038d1b3 100644
--- a/diffcore.h
+++ b/diffcore.h
@@ -19,6 +19,17 @@ struct userdiff_driver;
* in anything else.
*/
+/* A range [start, end). Lines are numbered starting at 0. */
+struct range {
+ long start, end;
+};
+
+/* A set of ranges. The ranges must always be disjoint and sorted. */
+struct range_set {
+ unsigned int alloc, nr;
+ struct range *ranges;
+};
+
/* We internally use unsigned short as the score value,
* and rely on an int capable to hold 32-bits. -B can take
* -Bmerge_score/break_score format and the two scores are
@@ -106,6 +117,11 @@ int diff_filespec_is_binary(struct repository *, struct diff_filespec *);
struct diff_filepair {
struct diff_filespec *one;
struct diff_filespec *two;
+ /*
+ * Tracked line ranges for -L filtering; borrowed from
+ * line_log_data and must not be freed.
+ */
+ const struct range_set *line_ranges;
unsigned short int score;
char status; /* M C R A D U etc. (see Documentation/diff-format.adoc or DIFF_STATUS_* in diff.h) */
unsigned broken_pair : 1;
diff --git a/fetch-pack.c b/fetch-pack.c
index 6ecd468ef7..c8fa0a609a 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -51,7 +51,6 @@ static int server_supports_filtering;
static int advertise_sid;
static struct shallow_lock shallow_lock;
static const char *alternate_shallow_file;
-static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static struct strbuf fsck_msg_types = STRBUF_INIT;
static struct string_list uri_protocols = STRING_LIST_INIT_DUP;
@@ -145,7 +144,7 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
if (commit) {
if (mark_tags_complete_and_check_obj_db) {
if (!odb_has_object(the_repository->objects, oid,
- HAS_OBJECT_RECHECK_PACKED))
+ ODB_HAS_OBJECT_RECHECK_PACKED))
die_in_commit_graph_only(oid);
}
return commit;
@@ -1024,12 +1023,8 @@ static int get_pack(struct fetch_pack_args *args,
fsck_msg_types.buf);
}
- if (index_pack_args) {
- int i;
-
- for (i = 0; i < cmd.args.nr; i++)
- strvec_push(index_pack_args, cmd.args.v[i]);
- }
+ if (index_pack_args)
+ strvec_pushv(index_pack_args, cmd.args.v);
sigchain_push(SIGPIPE, SIG_IGN);
@@ -1100,6 +1095,7 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
struct shallow_info *si,
struct string_list *pack_lockfiles)
{
+ struct fsck_options fsck_options = { 0 };
struct repository *r = the_repository;
struct ref *ref = copy_ref_list(orig_ref);
struct object_id oid;
@@ -1228,6 +1224,8 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
alternate_shallow_file = setup_temporary_shallow(si->shallow);
} else
alternate_shallow_file = NULL;
+
+ fsck_options_init(&fsck_options, the_repository, FSCK_OPTIONS_MISSING_GITMODULES);
if (get_pack(args, fd, pack_lockfiles, NULL, sought, nr_sought,
&fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
@@ -1235,6 +1233,7 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
die("fsck failed");
all_done:
+ fsck_options_clear(&fsck_options);
if (negotiator)
negotiator->release(negotiator);
return ref;
@@ -1654,6 +1653,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct string_list *pack_lockfiles)
{
struct repository *r = the_repository;
+ struct fsck_options fsck_options;
struct ref *ref = copy_ref_list(orig_ref);
enum fetch_state state = FETCH_CHECK_LOCAL;
struct oidset common = OIDSET_INIT;
@@ -1671,6 +1671,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct strvec index_pack_args = STRVEC_INIT;
const char *promisor_remote_config;
+ fsck_options_init(&fsck_options, the_repository, FSCK_OPTIONS_MISSING_GITMODULES);
+
if (server_feature_v2("promisor-remote", &promisor_remote_config))
promisor_remote_reply(promisor_remote_config, NULL);
@@ -1882,6 +1884,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
if (negotiator)
negotiator->release(negotiator);
+ fsck_options_clear(&fsck_options);
oidset_clear(&common);
return ref;
}
@@ -2013,7 +2016,7 @@ static void update_shallow(struct fetch_pack_args *args,
struct object_id *oid = si->shallow->oid;
for (i = 0; i < si->shallow->nr; i++)
if (odb_has_object(the_repository->objects, &oid[i],
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
oid_array_append(&extra, &oid[i]);
if (extra.nr) {
setup_alternate_shallow(&shallow_lock,
diff --git a/fsck.c b/fsck.c
index 0f02cf8f77..b72200c352 100644
--- a/fsck.c
+++ b/fsck.c
@@ -1,5 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
-
#include "git-compat-util.h"
#include "date.h"
#include "dir.h"
@@ -207,7 +205,7 @@ void fsck_set_msg_types(struct fsck_options *options, const char *values)
if (equal == len)
die("skiplist requires a path");
oidset_parse_file(&options->skip_oids, buf + equal + 1,
- the_repository->hash_algo);
+ options->repo->hash_algo);
buf += len + 1;
continue;
}
@@ -360,7 +358,7 @@ static int fsck_walk_tree(struct tree *tree, void *data, struct fsck_options *op
int res = 0;
const char *name;
- if (repo_parse_tree(the_repository, tree))
+ if (repo_parse_tree(options->repo, tree))
return -1;
name = fsck_get_object_name(options, &tree->object.oid);
@@ -375,14 +373,14 @@ static int fsck_walk_tree(struct tree *tree, void *data, struct fsck_options *op
continue;
if (S_ISDIR(entry.mode)) {
- obj = (struct object *)lookup_tree(the_repository, &entry.oid);
+ obj = (struct object *)lookup_tree(options->repo, &entry.oid);
if (name && obj)
fsck_put_object_name(options, &entry.oid, "%s%s/",
name, entry.path);
result = options->walk(obj, OBJ_TREE, data, options);
}
else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode)) {
- obj = (struct object *)lookup_blob(the_repository, &entry.oid);
+ obj = (struct object *)lookup_blob(options->repo, &entry.oid);
if (name && obj)
fsck_put_object_name(options, &entry.oid, "%s%s",
name, entry.path);
@@ -409,7 +407,7 @@ static int fsck_walk_commit(struct commit *commit, void *data, struct fsck_optio
int result;
const char *name;
- if (repo_parse_commit(the_repository, commit))
+ if (repo_parse_commit(options->repo, commit))
return -1;
name = fsck_get_object_name(options, &commit->object.oid);
@@ -417,7 +415,7 @@ static int fsck_walk_commit(struct commit *commit, void *data, struct fsck_optio
fsck_put_object_name(options, get_commit_tree_oid(commit),
"%s:", name);
- result = options->walk((struct object *) repo_get_commit_tree(the_repository, commit),
+ result = options->walk((struct object *) repo_get_commit_tree(options->repo, commit),
OBJ_TREE, data, options);
if (result < 0)
return result;
@@ -474,7 +472,7 @@ static int fsck_walk_tag(struct tag *tag, void *data, struct fsck_options *optio
{
const char *name = fsck_get_object_name(options, &tag->object.oid);
- if (parse_tag(the_repository, tag))
+ if (parse_tag(options->repo, tag))
return -1;
if (name)
fsck_put_object_name(options, &tag->tagged->oid, "%s", name);
@@ -487,7 +485,7 @@ int fsck_walk(struct object *obj, void *data, struct fsck_options *options)
return -1;
if (obj->type == OBJ_NONE)
- parse_object(the_repository, &obj->oid);
+ parse_object(options->repo, &obj->oid);
switch (obj->type) {
case OBJ_BLOB:
@@ -970,14 +968,14 @@ static int fsck_commit(const struct object_id *oid,
if (buffer >= buffer_end || !skip_prefix(buffer, "tree ", &buffer))
return report(options, oid, OBJ_COMMIT, FSCK_MSG_MISSING_TREE, "invalid format - expected 'tree' line");
- if (parse_oid_hex(buffer, &tree_oid, &p) || *p != '\n') {
+ if (parse_oid_hex_algop(buffer, &tree_oid, &p, options->repo->hash_algo) || *p != '\n') {
err = report(options, oid, OBJ_COMMIT, FSCK_MSG_BAD_TREE_SHA1, "invalid 'tree' line format - bad sha1");
if (err)
return err;
}
buffer = p + 1;
while (buffer < buffer_end && skip_prefix(buffer, "parent ", &buffer)) {
- if (parse_oid_hex(buffer, &parent_oid, &p) || *p != '\n') {
+ if (parse_oid_hex_algop(buffer, &parent_oid, &p, options->repo->hash_algo) || *p != '\n') {
err = report(options, oid, OBJ_COMMIT, FSCK_MSG_BAD_PARENT_SHA1, "invalid 'parent' line format - bad sha1");
if (err)
return err;
@@ -1044,7 +1042,7 @@ int fsck_tag_standalone(const struct object_id *oid, const char *buffer,
ret = report(options, oid, OBJ_TAG, FSCK_MSG_MISSING_OBJECT, "invalid format - expected 'object' line");
goto done;
}
- if (parse_oid_hex(buffer, tagged_oid, &p) || *p != '\n') {
+ if (parse_oid_hex_algop(buffer, tagged_oid, &p, options->repo->hash_algo) || *p != '\n') {
ret = report(options, oid, OBJ_TAG, FSCK_MSG_BAD_OBJECT_SHA1, "invalid 'object' line format - bad sha1");
if (ret)
goto done;
@@ -1336,9 +1334,9 @@ static int fsck_blobs(struct oidset *blobs_found, struct oidset *blobs_done,
if (oidset_contains(blobs_done, oid))
continue;
- buf = odb_read_object(the_repository->objects, oid, &type, &size);
+ buf = odb_read_object(options->repo->objects, oid, &type, &size);
if (!buf) {
- if (is_promisor_object(the_repository, oid))
+ if (is_promisor_object(options->repo, oid))
continue;
ret |= report(options,
oid, OBJ_BLOB, msg_missing,
@@ -1380,6 +1378,54 @@ bool fsck_has_queued_checks(struct fsck_options *options)
!oidset_equal(&options->gitattributes_found, &options->gitattributes_done);
}
+void fsck_options_init(struct fsck_options *options,
+ struct repository *repo,
+ enum fsck_options_type type)
+{
+ static const struct fsck_options defaults[] = {
+ [FSCK_OPTIONS_DEFAULT] = {
+ .skip_oids = OIDSET_INIT,
+ .gitmodules_found = OIDSET_INIT,
+ .gitmodules_done = OIDSET_INIT,
+ .gitattributes_found = OIDSET_INIT,
+ .gitattributes_done = OIDSET_INIT,
+ .error_func = fsck_objects_error_function
+ },
+ [FSCK_OPTIONS_STRICT] = {
+ .strict = 1,
+ .gitmodules_found = OIDSET_INIT,
+ .gitmodules_done = OIDSET_INIT,
+ .gitattributes_found = OIDSET_INIT,
+ .gitattributes_done = OIDSET_INIT,
+ .error_func = fsck_objects_error_function,
+ },
+ [FSCK_OPTIONS_MISSING_GITMODULES] = {
+ .strict = 1,
+ .gitmodules_found = OIDSET_INIT,
+ .gitmodules_done = OIDSET_INIT,
+ .gitattributes_found = OIDSET_INIT,
+ .gitattributes_done = OIDSET_INIT,
+ .error_func = fsck_objects_error_cb_print_missing_gitmodules,
+ },
+ [FSCK_OPTIONS_REFS] = {
+ .error_func = fsck_refs_error_function,
+ },
+ };
+
+ switch (type) {
+ case FSCK_OPTIONS_DEFAULT:
+ case FSCK_OPTIONS_STRICT:
+ case FSCK_OPTIONS_MISSING_GITMODULES:
+ case FSCK_OPTIONS_REFS:
+ memcpy(options, &defaults[type], sizeof(*options));
+ break;
+ default:
+ BUG("unknown fsck options type %d", type);
+ }
+
+ options->repo = repo;
+}
+
void fsck_options_clear(struct fsck_options *options)
{
free(options->msg_type);
diff --git a/fsck.h b/fsck.h
index 65ecbb7fe1..e77935c8a9 100644
--- a/fsck.h
+++ b/fsck.h
@@ -166,7 +166,10 @@ struct fsck_ref_report {
const char *path;
};
+struct repository;
+
struct fsck_options {
+ struct repository *repo;
fsck_walk_func walk;
fsck_error error_func;
unsigned strict;
@@ -180,34 +183,6 @@ struct fsck_options {
kh_oid_map_t *object_names;
};
-#define FSCK_OPTIONS_DEFAULT { \
- .skip_oids = OIDSET_INIT, \
- .gitmodules_found = OIDSET_INIT, \
- .gitmodules_done = OIDSET_INIT, \
- .gitattributes_found = OIDSET_INIT, \
- .gitattributes_done = OIDSET_INIT, \
- .error_func = fsck_objects_error_function \
-}
-#define FSCK_OPTIONS_STRICT { \
- .strict = 1, \
- .gitmodules_found = OIDSET_INIT, \
- .gitmodules_done = OIDSET_INIT, \
- .gitattributes_found = OIDSET_INIT, \
- .gitattributes_done = OIDSET_INIT, \
- .error_func = fsck_objects_error_function, \
-}
-#define FSCK_OPTIONS_MISSING_GITMODULES { \
- .strict = 1, \
- .gitmodules_found = OIDSET_INIT, \
- .gitmodules_done = OIDSET_INIT, \
- .gitattributes_found = OIDSET_INIT, \
- .gitattributes_done = OIDSET_INIT, \
- .error_func = fsck_objects_error_cb_print_missing_gitmodules, \
-}
-#define FSCK_REFS_OPTIONS_DEFAULT { \
- .error_func = fsck_refs_error_function, \
-}
-
/* descend in all linked child objects
* the return value is:
* -1 error in processing the object
@@ -255,6 +230,17 @@ int fsck_finish(struct fsck_options *options);
*/
bool fsck_has_queued_checks(struct fsck_options *options);
+enum fsck_options_type {
+ FSCK_OPTIONS_DEFAULT,
+ FSCK_OPTIONS_STRICT,
+ FSCK_OPTIONS_MISSING_GITMODULES,
+ FSCK_OPTIONS_REFS,
+};
+
+void fsck_options_init(struct fsck_options *options,
+ struct repository *repo,
+ enum fsck_options_type type);
+
/*
* Clear the fsck_options struct, freeing any allocated memory.
*/
diff --git a/git-compat-util.h b/git-compat-util.h
index bebcf9f698..4b4ea2498f 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -34,10 +34,6 @@ struct strbuf;
# define DISABLE_WARNING(warning)
#endif
-#ifdef DISABLE_SIGN_COMPARE_WARNINGS
-DISABLE_WARNING(-Wsign-compare)
-#endif
-
#undef FLEX_ARRAY
#define FLEX_ARRAY /* empty - weather balloon to require C99 FAM */
@@ -1099,3 +1095,7 @@ extern int not_supposed_to_survive;
#endif /* CHECK_ASSERTION_SIDE_EFFECTS */
#endif
+
+#ifdef DISABLE_SIGN_COMPARE_WARNINGS
+DISABLE_WARNING(-Wsign-compare)
+#endif
diff --git a/git-curl-compat.h b/git-curl-compat.h
index 659e5a3875..dccdd4d6e5 100644
--- a/git-curl-compat.h
+++ b/git-curl-compat.h
@@ -38,6 +38,14 @@
#endif
/**
+ * CURLINFO_RETRY_AFTER was added in 7.66.0, released in September 2019.
+ * It allows curl to automatically parse Retry-After headers.
+ */
+#if LIBCURL_VERSION_NUM >= 0x074200
+#define GIT_CURL_HAVE_CURLINFO_RETRY_AFTER 1
+#endif
+
+/**
* CURLOPT_PROTOCOLS_STR and CURLOPT_REDIR_PROTOCOLS_STR were added in 7.85.0,
* released in August 2022.
*/
diff --git a/git.c b/git.c
index 2b212e6675..5a40eab8a2 100644
--- a/git.c
+++ b/git.c
@@ -877,8 +877,7 @@ static int run_argv(struct strvec *args)
commit_pager_choice();
strvec_push(&cmd.args, "git");
- for (size_t i = 0; i < args->nr; i++)
- strvec_push(&cmd.args, args->v[i]);
+ strvec_pushv(&cmd.args, args->v);
trace_argv_printf(cmd.args.v, "trace: exec:");
diff --git a/gpg-interface.c b/gpg-interface.c
index d517425034..dafd5371fa 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -1164,6 +1164,8 @@ int parse_sign_mode(const char *arg, enum sign_mode *mode, const char **keyid)
*mode = SIGN_WARN_STRIP;
} else if (!strcmp(arg, "strip")) {
*mode = SIGN_STRIP;
+ } else if (!strcmp(arg, "abort-if-invalid")) {
+ *mode = SIGN_ABORT_IF_INVALID;
} else if (!strcmp(arg, "strip-if-invalid")) {
*mode = SIGN_STRIP_IF_INVALID;
} else if (!strcmp(arg, "sign-if-invalid")) {
diff --git a/gpg-interface.h b/gpg-interface.h
index a365586ce1..3d95f5ec14 100644
--- a/gpg-interface.h
+++ b/gpg-interface.h
@@ -115,6 +115,7 @@ void print_signature_buffer(const struct signature_check *sigc,
/* Modes for --signed-tags=<mode> and --signed-commits=<mode> options. */
enum sign_mode {
SIGN_ABORT,
+ SIGN_ABORT_IF_INVALID,
SIGN_WARN_VERBATIM,
SIGN_VERBATIM,
SIGN_WARN_STRIP,
diff --git a/hash.c b/hash.c
index 553f2008ea..e925b9754e 100644
--- a/hash.c
+++ b/hash.c
@@ -317,3 +317,21 @@ const struct git_hash_algo *unsafe_hash_algo(const struct git_hash_algo *algop)
/* Otherwise use the default one. */
return algop;
}
+
+unsigned oid_common_prefix_hexlen(const struct object_id *a,
+ const struct object_id *b)
+{
+ unsigned rawsz = hash_algos[a->algo].rawsz;
+
+ for (unsigned i = 0; i < rawsz; i++) {
+ if (a->hash[i] == b->hash[i])
+ continue;
+
+ if ((a->hash[i] ^ b->hash[i]) & 0xf0)
+ return i * 2;
+ else
+ return i * 2 + 1;
+ }
+
+ return rawsz * 2;
+}
diff --git a/hash.h b/hash.h
index d51efce1d3..c082a53c9a 100644
--- a/hash.h
+++ b/hash.h
@@ -396,6 +396,9 @@ static inline int oideq(const struct object_id *oid1, const struct object_id *oi
return !memcmp(oid1->hash, oid2->hash, GIT_MAX_RAWSZ);
}
+unsigned oid_common_prefix_hexlen(const struct object_id *a,
+ const struct object_id *b);
+
static inline void oidcpy(struct object_id *dst, const struct object_id *src)
{
memcpy(dst->hash, src->hash, GIT_MAX_RAWSZ);
diff --git a/hook.c b/hook.c
index 2c8252b2c4..cc23276d27 100644
--- a/hook.c
+++ b/hook.c
@@ -1,16 +1,16 @@
#include "git-compat-util.h"
#include "abspath.h"
#include "advice.h"
+#include "config.h"
+#include "environment.h"
#include "gettext.h"
#include "hook.h"
-#include "path.h"
#include "parse.h"
+#include "path.h"
#include "run-command.h"
-#include "config.h"
+#include "setup.h"
#include "strbuf.h"
#include "strmap.h"
-#include "environment.h"
-#include "setup.h"
const char *find_hook(struct repository *r, const char *name)
{
@@ -52,34 +52,26 @@ const char *find_hook(struct repository *r, const char *name)
return path.buf;
}
-static void hook_clear(struct hook *h, cb_data_free_fn cb_data_free)
+void hook_free(void *p, const char *str UNUSED)
{
+ struct hook *h = p;
+
if (!h)
return;
- if (h->kind == HOOK_TRADITIONAL)
+ if (h->kind == HOOK_TRADITIONAL) {
free((void *)h->u.traditional.path);
- else if (h->kind == HOOK_CONFIGURED) {
+ } else if (h->kind == HOOK_CONFIGURED) {
free((void *)h->u.configured.friendly_name);
free((void *)h->u.configured.command);
}
- if (cb_data_free)
- cb_data_free(h->feed_pipe_cb_data);
+ if (h->data_free && h->feed_pipe_cb_data)
+ h->data_free(h->feed_pipe_cb_data);
free(h);
}
-void hook_list_clear(struct string_list *hooks, cb_data_free_fn cb_data_free)
-{
- struct string_list_item *item;
-
- for_each_string_list_item(item, hooks)
- hook_clear(item->util, cb_data_free);
-
- string_list_clear(hooks, 0);
-}
-
/* Helper to detect and add default "traditional" hooks from the hookdir. */
static void list_hooks_add_default(struct repository *r, const char *hookname,
struct string_list *hook_list,
@@ -91,7 +83,7 @@ static void list_hooks_add_default(struct repository *r, const char *hookname,
if (!hook_path)
return;
- h = xcalloc(1, sizeof(struct hook));
+ CALLOC_ARRAY(h, 1);
/*
* If the hook is to run in a specific dir, a relative path can
@@ -100,9 +92,15 @@ static void list_hooks_add_default(struct repository *r, const char *hookname,
if (options && options->dir)
hook_path = absolute_path(hook_path);
- /* Setup per-hook internal state cb data */
- if (options && options->feed_pipe_cb_data_alloc)
+ /*
+ * Setup per-hook internal state callback data.
+ * When provided, the alloc/free callbacks are always provided
+ * together, so use them to alloc/free the internal hook state.
+ */
+ if (options && options->feed_pipe_cb_data_alloc) {
h->feed_pipe_cb_data = options->feed_pipe_cb_data_alloc(options->feed_pipe_ctx);
+ h->data_free = options->feed_pipe_cb_data_free;
+ }
h->kind = HOOK_TRADITIONAL;
h->u.traditional.path = xstrdup(hook_path);
@@ -110,19 +108,21 @@ static void list_hooks_add_default(struct repository *r, const char *hookname,
string_list_append(hook_list, hook_path)->util = h;
}
-static void unsorted_string_list_remove(struct string_list *list,
- const char *str)
-{
- struct string_list_item *item = unsorted_string_list_lookup(list, str);
- if (item)
- unsorted_string_list_delete_item(list, item - list->items, 0);
-}
+/*
+ * Cache entry stored as the .util pointer of string_list items inside the
+ * hook config cache.
+ */
+struct hook_config_cache_entry {
+ char *command;
+ enum config_scope scope;
+ bool disabled;
+};
/*
* Callback struct to collect all hook.* keys in a single config pass.
* commands: friendly-name to command map.
* event_hooks: event-name to list of friendly-names map.
- * disabled_hooks: set of friendly-names with hook.name.enabled = false.
+ * disabled_hooks: set of friendly-names with hook.<friendly-name>.enabled = false.
*/
struct hook_all_config_cb {
struct strmap commands;
@@ -132,7 +132,7 @@ struct hook_all_config_cb {
/* repo_config() callback that collects all hook.* configuration in one pass. */
static int hook_config_lookup_all(const char *key, const char *value,
- const struct config_context *ctx UNUSED,
+ const struct config_context *ctx,
void *cb_data)
{
struct hook_all_config_cb *data = cb_data;
@@ -156,20 +156,32 @@ static int hook_config_lookup_all(const char *key, const char *value,
struct strmap_entry *e;
strmap_for_each_entry(&data->event_hooks, &iter, e)
- unsorted_string_list_remove(e->value, hook_name);
+ unsorted_string_list_remove(e->value, hook_name, 0);
} else {
struct string_list *hooks =
strmap_get(&data->event_hooks, value);
if (!hooks) {
- hooks = xcalloc(1, sizeof(*hooks));
+ CALLOC_ARRAY(hooks, 1);
string_list_init_dup(hooks);
strmap_put(&data->event_hooks, value, hooks);
}
/* Re-insert if necessary to preserve last-seen order. */
- unsorted_string_list_remove(hooks, hook_name);
- string_list_append(hooks, hook_name);
+ unsorted_string_list_remove(hooks, hook_name, 0);
+
+ if (!ctx->kvi)
+ BUG("hook config callback called without key-value info");
+
+ /*
+ * Stash the config scope in the util pointer for
+ * later retrieval in build_hook_config_map(). This
+ * intermediate struct is transient and never leaves
+ * that function, so we pack the enum value into the
+ * pointer rather than heap-allocating a wrapper.
+ */
+ string_list_append(hooks, hook_name)->util =
+ (void *)(uintptr_t)ctx->kvi->scope;
}
} else if (!strcmp(subkey, "command")) {
/* Store command overwriting the old value */
@@ -186,7 +198,7 @@ static int hook_config_lookup_all(const char *key, const char *value,
break;
case 1: /* enabled: undo a prior disabled entry */
unsorted_string_list_remove(&data->disabled_hooks,
- hook_name);
+ hook_name, 0);
break;
default:
break; /* ignore unrecognised values */
@@ -202,8 +214,10 @@ static int hook_config_lookup_all(const char *key, const char *value,
* every item's string is the hook's friendly-name and its util pointer is
* the corresponding command string. Both strings are owned by the map.
*
- * Disabled hooks and hooks missing a command are already filtered out at
- * parse time, so callers can iterate the list directly.
+ * Disabled hooks are kept in the cache with entry->disabled set, so that
+ * "git hook list" can display them. A non-disabled hook missing a command
+ * is fatal; a disabled hook missing a command emits a warning and is kept
+ * in the cache with entry->command = NULL.
*/
void hook_cache_clear(struct strmap *cache)
{
@@ -212,7 +226,12 @@ void hook_cache_clear(struct strmap *cache)
strmap_for_each_entry(cache, &iter, e) {
struct string_list *hooks = e->value;
- string_list_clear(hooks, 1); /* free util (command) pointers */
+ for (size_t i = 0; i < hooks->nr; i++) {
+ struct hook_config_cache_entry *entry = hooks->items[i].util;
+ free(entry->command);
+ free(entry);
+ }
+ string_list_clear(hooks, 0);
free(hooks);
}
strmap_clear(cache, 0);
@@ -235,28 +254,39 @@ static void build_hook_config_map(struct repository *r, struct strmap *cache)
/* Construct the cache from parsed configs. */
strmap_for_each_entry(&cb_data.event_hooks, &iter, e) {
struct string_list *hook_names = e->value;
- struct string_list *hooks = xcalloc(1, sizeof(*hooks));
+ struct string_list *hooks;
+ CALLOC_ARRAY(hooks, 1);
string_list_init_dup(hooks);
for (size_t i = 0; i < hook_names->nr; i++) {
const char *hname = hook_names->items[i].string;
+ enum config_scope scope =
+ (enum config_scope)(uintptr_t)hook_names->items[i].util;
+ struct hook_config_cache_entry *entry;
char *command;
- /* filter out disabled hooks */
- if (unsorted_string_list_lookup(&cb_data.disabled_hooks,
- hname))
- continue;
+ bool is_disabled =
+ !!unsorted_string_list_lookup(
+ &cb_data.disabled_hooks, hname);
command = strmap_get(&cb_data.commands, hname);
- if (!command)
- die(_("'hook.%s.command' must be configured or "
- "'hook.%s.event' must be removed;"
- " aborting."), hname, hname);
+ if (!command) {
+ if (is_disabled)
+ warning(_("disabled hook '%s' has no "
+ "command configured"), hname);
+ else
+ die(_("'hook.%s.command' must be configured or "
+ "'hook.%s.event' must be removed;"
+ " aborting."), hname, hname);
+ }
- /* util stores the command; owned by the cache. */
- string_list_append(hooks, hname)->util =
- xstrdup(command);
+ /* util stores a cache entry; owned by the cache. */
+ CALLOC_ARRAY(entry, 1);
+ entry->command = xstrdup_or_null(command);
+ entry->scope = scope;
+ entry->disabled = is_disabled;
+ string_list_append(hooks, hname)->util = entry;
}
strmap_put(cache, e->key, hooks);
@@ -289,7 +319,7 @@ static struct strmap *get_hook_config_cache(struct repository *r)
* it just once on the first call.
*/
if (!r->hook_config_cache) {
- r->hook_config_cache = xcalloc(1, sizeof(*cache));
+ CALLOC_ARRAY(r->hook_config_cache, 1);
strmap_init(r->hook_config_cache);
build_hook_config_map(r, r->hook_config_cache);
}
@@ -297,9 +327,9 @@ static struct strmap *get_hook_config_cache(struct repository *r)
} else {
/*
* Out-of-repo calls (no gitdir) allocate and return a temporary
- * map cache which gets free'd immediately by the caller.
+ * cache which gets freed immediately by the caller.
*/
- cache = xcalloc(1, sizeof(*cache));
+ CALLOC_ARRAY(cache, 1);
strmap_init(cache);
build_hook_config_map(r, cache);
}
@@ -318,17 +348,28 @@ static void list_hooks_add_configured(struct repository *r,
/* Iterate through configured hooks and initialize internal states */
for (size_t i = 0; configured_hooks && i < configured_hooks->nr; i++) {
const char *friendly_name = configured_hooks->items[i].string;
- const char *command = configured_hooks->items[i].util;
- struct hook *hook = xcalloc(1, sizeof(struct hook));
+ struct hook_config_cache_entry *entry = configured_hooks->items[i].util;
+ struct hook *hook;
- if (options && options->feed_pipe_cb_data_alloc)
+ CALLOC_ARRAY(hook, 1);
+
+ /*
+ * When provided, the alloc/free callbacks are always provided
+ * together, so use them to alloc/free the internal hook state.
+ */
+ if (options && options->feed_pipe_cb_data_alloc) {
hook->feed_pipe_cb_data =
options->feed_pipe_cb_data_alloc(
options->feed_pipe_ctx);
+ hook->data_free = options->feed_pipe_cb_data_free;
+ }
hook->kind = HOOK_CONFIGURED;
hook->u.configured.friendly_name = xstrdup(friendly_name);
- hook->u.configured.command = xstrdup(command);
+ hook->u.configured.command =
+ entry->command ? xstrdup(entry->command) : NULL;
+ hook->u.configured.scope = entry->scope;
+ hook->u.configured.disabled = entry->disabled;
string_list_append(list, friendly_name)->util = hook;
}
@@ -351,7 +392,7 @@ struct string_list *list_hooks(struct repository *r, const char *hookname,
if (!hookname)
BUG("null hookname was provided to hook_list()!");
- hook_head = xmalloc(sizeof(struct string_list));
+ CALLOC_ARRAY(hook_head, 1);
string_list_init_dup(hook_head);
/* Add hooks from the config, e.g. hook.myhook.event = pre-commit */
@@ -366,8 +407,17 @@ struct string_list *list_hooks(struct repository *r, const char *hookname,
int hook_exists(struct repository *r, const char *name)
{
struct string_list *hooks = list_hooks(r, name, NULL);
- int exists = hooks->nr > 0;
- hook_list_clear(hooks, NULL);
+ int exists = 0;
+
+ for (size_t i = 0; i < hooks->nr; i++) {
+ struct hook *h = hooks->items[i].util;
+ if (h->kind == HOOK_TRADITIONAL ||
+ !h->u.configured.disabled) {
+ exists = 1;
+ break;
+ }
+ }
+ string_list_clear_func(hooks, hook_free);
free(hooks);
return exists;
}
@@ -381,10 +431,11 @@ static int pick_next_hook(struct child_process *cp,
struct string_list *hook_list = hook_cb->hook_command_list;
struct hook *h;
- if (hook_cb->hook_to_run_index >= hook_list->nr)
- return 0;
-
- h = hook_list->items[hook_cb->hook_to_run_index++].util;
+ do {
+ if (hook_cb->hook_to_run_index >= hook_list->nr)
+ return 0;
+ h = hook_list->items[hook_cb->hook_to_run_index++].util;
+ } while (h->kind == HOOK_CONFIGURED && h->u.configured.disabled);
cp->no_stdin = 1;
strvec_pushv(&cp->env, hook_cb->options->env.v);
@@ -414,7 +465,11 @@ static int pick_next_hook(struct child_process *cp,
} else if (h->kind == HOOK_CONFIGURED) {
/* to enable oneliners, let config-specified hooks run in shell. */
cp->use_shell = true;
+ if (!h->u.configured.command)
+ BUG("non-disabled HOOK_CONFIGURED hook has no command");
strvec_push(&cp->args, h->u.configured.command);
+ } else {
+ BUG("unknown hook kind");
}
if (!cp->args.nr)
@@ -501,8 +556,7 @@ int run_hooks_opt(struct repository *r, const char *hook_name,
* Ensure cb_data copy and free functions are either provided together,
* or neither one is provided.
*/
- if ((options->feed_pipe_cb_data_alloc && !options->feed_pipe_cb_data_free) ||
- (!options->feed_pipe_cb_data_alloc && options->feed_pipe_cb_data_free))
+ if (!options->feed_pipe_cb_data_alloc != !options->feed_pipe_cb_data_free)
BUG("feed_pipe_cb_data_alloc and feed_pipe_cb_data_free must be set together");
if (options->invoked_hook)
@@ -518,7 +572,7 @@ int run_hooks_opt(struct repository *r, const char *hook_name,
run_processes_parallel(&opts);
ret = cb_data.rc;
cleanup:
- hook_list_clear(cb_data.hook_command_list, options->feed_pipe_cb_data_free);
+ string_list_clear_func(cb_data.hook_command_list, hook_free);
free(cb_data.hook_command_list);
run_hooks_opt_clear(options);
return ret;
diff --git a/hook.h b/hook.h
index e949f5d488..5c5628dd1f 100644
--- a/hook.h
+++ b/hook.h
@@ -1,17 +1,21 @@
#ifndef HOOK_H
#define HOOK_H
-#include "strvec.h"
+#include "config.h"
#include "run-command.h"
#include "string-list.h"
#include "strmap.h"
+#include "strvec.h"
struct repository;
+typedef void (*hook_data_free_fn)(void *data);
+typedef void *(*hook_data_alloc_fn)(void *init_ctx);
+
/**
* Represents a hook command to be run.
* Hooks can be:
* 1. "traditional" (found in the hooks directory)
- * 2. "configured" (defined in Git's configuration via hook.<name>.event).
+ * 2. "configured" (defined in Git's configuration via hook.<friendly-name>.event).
* The 'kind' field determines which part of the union 'u' is valid.
*/
struct hook {
@@ -26,6 +30,8 @@ struct hook {
struct {
const char *friendly_name;
const char *command;
+ enum config_scope scope;
+ bool disabled;
} configured;
} u;
@@ -41,13 +47,17 @@ struct hook {
* Only useful when using `run_hooks_opt.feed_pipe`, otherwise ignore it.
*/
void *feed_pipe_cb_data;
-};
-typedef void (*cb_data_free_fn)(void *data);
-typedef void *(*cb_data_alloc_fn)(void *init_ctx);
+ /**
+ * Callback to free `feed_pipe_cb_data`.
+ *
+ * It is called automatically and points to the `feed_pipe_cb_data_free`
+ * provided via the `run_hook_opt` parameter.
+ */
+ hook_data_free_fn data_free;
+};
-struct run_hooks_opt
-{
+struct run_hooks_opt {
/* Environment vars to be set for each hook */
struct strvec env;
@@ -132,14 +142,14 @@ struct run_hooks_opt
*
* The `feed_pipe_ctx` pointer can be used to pass initialization data.
*/
- cb_data_alloc_fn feed_pipe_cb_data_alloc;
+ hook_data_alloc_fn feed_pipe_cb_data_alloc;
/**
* Called to free the memory initialized by `feed_pipe_cb_data_alloc`.
*
* Must always be provided when `feed_pipe_cb_data_alloc` is provided.
*/
- cb_data_free_fn feed_pipe_cb_data_free;
+ hook_data_free_fn feed_pipe_cb_data_free;
};
#define RUN_HOOKS_OPT_INIT { \
@@ -186,10 +196,10 @@ struct string_list *list_hooks(struct repository *r, const char *hookname,
struct run_hooks_opt *options);
/**
- * Frees the memory allocated for the hook list, including the `struct hook`
- * items and their internal state.
+ * Frees a struct hook stored as the util pointer of a string_list_item.
+ * Suitable for use as a string_list_clear_func_t callback.
*/
-void hook_list_clear(struct string_list *hooks, cb_data_free_fn cb_data_free);
+void hook_free(void *p, const char *str);
/**
* Frees the hook configuration cache stored in `struct repository`.
diff --git a/http-push.c b/http-push.c
index 9ae6062198..06c3acbb5d 100644
--- a/http-push.c
+++ b/http-push.c
@@ -1449,7 +1449,7 @@ static void one_remote_ref(const char *refname)
*/
if (repo->can_update_info_refs &&
!odb_has_object(the_repository->objects, &ref->old_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR)) {
obj = lookup_unknown_object(the_repository, &ref->old_oid);
fprintf(stderr, " fetch %s for %s\n",
oid_to_hex(&ref->old_oid), refname);
@@ -1655,7 +1655,7 @@ static int delete_remote_branch(const char *pattern, int force)
if (is_null_oid(&head_oid))
return error("Unable to resolve remote HEAD");
if (!odb_has_object(the_repository->objects, &head_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return error("Remote HEAD resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", oid_to_hex(&head_oid));
/* Remote branch must resolve to a known object */
@@ -1663,7 +1663,7 @@ static int delete_remote_branch(const char *pattern, int force)
return error("Unable to resolve remote branch %s",
remote_ref->name);
if (!odb_has_object(the_repository->objects, &remote_ref->old_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return error("Remote branch %s resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", remote_ref->name, oid_to_hex(&remote_ref->old_oid));
/* Remote branch must be an ancestor of remote HEAD */
@@ -1886,7 +1886,7 @@ int cmd_main(int argc, const char **argv)
!is_null_oid(&ref->old_oid) &&
!ref->force) {
if (!odb_has_object(the_repository->objects, &ref->old_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) ||
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR) ||
!ref_newer(&ref->peer_ref->new_oid,
&ref->old_oid)) {
/*
diff --git a/http-walker.c b/http-walker.c
index e886e64866..1b6d496548 100644
--- a/http-walker.c
+++ b/http-walker.c
@@ -139,7 +139,7 @@ static int fill_active_slot(void *data UNUSED)
obj_req = list_entry(pos, struct object_request, node);
if (obj_req->state == WAITING) {
if (odb_has_object(the_repository->objects, &obj_req->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
obj_req->state = COMPLETE;
else {
start_object_request(obj_req);
@@ -495,7 +495,7 @@ static int fetch_object(struct walker *walker, const struct object_id *oid)
return error("Couldn't find request for %s in the queue", hex);
if (odb_has_object(the_repository->objects, &obj_req->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR)) {
if (obj_req->req)
abort_http_object_request(&obj_req->req);
abort_object_request(obj_req);
diff --git a/http.c b/http.c
index 8ea1b9d1f6..d8d016891b 100644
--- a/http.c
+++ b/http.c
@@ -22,6 +22,8 @@
#include "object-file.h"
#include "odb.h"
#include "tempfile.h"
+#include "date.h"
+#include "trace2.h"
static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
static int trace_curl_data = 1;
@@ -149,6 +151,11 @@ static char *cached_accept_language;
static char *http_ssl_backend;
static int http_schannel_check_revoke = 1;
+
+static long http_retry_after = 0;
+static long http_max_retries = 0;
+static long http_max_retry_time = 300;
+
/*
* With the backend being set to `schannel`, setting sslCAinfo would override
* the Certificate Store in cURL v7.60.0 and later, which is not what we want
@@ -209,7 +216,7 @@ static inline int is_hdr_continuation(const char *ptr, const size_t size)
return size && (*ptr == ' ' || *ptr == '\t');
}
-static size_t fwrite_wwwauth(char *ptr, size_t eltsize, size_t nmemb, void *p UNUSED)
+static size_t fwrite_wwwauth(char *ptr, size_t eltsize, size_t nmemb, void *p MAYBE_UNUSED)
{
size_t size = eltsize * nmemb;
struct strvec *values = &http_auth.wwwauth_headers;
@@ -575,6 +582,21 @@ static int http_options(const char *var, const char *value,
return 0;
}
+ if (!strcmp("http.retryafter", var)) {
+ http_retry_after = git_config_int(var, value, ctx->kvi);
+ return 0;
+ }
+
+ if (!strcmp("http.maxretries", var)) {
+ http_max_retries = git_config_int(var, value, ctx->kvi);
+ return 0;
+ }
+
+ if (!strcmp("http.maxretrytime", var)) {
+ http_max_retry_time = git_config_int(var, value, ctx->kvi);
+ return 0;
+ }
+
/* Fall back on the default ones */
return git_default_config(var, value, ctx, data);
}
@@ -1422,6 +1444,10 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
set_long_from_env(&curl_tcp_keepintvl, "GIT_TCP_KEEPINTVL");
set_long_from_env(&curl_tcp_keepcnt, "GIT_TCP_KEEPCNT");
+ set_long_from_env(&http_retry_after, "GIT_HTTP_RETRY_AFTER");
+ set_long_from_env(&http_max_retries, "GIT_HTTP_MAX_RETRIES");
+ set_long_from_env(&http_max_retry_time, "GIT_HTTP_MAX_RETRY_TIME");
+
curl_default = get_curl_handle();
}
@@ -1871,6 +1897,10 @@ static int handle_curl_result(struct slot_results *results)
}
return HTTP_REAUTH;
}
+ } else if (results->http_code == 429) {
+ trace2_data_intmax("http", the_repository, "http/429-retry-after",
+ results->retry_after);
+ return HTTP_RATE_LIMITED;
} else {
if (results->http_connectcode == 407)
credential_reject(the_repository, &proxy_auth);
@@ -1886,6 +1916,7 @@ int run_one_slot(struct active_request_slot *slot,
struct slot_results *results)
{
slot->results = results;
+
if (!start_active_slot(slot)) {
xsnprintf(curl_errorstr, sizeof(curl_errorstr),
"failed to start HTTP request");
@@ -2119,10 +2150,10 @@ static void http_opt_request_remainder(CURL *curl, off_t pos)
static int http_request(const char *url,
void *result, int target,
- const struct http_get_options *options)
+ struct http_get_options *options)
{
struct active_request_slot *slot;
- struct slot_results results;
+ struct slot_results results = { .retry_after = -1 };
struct curl_slist *headers = http_copy_default_headers();
struct strbuf buf = STRBUF_INIT;
const char *accept_language;
@@ -2156,22 +2187,19 @@ static int http_request(const char *url,
headers = curl_slist_append(headers, accept_language);
strbuf_addstr(&buf, "Pragma:");
- if (options && options->no_cache)
+ if (options->no_cache)
strbuf_addstr(&buf, " no-cache");
- if (options && options->initial_request &&
+ if (options->initial_request &&
http_follow_config == HTTP_FOLLOW_INITIAL)
curl_easy_setopt(slot->curl, CURLOPT_FOLLOWLOCATION, 1L);
headers = curl_slist_append(headers, buf.buf);
/* Add additional headers here */
- if (options && options->extra_headers) {
+ if (options->extra_headers) {
const struct string_list_item *item;
- if (options && options->extra_headers) {
- for_each_string_list_item(item, options->extra_headers) {
- headers = curl_slist_append(headers, item->string);
- }
- }
+ for_each_string_list_item(item, options->extra_headers)
+ headers = curl_slist_append(headers, item->string);
}
headers = http_append_auth_header(&http_auth, headers);
@@ -2183,7 +2211,18 @@ static int http_request(const char *url,
ret = run_one_slot(slot, &results);
- if (options && options->content_type) {
+#ifdef GIT_CURL_HAVE_CURLINFO_RETRY_AFTER
+ if (ret == HTTP_RATE_LIMITED) {
+ curl_off_t retry_after;
+ if (curl_easy_getinfo(slot->curl, CURLINFO_RETRY_AFTER,
+ &retry_after) == CURLE_OK && retry_after > 0)
+ results.retry_after = (long)retry_after;
+ }
+#endif
+
+ options->retry_after = results.retry_after;
+
+ if (options->content_type) {
struct strbuf raw = STRBUF_INIT;
curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE, &raw);
extract_content_type(&raw, options->content_type,
@@ -2191,7 +2230,7 @@ static int http_request(const char *url,
strbuf_release(&raw);
}
- if (options && options->effective_url)
+ if (options->effective_url)
curlinfo_strbuf(slot->curl, CURLINFO_EFFECTIVE_URL,
options->effective_url);
@@ -2253,22 +2292,66 @@ static int update_url_from_redirect(struct strbuf *base,
return 1;
}
-static int http_request_reauth(const char *url,
+/*
+ * Compute the retry delay for an HTTP 429 response.
+ * Returns a negative value if configuration is invalid (delay exceeds
+ * http.maxRetryTime), otherwise returns the delay in seconds (>= 0).
+ */
+static long handle_rate_limit_retry(long slot_retry_after)
+{
+ /* Use the slot-specific retry_after value or configured default */
+ if (slot_retry_after >= 0) {
+ /* Check if retry delay exceeds maximum allowed */
+ if (slot_retry_after > http_max_retry_time) {
+ error(_("response requested a delay greater than http.maxRetryTime (%ld > %ld seconds)"),
+ slot_retry_after, http_max_retry_time);
+ trace2_data_string("http", the_repository,
+ "http/429-error", "exceeds-max-retry-time");
+ trace2_data_intmax("http", the_repository,
+ "http/429-requested-delay", slot_retry_after);
+ return -1;
+ }
+ return slot_retry_after;
+ } else {
+ /* No Retry-After header provided, use configured default */
+ if (http_retry_after > http_max_retry_time) {
+ error(_("configured http.retryAfter exceeds http.maxRetryTime (%ld > %ld seconds)"),
+ http_retry_after, http_max_retry_time);
+ trace2_data_string("http", the_repository,
+ "http/429-error", "config-exceeds-max-retry-time");
+ return -1;
+ }
+ trace2_data_string("http", the_repository,
+ "http/429-retry-source", "config-default");
+ return http_retry_after;
+ }
+}
+
+static int http_request_recoverable(const char *url,
void *result, int target,
struct http_get_options *options)
{
+ static struct http_get_options empty_opts;
int i = 3;
int ret;
+ int rate_limit_retries = http_max_retries;
+
+ if (!options)
+ options = &empty_opts;
if (always_auth_proactively())
credential_fill(the_repository, &http_auth, 1);
ret = http_request(url, result, target, options);
- if (ret != HTTP_OK && ret != HTTP_REAUTH)
+ if (ret != HTTP_OK && ret != HTTP_REAUTH && ret != HTTP_RATE_LIMITED)
return ret;
- if (options && options->effective_url && options->base_url) {
+ /* If retries are disabled and we got a 429, fail immediately */
+ if (ret == HTTP_RATE_LIMITED && !http_max_retries)
+ return HTTP_ERROR;
+
+ if (options->effective_url && options->base_url) {
if (update_url_from_redirect(options->base_url,
url, options->effective_url)) {
credential_from_url(&http_auth, options->base_url->buf);
@@ -2276,7 +2359,9 @@ static int http_request_reauth(const char *url,
}
}
- while (ret == HTTP_REAUTH && --i) {
+ while ((ret == HTTP_REAUTH && --i) ||
+ (ret == HTTP_RATE_LIMITED && --rate_limit_retries)) {
+ long retry_delay = -1;
/*
* The previous request may have put cruft into our output stream; we
* should clear it out before making our next request.
@@ -2301,11 +2386,28 @@ static int http_request_reauth(const char *url,
default:
BUG("Unknown http_request target");
}
+ if (ret == HTTP_RATE_LIMITED) {
+ retry_delay = handle_rate_limit_retry(options->retry_after);
+ if (retry_delay < 0)
+ return HTTP_ERROR;
- credential_fill(the_repository, &http_auth, 1);
+ if (retry_delay > 0) {
+ warning(_("rate limited, waiting %ld seconds before retry"), retry_delay);
+ trace2_data_intmax("http", the_repository,
+ "http/retry-sleep-seconds", retry_delay);
+ sleep(retry_delay);
+ }
+ } else if (ret == HTTP_REAUTH) {
+ credential_fill(the_repository, &http_auth, 1);
+ }
ret = http_request(url, result, target, options);
}
+ if (ret == HTTP_RATE_LIMITED) {
+ trace2_data_string("http", the_repository,
+ "http/429-error", "retries-exhausted");
+ return HTTP_RATE_LIMITED;
+ }
return ret;
}
@@ -2313,7 +2415,7 @@ int http_get_strbuf(const char *url,
struct strbuf *result,
struct http_get_options *options)
{
- return http_request_reauth(url, result, HTTP_REQUEST_STRBUF, options);
+ return http_request_recoverable(url, result, HTTP_REQUEST_STRBUF, options);
}
/*
@@ -2337,7 +2439,7 @@ int http_get_file(const char *url, const char *filename,
goto cleanup;
}
- ret = http_request_reauth(url, result, HTTP_REQUEST_FILE, options);
+ ret = http_request_recoverable(url, result, HTTP_REQUEST_FILE, options);
fclose(result);
if (ret == HTTP_OK && finalize_object_file(the_repository, tmpfile.buf, filename))
diff --git a/http.h b/http.h
index f9d4593404..f9ee888c3e 100644
--- a/http.h
+++ b/http.h
@@ -20,6 +20,7 @@ struct slot_results {
long http_code;
long auth_avail;
long http_connectcode;
+ long retry_after;
};
struct active_request_slot {
@@ -157,6 +158,13 @@ struct http_get_options {
* request has completed.
*/
struct string_list *extra_headers;
+
+ /*
+ * After a request completes, contains the Retry-After delay in seconds
+ * if the server returned HTTP 429 with a Retry-After header (requires
+ * libcurl 7.66.0 or later), or -1 if no such header was present.
+ */
+ long retry_after;
};
/* Return values for http_get_*() */
@@ -167,6 +175,7 @@ struct http_get_options {
#define HTTP_REAUTH 4
#define HTTP_NOAUTH 5
#define HTTP_NOMATCHPUBLICKEY 6
+#define HTTP_RATE_LIMITED 7
/*
* Requests a URL and stores the result in a strbuf.
diff --git a/line-log.c b/line-log.c
index eeaf68454e..858a899cd2 100644
--- a/line-log.c
+++ b/line-log.c
@@ -858,173 +858,33 @@ static void queue_diffs(struct line_log_data *range,
diff_queue_clear(&diff_queued_diff);
diff_tree_oid(parent_tree_oid, tree_oid, "", opt);
if (opt->detect_rename && diff_might_be_rename()) {
+ struct diff_options rename_opts;
+
+ /*
+ * Build a private diff_options for rename detection so
+ * that any user-specified options on the original opts
+ * (e.g. pickaxe) cannot discard diff pairs needed for
+ * rename tracking. Similar to blame's find_rename().
+ */
+ repo_diff_setup(opt->repo, &rename_opts);
+ rename_opts.flags.recursive = 1;
+ rename_opts.detect_rename = opt->detect_rename;
+ rename_opts.rename_score = opt->rename_score;
+ rename_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_setup_done(&rename_opts);
+
/* must look at the full tree diff to detect renames */
- clear_pathspec(&opt->pathspec);
diff_queue_clear(&diff_queued_diff);
-
- diff_tree_oid(parent_tree_oid, tree_oid, "", opt);
+ diff_tree_oid(parent_tree_oid, tree_oid, "", &rename_opts);
filter_diffs_for_paths(range, 1);
- diffcore_std(opt);
+ diffcore_std(&rename_opts);
filter_diffs_for_paths(range, 0);
+ diff_free(&rename_opts);
}
move_diff_queue(queue, &diff_queued_diff);
}
-static char *get_nth_line(long line, unsigned long *ends, void *data)
-{
- if (line == 0)
- return (char *)data;
- else
- return (char *)data + ends[line] + 1;
-}
-
-static void print_line(const char *prefix, char first,
- long line, unsigned long *ends, void *data,
- const char *color, const char *reset, FILE *file)
-{
- char *begin = get_nth_line(line, ends, data);
- char *end = get_nth_line(line+1, ends, data);
- int had_nl = 0;
-
- if (end > begin && end[-1] == '\n') {
- end--;
- had_nl = 1;
- }
-
- fputs(prefix, file);
- fputs(color, file);
- putc(first, file);
- fwrite(begin, 1, end-begin, file);
- fputs(reset, file);
- putc('\n', file);
- if (!had_nl)
- fputs("\\ No newline at end of file\n", file);
-}
-
-static void dump_diff_hacky_one(struct rev_info *rev, struct line_log_data *range)
-{
- unsigned int i, j = 0;
- long p_lines, t_lines;
- unsigned long *p_ends = NULL, *t_ends = NULL;
- struct diff_filepair *pair = range->pair;
- struct diff_ranges *diff = &range->diff;
-
- struct diff_options *opt = &rev->diffopt;
- const char *prefix = diff_line_prefix(opt);
- const char *c_reset = diff_get_color(opt->use_color, DIFF_RESET);
- const char *c_frag = diff_get_color(opt->use_color, DIFF_FRAGINFO);
- const char *c_meta = diff_get_color(opt->use_color, DIFF_METAINFO);
- const char *c_old = diff_get_color(opt->use_color, DIFF_FILE_OLD);
- const char *c_new = diff_get_color(opt->use_color, DIFF_FILE_NEW);
- const char *c_context = diff_get_color(opt->use_color, DIFF_CONTEXT);
-
- if (!pair || !diff)
- goto out;
-
- if (pair->one->oid_valid)
- fill_line_ends(rev->diffopt.repo, pair->one, &p_lines, &p_ends);
- fill_line_ends(rev->diffopt.repo, pair->two, &t_lines, &t_ends);
-
- fprintf(opt->file, "%s%sdiff --git a/%s b/%s%s\n", prefix, c_meta, pair->one->path, pair->two->path, c_reset);
- fprintf(opt->file, "%s%s--- %s%s%s\n", prefix, c_meta,
- pair->one->oid_valid ? "a/" : "",
- pair->one->oid_valid ? pair->one->path : "/dev/null",
- c_reset);
- fprintf(opt->file, "%s%s+++ b/%s%s\n", prefix, c_meta, pair->two->path, c_reset);
- for (i = 0; i < range->ranges.nr; i++) {
- long p_start, p_end;
- long t_start = range->ranges.ranges[i].start;
- long t_end = range->ranges.ranges[i].end;
- long t_cur = t_start;
- unsigned int j_last;
-
- /*
- * If a diff range touches multiple line ranges, then all
- * those line ranges should be shown, so take a step back if
- * the current line range is still in the previous diff range
- * (even if only partially).
- */
- if (j > 0 && diff->target.ranges[j-1].end > t_start)
- j--;
-
- while (j < diff->target.nr && diff->target.ranges[j].end < t_start)
- j++;
- if (j == diff->target.nr || diff->target.ranges[j].start >= t_end)
- continue;
-
- /* Scan ahead to determine the last diff that falls in this range */
- j_last = j;
- while (j_last < diff->target.nr && diff->target.ranges[j_last].start < t_end)
- j_last++;
- if (j_last > j)
- j_last--;
-
- /*
- * Compute parent hunk headers: we know that the diff
- * has the correct line numbers (but not all hunks).
- * So it suffices to shift the start/end according to
- * the line numbers of the first/last hunk(s) that
- * fall in this range.
- */
- if (t_start < diff->target.ranges[j].start)
- p_start = diff->parent.ranges[j].start - (diff->target.ranges[j].start-t_start);
- else
- p_start = diff->parent.ranges[j].start;
- if (t_end > diff->target.ranges[j_last].end)
- p_end = diff->parent.ranges[j_last].end + (t_end-diff->target.ranges[j_last].end);
- else
- p_end = diff->parent.ranges[j_last].end;
-
- if (!p_start && !p_end) {
- p_start = -1;
- p_end = -1;
- }
-
- /* Now output a diff hunk for this range */
- fprintf(opt->file, "%s%s@@ -%ld,%ld +%ld,%ld @@%s\n",
- prefix, c_frag,
- p_start+1, p_end-p_start, t_start+1, t_end-t_start,
- c_reset);
- while (j < diff->target.nr && diff->target.ranges[j].start < t_end) {
- int k;
- for (; t_cur < diff->target.ranges[j].start; t_cur++)
- print_line(prefix, ' ', t_cur, t_ends, pair->two->data,
- c_context, c_reset, opt->file);
- for (k = diff->parent.ranges[j].start; k < diff->parent.ranges[j].end; k++)
- print_line(prefix, '-', k, p_ends, pair->one->data,
- c_old, c_reset, opt->file);
- for (; t_cur < diff->target.ranges[j].end && t_cur < t_end; t_cur++)
- print_line(prefix, '+', t_cur, t_ends, pair->two->data,
- c_new, c_reset, opt->file);
- j++;
- }
- for (; t_cur < t_end; t_cur++)
- print_line(prefix, ' ', t_cur, t_ends, pair->two->data,
- c_context, c_reset, opt->file);
- }
-
-out:
- free(p_ends);
- free(t_ends);
-}
-
-/*
- * NEEDSWORK: manually building a diff here is not the Right
- * Thing(tm). log -L should be built into the diff pipeline.
- */
-static void dump_diff_hacky(struct rev_info *rev, struct line_log_data *range)
-{
- const char *prefix = diff_line_prefix(&rev->diffopt);
-
- fprintf(rev->diffopt.file, "%s\n", prefix);
-
- while (range) {
- dump_diff_hacky_one(rev, range);
- range = range->next;
- }
-}
-
/*
* Unlike most other functions, this destructively operates on
* 'range'.
@@ -1088,7 +948,7 @@ static int process_diff_filepair(struct rev_info *rev,
static struct diff_filepair *diff_filepair_dup(struct diff_filepair *pair)
{
- struct diff_filepair *new_filepair = xmalloc(sizeof(struct diff_filepair));
+ struct diff_filepair *new_filepair = xcalloc(1, sizeof(struct diff_filepair));
new_filepair->one = pair->one;
new_filepair->two = pair->two;
new_filepair->one->count++;
@@ -1146,11 +1006,25 @@ static int process_all_files(struct line_log_data **range_out,
int line_log_print(struct rev_info *rev, struct commit *commit)
{
-
show_log(rev);
if (!(rev->diffopt.output_format & DIFF_FORMAT_NO_OUTPUT)) {
struct line_log_data *range = lookup_line_range(rev, commit);
- dump_diff_hacky(rev, range);
+ struct line_log_data *r;
+ const char *prefix = diff_line_prefix(&rev->diffopt);
+
+ fprintf(rev->diffopt.file, "%s\n", prefix);
+
+ for (r = range; r; r = r->next) {
+ if (r->pair) {
+ struct diff_filepair *p =
+ diff_filepair_dup(r->pair);
+ p->line_ranges = &r->ranges;
+ diff_q(&diff_queued_diff, p);
+ }
+ }
+
+ diffcore_std(&rev->diffopt);
+ diff_flush(&rev->diffopt);
}
return 1;
}
diff --git a/line-log.h b/line-log.h
index e9dadbc1a5..04a6ea64d3 100644
--- a/line-log.h
+++ b/line-log.h
@@ -1,22 +1,12 @@
#ifndef LINE_LOG_H
#define LINE_LOG_H
+#include "diffcore.h" /* struct range, struct range_set */
+
struct rev_info;
struct commit;
struct string_list;
-/* A range [start,end]. Lines are numbered starting at 0, and the
- * ranges include start but exclude end. */
-struct range {
- long start, end;
-};
-
-/* A set of ranges. The ranges must always be disjoint and sorted. */
-struct range_set {
- unsigned int alloc, nr;
- struct range *ranges;
-};
-
/* A diff, encoded as the set of pre- and post-image ranges where the
* files differ. A pair of ranges corresponds to a hunk. */
struct diff_ranges {
diff --git a/list-objects.c b/list-objects.c
index 91b23e22f7..724d723c48 100644
--- a/list-objects.c
+++ b/list-objects.c
@@ -75,7 +75,7 @@ static void process_blob(struct traversal_context *ctx,
*/
if (ctx->revs->exclude_promisor_objects &&
!odb_has_object(the_repository->objects, &obj->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) &&
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR) &&
is_promisor_object(ctx->revs->repo, &obj->oid))
return;
diff --git a/mailinfo.c b/mailinfo.c
index a2f06dbd96..13949ff31e 100644
--- a/mailinfo.c
+++ b/mailinfo.c
@@ -470,7 +470,7 @@ static int convert_to_utf8(struct mailinfo *mi,
return error("cannot convert from %s to %s",
charset, mi->metainfo_charset);
}
- strbuf_attach(line, out, out_len, out_len);
+ strbuf_attach(line, out, out_len, out_len + 1);
return 0;
}
diff --git a/meson.build b/meson.build
index 1b0e431d5f..8309942d18 100644
--- a/meson.build
+++ b/meson.build
@@ -271,6 +271,13 @@ version_gen_environment.set('GIT_VERSION', get_option('version'))
compiler = meson.get_compiler('c')
+compat_sources = [
+ 'compat/nonblock.c',
+ 'compat/obstack.c',
+ 'compat/open.c',
+ 'compat/terminal.c',
+]
+
libgit_sources = [
'abspath.c',
'add-interactive.c',
@@ -304,10 +311,6 @@ libgit_sources = [
'commit.c',
'common-exit.c',
'common-init.c',
- 'compat/nonblock.c',
- 'compat/obstack.c',
- 'compat/open.c',
- 'compat/terminal.c',
'compiler-tricks/not-constant.c',
'config.c',
'connect.c',
@@ -556,7 +559,7 @@ libgit_sources = [
libgit_sources += custom_target(
input: 'command-list.txt',
output: 'command-list.h',
- command: [shell, meson.current_source_dir() + '/generate-cmdlist.sh', meson.current_source_dir(), '@OUTPUT@'],
+ command: [shell, meson.current_source_dir() + '/tools/generate-cmdlist.sh', meson.current_source_dir(), '@OUTPUT@'],
env: script_environment,
)
@@ -725,10 +728,10 @@ endif
builtin_sources += custom_target(
output: 'config-list.h',
depfile: 'config-list.h.d',
- depend_files: [ 'generate-configlist.sh' ],
+ depend_files: [ 'tools/generate-configlist.sh' ],
command: [
shell,
- meson.current_source_dir() / 'generate-configlist.sh',
+ meson.current_source_dir() / 'tools/generate-configlist.sh',
meson.current_source_dir(),
'@OUTPUT@',
'@DEPFILE@',
@@ -741,7 +744,7 @@ builtin_sources += custom_target(
output: 'hook-list.h',
command: [
shell,
- meson.current_source_dir() + '/generate-hooklist.sh',
+ meson.current_source_dir() + '/tools/generate-hooklist.sh',
meson.current_source_dir(),
'@OUTPUT@',
],
@@ -1175,7 +1178,7 @@ endif
if not has_poll_h and not has_sys_poll_h
libgit_c_args += '-DNO_POLL'
- libgit_sources += 'compat/poll/poll.c'
+ compat_sources += 'compat/poll/poll.c'
libgit_include_directories += 'compat/poll'
endif
@@ -1191,7 +1194,7 @@ endif
# implementation to treat things like drive prefixes specially.
if host_machine.system() == 'windows' or not compiler.has_header('libgen.h')
libgit_c_args += '-DNO_LIBGEN_H'
- libgit_sources += 'compat/basename.c'
+ compat_sources += 'compat/basename.c'
endif
if compiler.has_header('paths.h')
@@ -1221,7 +1224,7 @@ if host_machine.system() != 'windows'
foreach symbol : ['inet_ntop', 'inet_pton', 'hstrerror']
if not compiler.has_function(symbol, dependencies: networking_dependencies)
libgit_c_args += '-DNO_' + symbol.to_upper()
- libgit_sources += 'compat/' + symbol + '.c'
+ compat_sources += 'compat/' + symbol + '.c'
endif
endforeach
endif
@@ -1263,18 +1266,18 @@ else
endif
if host_machine.system() == 'darwin'
- libgit_sources += 'compat/precompose_utf8.c'
+ compat_sources += 'compat/precompose_utf8.c'
libgit_c_args += '-DPRECOMPOSE_UNICODE'
libgit_c_args += '-DPROTECT_HFS_DEFAULT'
endif
# Configure general compatibility wrappers.
if host_machine.system() == 'cygwin'
- libgit_sources += [
+ compat_sources += [
'compat/win32/path-utils.c',
]
elif host_machine.system() == 'windows'
- libgit_sources += [
+ compat_sources += [
'compat/winansi.c',
'compat/win32/dirent.c',
'compat/win32/flush.c',
@@ -1301,20 +1304,20 @@ elif host_machine.system() == 'windows'
libgit_include_directories += 'compat/win32'
if compiler.get_id() == 'msvc'
libgit_include_directories += 'compat/vcbuild/include'
- libgit_sources += 'compat/msvc.c'
+ compat_sources += 'compat/msvc.c'
else
- libgit_sources += 'compat/mingw.c'
+ compat_sources += 'compat/mingw.c'
endif
endif
if host_machine.system() == 'linux'
- libgit_sources += 'compat/linux/procinfo.c'
+ compat_sources += 'compat/linux/procinfo.c'
elif host_machine.system() == 'windows'
- libgit_sources += 'compat/win32/trace2_win32_process_info.c'
+ compat_sources += 'compat/win32/trace2_win32_process_info.c'
elif host_machine.system() == 'darwin'
- libgit_sources += 'compat/darwin/procinfo.c'
+ compat_sources += 'compat/darwin/procinfo.c'
else
- libgit_sources += 'compat/stub/procinfo.c'
+ compat_sources += 'compat/stub/procinfo.c'
endif
if host_machine.system() == 'cygwin' or host_machine.system() == 'windows'
@@ -1327,13 +1330,13 @@ endif
# Configure the simple-ipc subsystem required for the fsmonitor.
if host_machine.system() == 'windows'
- libgit_sources += [
+ compat_sources += [
'compat/simple-ipc/ipc-shared.c',
'compat/simple-ipc/ipc-win32.c',
]
libgit_c_args += '-DSUPPORTS_SIMPLE_IPC'
else
- libgit_sources += [
+ compat_sources += [
'compat/simple-ipc/ipc-shared.c',
'compat/simple-ipc/ipc-unix-socket.c',
]
@@ -1351,7 +1354,7 @@ if fsmonitor_backend != ''
libgit_c_args += '-DHAVE_FSMONITOR_DAEMON_BACKEND'
libgit_c_args += '-DHAVE_FSMONITOR_OS_SETTINGS'
- libgit_sources += [
+ compat_sources += [
'compat/fsmonitor/fsm-health-' + fsmonitor_backend + '.c',
'compat/fsmonitor/fsm-ipc-' + fsmonitor_backend + '.c',
'compat/fsmonitor/fsm-listen-' + fsmonitor_backend + '.c',
@@ -1367,7 +1370,7 @@ if not get_option('b_sanitize').contains('address') and get_option('regex').allo
if compiler.get_define('REG_ENHANCED', prefix: '#include <regex.h>') != ''
libgit_c_args += '-DUSE_ENHANCED_BASIC_REGULAR_EXPRESSIONS'
- libgit_sources += 'compat/regcomp_enhanced.c'
+ compat_sources += 'compat/regcomp_enhanced.c'
endif
elif not get_option('regex').enabled()
libgit_c_args += [
@@ -1376,7 +1379,7 @@ elif not get_option('regex').enabled()
'-DNO_MBSUPPORT',
]
build_options_config.set('NO_REGEX', '1')
- libgit_sources += 'compat/regex/regex.c'
+ compat_sources += 'compat/regex/regex.c'
libgit_include_directories += 'compat/regex'
else
error('Native regex support requested but not found')
@@ -1441,7 +1444,7 @@ else
if get_option('b_sanitize').contains('address') or get_option('b_sanitize').contains('leak')
libgit_c_args += '-DNO_MMAP'
- libgit_sources += 'compat/mmap.c'
+ compat_sources += 'compat/mmap.c'
else
checkfuncs += { 'mmap': ['mmap.c'] }
endif
@@ -1451,7 +1454,7 @@ foreach func, impls : checkfuncs
if not compiler.has_function(func)
libgit_c_args += '-DNO_' + func.to_upper()
foreach impl : impls
- libgit_sources += 'compat/' + impl
+ compat_sources += 'compat/' + impl
endforeach
endif
endforeach
@@ -1462,13 +1465,13 @@ endif
if not compiler.has_function('strdup')
libgit_c_args += '-DOVERRIDE_STRDUP'
- libgit_sources += 'compat/strdup.c'
+ compat_sources += 'compat/strdup.c'
endif
if not compiler.has_function('qsort')
libgit_c_args += '-DINTERNAL_QSORT'
endif
-libgit_sources += 'compat/qsort_s.c'
+compat_sources += 'compat/qsort_s.c'
if compiler.has_function('getdelim')
libgit_c_args += '-DHAVE_GETDELIM'
@@ -1524,7 +1527,7 @@ if meson.can_run_host_binaries() and compiler.run('''
}
''', name: 'fread reads directories').returncode() == 0
libgit_c_args += '-DFREAD_READS_DIRECTORIES'
- libgit_sources += 'compat/fopen.c'
+ compat_sources += 'compat/fopen.c'
endif
if not meson.is_cross_build() and fs.exists('/dev/tty')
@@ -1758,14 +1761,23 @@ else
endif
libgit = declare_dependency(
- link_with: static_library('git',
- sources: libgit_sources,
- c_args: libgit_c_args + [
- '-DGIT_VERSION_H="' + version_def_h.full_path() + '"',
- ],
- dependencies: libgit_dependencies,
- include_directories: libgit_include_directories,
- ),
+ link_with: [
+ static_library('compat',
+ sources: compat_sources,
+ c_args: libgit_c_args,
+ dependencies: libgit_dependencies,
+ include_directories: libgit_include_directories,
+ ),
+ static_library('git',
+ sources: libgit_sources,
+ c_args: libgit_c_args + [
+ '-DGIT_VERSION_H="' + version_def_h.full_path() + '"',
+ ],
+ c_pch: 'tools/precompiled.h',
+ dependencies: libgit_dependencies,
+ include_directories: libgit_include_directories,
+ ),
+ ],
compile_args: libgit_c_args,
dependencies: libgit_dependencies,
include_directories: libgit_include_directories,
@@ -1822,6 +1834,7 @@ test_dependencies = [ ]
git_builtin = executable('git',
sources: builtin_sources + 'git.c',
+ c_pch: 'tools/precompiled.h',
dependencies: [libgit_commonmain],
install: true,
install_dir: git_exec_path,
@@ -1972,7 +1985,7 @@ foreach script : scripts_sh
output: fs.stem(script),
command: [
shell,
- meson.project_source_root() / 'generate-script.sh',
+ meson.project_source_root() / 'tools/generate-script.sh',
'@INPUT@',
'@OUTPUT@',
meson.project_build_root() / 'GIT-BUILD-OPTIONS',
@@ -2021,7 +2034,7 @@ if perl_features_enabled
generate_perl_command = [
shell,
- meson.project_source_root() / 'generate-perl.sh',
+ meson.project_source_root() / 'tools/generate-perl.sh',
meson.project_build_root() / 'GIT-BUILD-OPTIONS',
git_version_file.full_path(),
perl_header,
@@ -2070,7 +2083,7 @@ if target_python.found()
output: fs.stem(script),
command: [
shell,
- meson.project_source_root() / 'generate-python.sh',
+ meson.project_source_root() / 'tools/generate-python.sh',
meson.project_build_root() / 'GIT-BUILD-OPTIONS',
'@INPUT@',
'@OUTPUT@',
@@ -2162,6 +2175,7 @@ else
endif
subdir('contrib')
+subdir('tools')
# Note that the target is intentionally configured after including the
# 'contrib' directory, as some tools there also have their own manpages.
diff --git a/midx-write.c b/midx-write.c
index 6485cb6706..0ff2e45aa7 100644
--- a/midx-write.c
+++ b/midx-write.c
@@ -36,10 +36,13 @@ extern int cmp_idx_or_pack_name(const char *idx_or_pack_name,
static size_t write_midx_header(const struct git_hash_algo *hash_algo,
struct hashfile *f, unsigned char num_chunks,
- uint32_t num_packs)
+ uint32_t num_packs, int version)
{
+ if (version != MIDX_VERSION_V1 && version != MIDX_VERSION_V2)
+ BUG("unexpected MIDX version: %d", version);
+
hashwrite_be32(f, MIDX_SIGNATURE);
- hashwrite_u8(f, MIDX_VERSION);
+ hashwrite_u8(f, version);
hashwrite_u8(f, oid_version(hash_algo));
hashwrite_u8(f, num_chunks);
hashwrite_u8(f, 0); /* unused */
@@ -105,15 +108,29 @@ struct write_midx_context {
uint32_t preferred_pack_idx;
+ int version; /* must be MIDX_VERSION_V1 or _V2 */
+
int incremental;
uint32_t num_multi_pack_indexes_before;
+ struct multi_pack_index *compact_from;
+ struct multi_pack_index *compact_to;
+ int compact;
+
struct string_list *to_include;
struct repository *repo;
struct odb_source *source;
};
+static uint32_t midx_pack_perm(struct write_midx_context *ctx,
+ uint32_t orig_pack_int_id)
+{
+ if (ctx->compact)
+ orig_pack_int_id -= ctx->compact_from->num_packs_in_base;
+ return ctx->pack_perm[orig_pack_int_id];
+}
+
static int should_include_pack(const struct write_midx_context *ctx,
const char *file_name)
{
@@ -257,18 +274,14 @@ static void midx_fanout_sort(struct midx_fanout *fanout)
QSORT(fanout->entries, fanout->nr, midx_oid_compare);
}
-static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
- struct multi_pack_index *m,
- uint32_t cur_fanout,
- uint32_t preferred_pack)
+static void midx_fanout_add_midx_fanout_1(struct midx_fanout *fanout,
+ struct multi_pack_index *m,
+ uint32_t cur_fanout,
+ uint32_t preferred_pack)
{
uint32_t start = m->num_objects_in_base, end;
uint32_t cur_object;
- if (m->base_midx)
- midx_fanout_add_midx_fanout(fanout, m->base_midx, cur_fanout,
- preferred_pack);
-
if (cur_fanout)
start += ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
end = m->num_objects_in_base + ntohl(m->chunk_oid_fanout[cur_fanout]);
@@ -292,6 +305,17 @@ static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
}
}
+static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
+ struct multi_pack_index *m,
+ uint32_t cur_fanout,
+ uint32_t preferred_pack)
+{
+ if (m->base_midx)
+ midx_fanout_add_midx_fanout(fanout, m->base_midx, cur_fanout,
+ preferred_pack);
+ midx_fanout_add_midx_fanout_1(fanout, m, cur_fanout, preferred_pack);
+}
+
static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
struct pack_info *info,
uint32_t cur_pack,
@@ -317,6 +341,45 @@ static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
}
}
+static void midx_fanout_add(struct midx_fanout *fanout,
+ struct write_midx_context *ctx,
+ uint32_t start_pack,
+ uint32_t cur_fanout)
+{
+ uint32_t cur_pack;
+
+ if (ctx->m && !ctx->incremental)
+ midx_fanout_add_midx_fanout(fanout, ctx->m, cur_fanout,
+ ctx->preferred_pack_idx);
+
+ for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++) {
+ int preferred = cur_pack == ctx->preferred_pack_idx;
+ midx_fanout_add_pack_fanout(fanout, ctx->info, cur_pack,
+ preferred, cur_fanout);
+ }
+
+ if (ctx->preferred_pack_idx != NO_PREFERRED_PACK &&
+ ctx->preferred_pack_idx < start_pack)
+ midx_fanout_add_pack_fanout(fanout, ctx->info,
+ ctx->preferred_pack_idx, 1,
+ cur_fanout);
+}
+
+static void midx_fanout_add_compact(struct midx_fanout *fanout,
+ struct write_midx_context *ctx,
+ uint32_t cur_fanout)
+{
+ struct multi_pack_index *m = ctx->compact_to;
+
+ ASSERT(ctx->compact);
+
+ while (m && m != ctx->compact_from->base_midx) {
+ midx_fanout_add_midx_fanout_1(fanout, m, cur_fanout,
+ NO_PREFERRED_PACK);
+ m = m->base_midx;
+ }
+}
+
/*
* It is possible to artificially get into a state where there are many
* duplicate copies of objects. That can create high memory pressure if
@@ -335,6 +398,9 @@ static void compute_sorted_entries(struct write_midx_context *ctx,
size_t alloc_objects, total_objects = 0;
struct midx_fanout fanout = { 0 };
+ if (ctx->compact)
+ ASSERT(!start_pack);
+
for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++)
total_objects = st_add(total_objects,
ctx->info[cur_pack].p->num_objects);
@@ -353,23 +419,10 @@ static void compute_sorted_entries(struct write_midx_context *ctx,
for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
fanout.nr = 0;
- if (ctx->m && !ctx->incremental)
- midx_fanout_add_midx_fanout(&fanout, ctx->m, cur_fanout,
- ctx->preferred_pack_idx);
-
- for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++) {
- int preferred = cur_pack == ctx->preferred_pack_idx;
- midx_fanout_add_pack_fanout(&fanout,
- ctx->info, cur_pack,
- preferred, cur_fanout);
- }
-
- if (ctx->preferred_pack_idx != NO_PREFERRED_PACK &&
- ctx->preferred_pack_idx < start_pack)
- midx_fanout_add_pack_fanout(&fanout, ctx->info,
- ctx->preferred_pack_idx, 1,
- cur_fanout);
-
+ if (ctx->compact)
+ midx_fanout_add_compact(&fanout, ctx, cur_fanout);
+ else
+ midx_fanout_add(&fanout, ctx, start_pack, cur_fanout);
midx_fanout_sort(&fanout);
/*
@@ -410,7 +463,9 @@ static int write_midx_pack_names(struct hashfile *f, void *data)
if (ctx->info[i].expired)
continue;
- if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
+ if (ctx->version == MIDX_VERSION_V1 &&
+ i && strcmp(ctx->info[i].pack_name,
+ ctx->info[i - 1].pack_name) <= 0)
BUG("incorrect pack-file order: %s before %s",
ctx->info[i - 1].pack_name,
ctx->info[i].pack_name);
@@ -514,12 +569,12 @@ static int write_midx_object_offsets(struct hashfile *f,
for (i = 0; i < ctx->entries_nr; i++) {
struct pack_midx_entry *obj = list++;
- if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
+ if (midx_pack_perm(ctx, obj->pack_int_id) == PACK_EXPIRED)
BUG("object %s is in an expired pack with int-id %d",
oid_to_hex(&obj->oid),
obj->pack_int_id);
- hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
+ hashwrite_be32(f, midx_pack_perm(ctx, obj->pack_int_id));
if (ctx->large_offsets_needed && obj->offset >> 31)
hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
@@ -620,8 +675,8 @@ static uint32_t *midx_pack_order(struct write_midx_context *ctx)
for (i = 0; i < ctx->entries_nr; i++) {
struct pack_midx_entry *e = &ctx->entries[i];
data[i].nr = i;
- data[i].pack = ctx->pack_perm[e->pack_int_id];
- if (!e->preferred)
+ data[i].pack = midx_pack_perm(ctx, e->pack_int_id);
+ if (!e->preferred || ctx->compact)
data[i].pack |= (1U << 31);
data[i].offset = e->offset;
}
@@ -630,14 +685,14 @@ static uint32_t *midx_pack_order(struct write_midx_context *ctx)
for (i = 0; i < ctx->entries_nr; i++) {
struct pack_midx_entry *e = &ctx->entries[data[i].nr];
- struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
+ struct pack_info *pack = &ctx->info[midx_pack_perm(ctx, e->pack_int_id)];
if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
pack->bitmap_pos = i + base_objects;
pack->bitmap_nr++;
pack_order[i] = data[i].nr;
}
for (i = 0; i < ctx->nr; i++) {
- struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
+ struct pack_info *pack = &ctx->info[i];
if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
pack->bitmap_pos = 0;
}
@@ -691,7 +746,7 @@ static void prepare_midx_packing_data(struct packing_data *pdata,
struct object_entry *to = packlist_alloc(pdata, &from->oid);
oe_set_in_pack(pdata, to,
- ctx->info[ctx->pack_perm[from->pack_int_id]].p);
+ ctx->info[midx_pack_perm(ctx, from->pack_int_id)].p);
}
trace2_region_leave("midx", "prepare_midx_packing_data", ctx->repo);
@@ -900,6 +955,21 @@ cleanup:
return ret;
}
+static int fill_pack_from_midx(struct pack_info *info,
+ struct multi_pack_index *m,
+ uint32_t pack_int_id)
+{
+ if (prepare_midx_pack(m, pack_int_id))
+ return error(_("could not load pack %d"), pack_int_id);
+
+ fill_pack_info(info,
+ m->packs[pack_int_id - m->num_packs_in_base],
+ m->pack_names[pack_int_id - m->num_packs_in_base],
+ pack_int_id);
+
+ return 0;
+}
+
static int fill_packs_from_midx(struct write_midx_context *ctx)
{
struct multi_pack_index *m;
@@ -907,19 +977,88 @@ static int fill_packs_from_midx(struct write_midx_context *ctx)
for (m = ctx->m; m; m = m->base_midx) {
uint32_t i;
- for (i = 0; i < m->num_packs; i++) {
- if (prepare_midx_pack(m, m->num_packs_in_base + i))
- return error(_("could not load pack"));
-
+ for (i = m->num_packs_in_base;
+ i < m->num_packs_in_base + m->num_packs; i++) {
ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
- fill_pack_info(&ctx->info[ctx->nr++], m->packs[i],
- m->pack_names[i],
- m->num_packs_in_base + i);
+
+ if (fill_pack_from_midx(&ctx->info[ctx->nr], m, i) < 0)
+ return -1;
+
+ ctx->nr++;
}
}
return 0;
}
+static uint32_t compactible_packs_between(const struct multi_pack_index *from,
+ const struct multi_pack_index *to)
+{
+ uint32_t nr;
+
+ ASSERT(from && to);
+
+ if (unsigned_add_overflows(to->num_packs, to->num_packs_in_base))
+ die(_("too many packs, unable to compact"));
+
+ nr = to->num_packs + to->num_packs_in_base;
+ if (nr < from->num_packs_in_base)
+ BUG("unexpected number of packs in base during compaction: "
+ "%"PRIu32" < %"PRIu32, nr, from->num_packs_in_base);
+
+ return nr - from->num_packs_in_base;
+}
+
+static int fill_packs_from_midx_range(struct write_midx_context *ctx,
+ int bitmap_order)
+{
+ struct multi_pack_index *m = ctx->compact_to;
+ uint32_t packs_nr;
+
+ ASSERT(ctx->compact && !ctx->nr);
+ ASSERT(ctx->compact_from);
+ ASSERT(ctx->compact_to);
+
+ packs_nr = compactible_packs_between(ctx->compact_from,
+ ctx->compact_to);
+
+ ALLOC_GROW(ctx->info, packs_nr, ctx->alloc);
+
+ while (m != ctx->compact_from->base_midx) {
+ uint32_t pack_int_id, preferred_pack_id;
+ uint32_t i;
+
+ if (bitmap_order) {
+ if (midx_preferred_pack(m, &preferred_pack_id) < 0)
+ die(_("could not determine preferred pack"));
+ } else {
+ preferred_pack_id = m->num_packs_in_base;
+ }
+
+ pack_int_id = m->num_packs_in_base - ctx->compact_from->num_packs_in_base;
+
+ if (fill_pack_from_midx(&ctx->info[pack_int_id++], m,
+ preferred_pack_id) < 0)
+ return -1;
+
+ for (i = m->num_packs_in_base;
+ i < m->num_packs_in_base + m->num_packs; i++) {
+ if (preferred_pack_id == i)
+ continue;
+
+ if (fill_pack_from_midx(&ctx->info[pack_int_id++], m,
+ i) < 0)
+ return -1;
+ }
+
+ ctx->nr += m->num_packs;
+ m = m->base_midx;
+ }
+
+ ASSERT(ctx->nr == packs_nr);
+
+ return 0;
+}
+
static struct {
const char *non_split;
const char *split;
@@ -946,7 +1085,7 @@ static int link_midx_to_chain(struct multi_pack_index *m)
}
for (i = 0; i < ARRAY_SIZE(midx_exts); i++) {
- const unsigned char *hash = get_midx_checksum(m);
+ const unsigned char *hash = midx_get_checksum_hash(m);
get_midx_filename_ext(m->source, &from,
hash, midx_exts[i].non_split);
@@ -1026,6 +1165,12 @@ static bool midx_needs_update(struct multi_pack_index *midx, struct write_midx_c
goto out;
/*
+ * If the version differs, we need to update.
+ */
+ if (midx->version != ctx->version)
+ goto out;
+
+ /*
* Ignore incremental updates for now. The assumption is that any
* incremental update would be either empty (in which case we will bail
* out later) or it would actually cover at least one new pack.
@@ -1033,6 +1178,9 @@ static bool midx_needs_update(struct multi_pack_index *midx, struct write_midx_c
if (ctx->incremental)
goto out;
+ if (ctx->compact)
+ goto out; /* Compaction always requires an update. */
+
/*
* Otherwise, we need to verify that the packs covered by the existing
* MIDX match the packs that we already have. The logic to do so is way
@@ -1078,14 +1226,31 @@ out:
return needed;
}
-static int write_midx_internal(struct odb_source *source,
- struct string_list *packs_to_include,
- struct string_list *packs_to_drop,
- const char *preferred_pack_name,
- const char *refs_snapshot,
- unsigned flags)
+static int midx_hashcmp(const struct multi_pack_index *a,
+ const struct multi_pack_index *b,
+ const struct git_hash_algo *algop)
{
- struct repository *r = source->odb->repo;
+ return hashcmp(midx_get_checksum_hash(a), midx_get_checksum_hash(b),
+ algop);
+}
+
+struct write_midx_opts {
+ struct odb_source *source; /* non-optional */
+
+ struct string_list *packs_to_include;
+ struct string_list *packs_to_drop;
+
+ struct multi_pack_index *compact_from;
+ struct multi_pack_index *compact_to;
+
+ const char *preferred_pack_name;
+ const char *refs_snapshot;
+ unsigned flags;
+};
+
+static int write_midx_internal(struct write_midx_opts *opts)
+{
+ struct repository *r = opts->source->odb->repo;
struct strbuf midx_name = STRBUF_INIT;
unsigned char midx_hash[GIT_MAX_RAWSZ];
uint32_t start_pack;
@@ -1094,6 +1259,7 @@ static int write_midx_internal(struct odb_source *source,
struct tempfile *incr;
struct write_midx_context ctx = {
.preferred_pack_idx = NO_PREFERRED_PACK,
+ .version = MIDX_VERSION_V2,
};
struct multi_pack_index *midx_to_free = NULL;
int bitmapped_packs_concat_len = 0;
@@ -1101,27 +1267,45 @@ static int write_midx_internal(struct odb_source *source,
int dropped_packs = 0;
int result = -1;
const char **keep_hashes = NULL;
+ size_t keep_hashes_nr = 0;
struct chunkfile *cf;
trace2_region_enter("midx", "write_midx_internal", r);
ctx.repo = r;
- ctx.source = source;
+ ctx.source = opts->source;
+
+ repo_config_get_int(ctx.repo, "midx.version", &ctx.version);
+ if (ctx.version != MIDX_VERSION_V1 && ctx.version != MIDX_VERSION_V2)
+ die(_("unknown MIDX version: %d"), ctx.version);
+
+ ctx.incremental = !!(opts->flags & MIDX_WRITE_INCREMENTAL);
+ ctx.compact = !!(opts->flags & MIDX_WRITE_COMPACT);
+
+ if (ctx.compact) {
+ if (ctx.version != MIDX_VERSION_V2)
+ die(_("cannot perform MIDX compaction with v1 format"));
+ if (!opts->compact_from)
+ BUG("expected non-NULL 'from' MIDX during compaction");
+ if (!opts->compact_to)
+ BUG("expected non-NULL 'to' MIDX during compaction");
- ctx.incremental = !!(flags & MIDX_WRITE_INCREMENTAL);
+ ctx.compact_from = opts->compact_from;
+ ctx.compact_to = opts->compact_to;
+ }
if (ctx.incremental)
strbuf_addf(&midx_name,
"%s/pack/multi-pack-index.d/tmp_midx_XXXXXX",
- source->path);
+ opts->source->path);
else
- get_midx_filename(source, &midx_name);
+ get_midx_filename(opts->source, &midx_name);
if (safe_create_leading_directories(r, midx_name.buf))
die_errno(_("unable to create leading directories of %s"),
midx_name.buf);
- if (!packs_to_include || ctx.incremental) {
- struct multi_pack_index *m = get_multi_pack_index(source);
+ if (!opts->packs_to_include || ctx.incremental) {
+ struct multi_pack_index *m = get_multi_pack_index(opts->source);
if (m && !midx_checksum_valid(m)) {
warning(_("ignoring existing multi-pack-index; checksum mismatch"));
m = NULL;
@@ -1136,11 +1320,18 @@ static int write_midx_internal(struct odb_source *source,
*/
if (ctx.incremental)
ctx.base_midx = m;
- else if (!packs_to_include)
+ if (!opts->packs_to_include)
ctx.m = m;
}
}
+ /*
+ * If compacting MIDX layer(s) in the range [from, to], then the
+ * compacted MIDX will share the same base MIDX as 'from'.
+ */
+ if (ctx.compact)
+ ctx.base_midx = ctx.compact_from->base_midx;
+
ctx.nr = 0;
ctx.alloc = ctx.m ? ctx.m->num_packs + ctx.m->num_packs_in_base : 16;
ctx.info = NULL;
@@ -1149,34 +1340,42 @@ static int write_midx_internal(struct odb_source *source,
if (ctx.incremental) {
struct multi_pack_index *m = ctx.base_midx;
while (m) {
- if (flags & MIDX_WRITE_BITMAP && load_midx_revindex(m)) {
+ if (opts->flags & MIDX_WRITE_BITMAP && load_midx_revindex(m)) {
error(_("could not load reverse index for MIDX %s"),
- hash_to_hex_algop(get_midx_checksum(m),
- m->source->odb->repo->hash_algo));
+ midx_get_checksum_hex(m));
goto cleanup;
}
ctx.num_multi_pack_indexes_before++;
m = m->base_midx;
}
- } else if (ctx.m && fill_packs_from_midx(&ctx)) {
+ } else if (ctx.m && !ctx.compact && fill_packs_from_midx(&ctx)) {
goto cleanup;
}
start_pack = ctx.nr;
ctx.pack_paths_checked = 0;
- if (flags & MIDX_PROGRESS)
+ if (opts->flags & MIDX_PROGRESS)
ctx.progress = start_delayed_progress(r,
_("Adding packfiles to multi-pack-index"), 0);
else
ctx.progress = NULL;
- ctx.to_include = packs_to_include;
+ if (ctx.compact) {
+ int bitmap_order = 0;
+ if (opts->preferred_pack_name)
+ bitmap_order |= 1;
+ else if (opts->flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))
+ bitmap_order |= 1;
- for_each_file_in_pack_dir(source->path, add_pack_to_midx, &ctx);
+ fill_packs_from_midx_range(&ctx, bitmap_order);
+ } else {
+ ctx.to_include = opts->packs_to_include;
+ for_each_file_in_pack_dir(opts->source->path, add_pack_to_midx, &ctx);
+ }
stop_progress(&ctx.progress);
- if (!packs_to_drop) {
+ if (!opts->packs_to_drop) {
/*
* If there is no MIDX then either it doesn't exist, or we're
* doing a geometric repack. Try to load it from the source to
@@ -1189,7 +1388,7 @@ static int write_midx_internal(struct odb_source *source,
if (midx && !midx_needs_update(midx, &ctx)) {
struct bitmap_index *bitmap_git;
int bitmap_exists;
- int want_bitmap = flags & MIDX_WRITE_BITMAP;
+ int want_bitmap = opts->flags & MIDX_WRITE_BITMAP;
bitmap_git = prepare_midx_bitmap_git(midx);
bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
@@ -1201,7 +1400,7 @@ static int write_midx_internal(struct odb_source *source,
* corresponding bitmap (or one wasn't requested).
*/
if (!want_bitmap)
- clear_midx_files_ext(source, "bitmap", NULL);
+ clear_midx_files_ext(ctx.source, "bitmap", NULL);
result = 0;
goto cleanup;
}
@@ -1216,11 +1415,11 @@ static int write_midx_internal(struct odb_source *source,
goto cleanup; /* nothing to do */
}
- if (preferred_pack_name) {
+ if (opts->preferred_pack_name) {
ctx.preferred_pack_idx = NO_PREFERRED_PACK;
for (size_t i = 0; i < ctx.nr; i++) {
- if (!cmp_idx_or_pack_name(preferred_pack_name,
+ if (!cmp_idx_or_pack_name(opts->preferred_pack_name,
ctx.info[i].pack_name)) {
ctx.preferred_pack_idx = i;
break;
@@ -1229,9 +1428,9 @@ static int write_midx_internal(struct odb_source *source,
if (ctx.preferred_pack_idx == NO_PREFERRED_PACK)
warning(_("unknown preferred pack: '%s'"),
- preferred_pack_name);
+ opts->preferred_pack_name);
} else if (ctx.nr &&
- (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
+ (opts->flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
struct packed_git *oldest = ctx.info[0].p;
ctx.preferred_pack_idx = 0;
@@ -1242,7 +1441,7 @@ static int write_midx_internal(struct odb_source *source,
*/
open_pack_index(oldest);
- if (packs_to_drop && packs_to_drop->nr)
+ if (opts->packs_to_drop && opts->packs_to_drop->nr)
BUG("cannot write a MIDX bitmap during expiration");
/*
@@ -1302,22 +1501,30 @@ static int write_midx_internal(struct odb_source *source,
ctx.large_offsets_needed = 1;
}
- QSORT(ctx.info, ctx.nr, pack_info_compare);
+ if (ctx.compact) {
+ if (ctx.version != MIDX_VERSION_V2)
+ BUG("performing MIDX compaction with v1 MIDX");
+ } else {
+ QSORT(ctx.info, ctx.nr, pack_info_compare);
+ }
- if (packs_to_drop && packs_to_drop->nr) {
+ if (opts->packs_to_drop && opts->packs_to_drop->nr) {
size_t drop_index = 0;
int missing_drops = 0;
- for (size_t i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
+ ASSERT(!ctx.compact);
+
+ for (size_t i = 0;
+ i < ctx.nr && drop_index < opts->packs_to_drop->nr; i++) {
int cmp = strcmp(ctx.info[i].pack_name,
- packs_to_drop->items[drop_index].string);
+ opts->packs_to_drop->items[drop_index].string);
if (!cmp) {
drop_index++;
ctx.info[i].expired = 1;
} else if (cmp > 0) {
error(_("did not see pack-file %s to drop"),
- packs_to_drop->items[drop_index].string);
+ opts->packs_to_drop->items[drop_index].string);
drop_index++;
missing_drops++;
i--;
@@ -1338,12 +1545,20 @@ static int write_midx_internal(struct odb_source *source,
*/
ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
for (size_t i = 0; i < ctx.nr; i++) {
+ uint32_t from = ctx.info[i].orig_pack_int_id;
+ uint32_t to;
+
if (ctx.info[i].expired) {
+ to = PACK_EXPIRED;
dropped_packs++;
- ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
} else {
- ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
+ to = i - dropped_packs;
}
+
+ if (ctx.compact)
+ from -= ctx.compact_from->num_packs_in_base;
+
+ ctx.pack_perm[from] = to;
}
for (size_t i = 0; i < ctx.nr; i++) {
@@ -1354,16 +1569,16 @@ static int write_midx_internal(struct odb_source *source,
}
/* Check that the preferred pack wasn't expired (if given). */
- if (preferred_pack_name) {
- struct pack_info *preferred = bsearch(preferred_pack_name,
+ if (opts->preferred_pack_name) {
+ struct pack_info *preferred = bsearch(opts->preferred_pack_name,
ctx.info, ctx.nr,
sizeof(*ctx.info),
idx_or_pack_name_cmp);
if (preferred) {
- uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
+ uint32_t perm = midx_pack_perm(&ctx, preferred->orig_pack_int_id);
if (perm == PACK_EXPIRED)
warning(_("preferred pack '%s' is expired"),
- preferred_pack_name);
+ opts->preferred_pack_name);
}
}
@@ -1377,15 +1592,15 @@ static int write_midx_internal(struct odb_source *source,
}
if (!ctx.entries_nr) {
- if (flags & MIDX_WRITE_BITMAP)
+ if (opts->flags & MIDX_WRITE_BITMAP)
warning(_("refusing to write multi-pack .bitmap without any objects"));
- flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
+ opts->flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
}
if (ctx.incremental) {
struct strbuf lock_name = STRBUF_INIT;
- get_midx_chain_filename(source, &lock_name);
+ get_midx_chain_filename(opts->source, &lock_name);
hold_lock_file_for_update(&lk, lock_name.buf, LOCK_DIE_ON_ERROR);
strbuf_release(&lock_name);
@@ -1428,7 +1643,7 @@ static int write_midx_internal(struct odb_source *source,
MIDX_CHUNK_LARGE_OFFSET_WIDTH),
write_midx_large_offsets);
- if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
+ if (opts->flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
ctx.pack_order = midx_pack_order(&ctx);
add_chunk(cf, MIDX_CHUNKID_REVINDEX,
st_mult(ctx.entries_nr, sizeof(uint32_t)),
@@ -1439,18 +1654,18 @@ static int write_midx_internal(struct odb_source *source,
}
write_midx_header(r->hash_algo, f, get_num_chunks(cf),
- ctx.nr - dropped_packs);
+ ctx.nr - dropped_packs, ctx.version);
write_chunkfile(cf, &ctx);
finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
CSUM_FSYNC | CSUM_HASH_IN_STREAM);
free_chunkfile(cf);
- if (flags & MIDX_WRITE_REV_INDEX &&
+ if (opts->flags & MIDX_WRITE_REV_INDEX &&
git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
write_midx_reverse_index(&ctx, midx_hash);
- if (flags & MIDX_WRITE_BITMAP) {
+ if (opts->flags & MIDX_WRITE_BITMAP) {
struct packing_data pdata;
struct commit_stack commits = COMMIT_STACK_INIT;
@@ -1459,7 +1674,7 @@ static int write_midx_internal(struct odb_source *source,
prepare_midx_packing_data(&pdata, &ctx);
- find_commits_for_midx_bitmap(&commits, refs_snapshot, &ctx);
+ find_commits_for_midx_bitmap(&commits, opts->refs_snapshot, &ctx);
/*
* The previous steps translated the information from
@@ -1470,8 +1685,8 @@ static int write_midx_internal(struct odb_source *source,
FREE_AND_NULL(ctx.entries);
ctx.entries_nr = 0;
- if (write_midx_bitmap(&ctx, midx_hash, &pdata,
- commits.items, commits.nr, flags) < 0) {
+ if (write_midx_bitmap(&ctx, midx_hash, &pdata, commits.items,
+ commits.nr, opts->flags) < 0) {
error(_("could not write multi-pack bitmap"));
clear_packing_data(&pdata);
commit_stack_clear(&commits);
@@ -1489,7 +1704,24 @@ static int write_midx_internal(struct odb_source *source,
if (ctx.num_multi_pack_indexes_before == UINT32_MAX)
die(_("too many multi-pack-indexes"));
- CALLOC_ARRAY(keep_hashes, ctx.num_multi_pack_indexes_before + 1);
+ if (ctx.compact) {
+ struct multi_pack_index *m;
+
+ /*
+ * Keep all MIDX layers excluding those in the range [from, to].
+ */
+ for (m = ctx.base_midx; m; m = m->base_midx)
+ keep_hashes_nr++;
+ for (m = ctx.m;
+ m && midx_hashcmp(m, ctx.compact_to, r->hash_algo);
+ m = m->base_midx)
+ keep_hashes_nr++;
+
+ keep_hashes_nr++; /* include the compacted layer */
+ } else {
+ keep_hashes_nr = ctx.num_multi_pack_indexes_before + 1;
+ }
+ CALLOC_ARRAY(keep_hashes, keep_hashes_nr);
if (ctx.incremental) {
FILE *chainf = fdopen_lock_file(&lk, "w");
@@ -1504,7 +1736,7 @@ static int write_midx_internal(struct odb_source *source,
if (link_midx_to_chain(ctx.base_midx) < 0)
goto cleanup;
- get_split_midx_filename_ext(source, &final_midx_name,
+ get_split_midx_filename_ext(opts->source, &final_midx_name,
midx_hash, MIDX_EXT_MIDX);
if (rename_tempfile(&incr, final_midx_name.buf) < 0) {
@@ -1514,18 +1746,47 @@ static int write_midx_internal(struct odb_source *source,
strbuf_release(&final_midx_name);
- keep_hashes[ctx.num_multi_pack_indexes_before] =
- xstrdup(hash_to_hex_algop(midx_hash, r->hash_algo));
+ if (ctx.compact) {
+ struct multi_pack_index *m;
+ uint32_t num_layers_before_from = 0;
+ uint32_t i;
- for (uint32_t i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
- uint32_t j = ctx.num_multi_pack_indexes_before - i - 1;
+ for (m = ctx.base_midx; m; m = m->base_midx)
+ num_layers_before_from++;
- keep_hashes[j] = xstrdup(hash_to_hex_algop(get_midx_checksum(m),
+ m = ctx.base_midx;
+ for (i = 0; i < num_layers_before_from; i++) {
+ uint32_t j = num_layers_before_from - i - 1;
+
+ keep_hashes[j] = xstrdup(midx_get_checksum_hex(m));
+ m = m->base_midx;
+ }
+
+ keep_hashes[i] = xstrdup(hash_to_hex_algop(midx_hash,
r->hash_algo));
- m = m->base_midx;
+
+ i = 0;
+ for (m = ctx.m;
+ m && midx_hashcmp(m, ctx.compact_to, r->hash_algo);
+ m = m->base_midx) {
+ keep_hashes[keep_hashes_nr - i - 1] =
+ xstrdup(midx_get_checksum_hex(m));
+ i++;
+ }
+ } else {
+ keep_hashes[ctx.num_multi_pack_indexes_before] =
+ xstrdup(hash_to_hex_algop(midx_hash,
+ r->hash_algo));
+
+ for (uint32_t i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
+ uint32_t j = ctx.num_multi_pack_indexes_before - i - 1;
+
+ keep_hashes[j] = xstrdup(midx_get_checksum_hex(m));
+ m = m->base_midx;
+ }
}
- for (uint32_t i = 0; i <= ctx.num_multi_pack_indexes_before; i++)
+ for (uint32_t i = 0; i < keep_hashes_nr; i++)
fprintf(get_lock_file_fp(&lk), "%s\n", keep_hashes[i]);
} else {
keep_hashes[ctx.num_multi_pack_indexes_before] =
@@ -1538,8 +1799,7 @@ static int write_midx_internal(struct odb_source *source,
if (commit_lock_file(&lk) < 0)
die_errno(_("could not write multi-pack-index"));
- clear_midx_files(source, keep_hashes,
- ctx.num_multi_pack_indexes_before + 1,
+ clear_midx_files(opts->source, keep_hashes, keep_hashes_nr,
ctx.incremental);
result = 0;
@@ -1557,7 +1817,7 @@ cleanup:
free(ctx.pack_perm);
free(ctx.pack_order);
if (keep_hashes) {
- for (uint32_t i = 0; i <= ctx.num_multi_pack_indexes_before; i++)
+ for (uint32_t i = 0; i < keep_hashes_nr; i++)
free((char *)keep_hashes[i]);
free(keep_hashes);
}
@@ -1573,9 +1833,14 @@ int write_midx_file(struct odb_source *source,
const char *preferred_pack_name,
const char *refs_snapshot, unsigned flags)
{
- return write_midx_internal(source, NULL, NULL,
- preferred_pack_name, refs_snapshot,
- flags);
+ struct write_midx_opts opts = {
+ .source = source,
+ .preferred_pack_name = preferred_pack_name,
+ .refs_snapshot = refs_snapshot,
+ .flags = flags,
+ };
+
+ return write_midx_internal(&opts);
}
int write_midx_file_only(struct odb_source *source,
@@ -1583,8 +1848,30 @@ int write_midx_file_only(struct odb_source *source,
const char *preferred_pack_name,
const char *refs_snapshot, unsigned flags)
{
- return write_midx_internal(source, packs_to_include, NULL,
- preferred_pack_name, refs_snapshot, flags);
+ struct write_midx_opts opts = {
+ .source = source,
+ .packs_to_include = packs_to_include,
+ .preferred_pack_name = preferred_pack_name,
+ .refs_snapshot = refs_snapshot,
+ .flags = flags,
+ };
+
+ return write_midx_internal(&opts);
+}
+
+int write_midx_file_compact(struct odb_source *source,
+ struct multi_pack_index *from,
+ struct multi_pack_index *to,
+ unsigned flags)
+{
+ struct write_midx_opts opts = {
+ .source = source,
+ .compact_from = from,
+ .compact_to = to,
+ .flags = flags | MIDX_WRITE_COMPACT,
+ };
+
+ return write_midx_internal(&opts);
}
int expire_midx_packs(struct odb_source *source, unsigned flags)
@@ -1643,9 +1930,14 @@ int expire_midx_packs(struct odb_source *source, unsigned flags)
free(count);
- if (packs_to_drop.nr)
- result = write_midx_internal(source, NULL,
- &packs_to_drop, NULL, NULL, flags);
+ if (packs_to_drop.nr) {
+ struct write_midx_opts opts = {
+ .source = source,
+ .packs_to_drop = &packs_to_drop,
+ .flags = flags & MIDX_PROGRESS,
+ };
+ result = write_midx_internal(&opts);
+ }
string_list_clear(&packs_to_drop, 0);
@@ -1778,6 +2070,10 @@ int midx_repack(struct odb_source *source, size_t batch_size, unsigned flags)
struct child_process cmd = CHILD_PROCESS_INIT;
FILE *cmd_in;
struct multi_pack_index *m = get_multi_pack_index(source);
+ struct write_midx_opts opts = {
+ .source = source,
+ .flags = flags,
+ };
/*
* When updating the default for these configuration
@@ -1852,8 +2148,7 @@ int midx_repack(struct odb_source *source, size_t batch_size, unsigned flags)
goto cleanup;
}
- result = write_midx_internal(source, NULL, NULL, NULL, NULL,
- flags);
+ result = write_midx_internal(&opts);
cleanup:
free(include_pack);
diff --git a/midx.c b/midx.c
index ab8e2611d1..81d6ab11e6 100644
--- a/midx.c
+++ b/midx.c
@@ -24,7 +24,13 @@ void clear_incremental_midx_files_ext(struct odb_source *source, const char *ext
int cmp_idx_or_pack_name(const char *idx_or_pack_name,
const char *idx_name);
-const unsigned char *get_midx_checksum(struct multi_pack_index *m)
+const char *midx_get_checksum_hex(const struct multi_pack_index *m)
+{
+ return hash_to_hex_algop(midx_get_checksum_hash(m),
+ m->source->odb->repo->hash_algo);
+}
+
+const unsigned char *midx_get_checksum_hash(const struct multi_pack_index *m)
{
return m->data + m->data_len - m->source->odb->repo->hash_algo->rawsz;
}
@@ -144,7 +150,7 @@ static struct multi_pack_index *load_multi_pack_index_one(struct odb_source *sou
m->signature, MIDX_SIGNATURE);
m->version = m->data[MIDX_BYTE_FILE_VERSION];
- if (m->version != MIDX_VERSION)
+ if (m->version != MIDX_VERSION_V1 && m->version != MIDX_VERSION_V2)
die(_("multi-pack-index version %d not recognized"),
m->version);
@@ -205,7 +211,8 @@ static struct multi_pack_index *load_multi_pack_index_one(struct odb_source *sou
die(_("multi-pack-index pack-name chunk is too short"));
cur_pack_name = end + 1;
- if (i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0)
+ if (m->version == MIDX_VERSION_V1 &&
+ i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0)
die(_("multi-pack-index pack names out of order: '%s' before '%s'"),
m->pack_names[i - 1],
m->pack_names[i]);
@@ -406,6 +413,7 @@ void close_midx(struct multi_pack_index *m)
}
FREE_AND_NULL(m->packs);
FREE_AND_NULL(m->pack_names);
+ FREE_AND_NULL(m->pack_names_sorted);
free(m);
}
@@ -651,17 +659,40 @@ int cmp_idx_or_pack_name(const char *idx_or_pack_name,
return strcmp(idx_or_pack_name, idx_name);
}
+
+static int midx_pack_names_cmp(const void *a, const void *b, void *m_)
+{
+ struct multi_pack_index *m = m_;
+ return strcmp(m->pack_names[*(const size_t *)a],
+ m->pack_names[*(const size_t *)b]);
+}
+
static int midx_contains_pack_1(struct multi_pack_index *m,
const char *idx_or_pack_name)
{
uint32_t first = 0, last = m->num_packs;
+ if (m->version == MIDX_VERSION_V2 && !m->pack_names_sorted) {
+ uint32_t i;
+
+ ALLOC_ARRAY(m->pack_names_sorted, m->num_packs);
+
+ for (i = 0; i < m->num_packs; i++)
+ m->pack_names_sorted[i] = i;
+
+ QSORT_S(m->pack_names_sorted, m->num_packs, midx_pack_names_cmp,
+ m);
+ }
+
while (first < last) {
uint32_t mid = first + (last - first) / 2;
const char *current;
int cmp;
- current = m->pack_names[mid];
+ if (m->pack_names_sorted)
+ current = m->pack_names[m->pack_names_sorted[mid]];
+ else
+ current = m->pack_names[mid];
cmp = cmp_idx_or_pack_name(idx_or_pack_name, current);
if (!cmp)
return 1;
diff --git a/midx.h b/midx.h
index 6e54d73503..08f3728e52 100644
--- a/midx.h
+++ b/midx.h
@@ -11,7 +11,8 @@ struct git_hash_algo;
struct odb_source;
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
-#define MIDX_VERSION 1
+#define MIDX_VERSION_V1 1
+#define MIDX_VERSION_V2 2
#define MIDX_BYTE_FILE_VERSION 4
#define MIDX_BYTE_HASH_VERSION 5
#define MIDX_BYTE_NUM_CHUNKS 6
@@ -71,6 +72,7 @@ struct multi_pack_index {
uint32_t num_packs_in_base;
const char **pack_names;
+ size_t *pack_names_sorted;
struct packed_git **packs;
};
@@ -80,12 +82,14 @@ struct multi_pack_index {
#define MIDX_WRITE_BITMAP_HASH_CACHE (1 << 3)
#define MIDX_WRITE_BITMAP_LOOKUP_TABLE (1 << 4)
#define MIDX_WRITE_INCREMENTAL (1 << 5)
+#define MIDX_WRITE_COMPACT (1 << 6)
#define MIDX_EXT_REV "rev"
#define MIDX_EXT_BITMAP "bitmap"
#define MIDX_EXT_MIDX "midx"
-const unsigned char *get_midx_checksum(struct multi_pack_index *m);
+const char *midx_get_checksum_hex(const struct multi_pack_index *m) /* static buffer */;
+const unsigned char *midx_get_checksum_hash(const struct multi_pack_index *m);
void get_midx_filename(struct odb_source *source, struct strbuf *out);
void get_midx_filename_ext(struct odb_source *source, struct strbuf *out,
const unsigned char *hash, const char *ext);
@@ -128,6 +132,10 @@ int write_midx_file_only(struct odb_source *source,
struct string_list *packs_to_include,
const char *preferred_pack_name,
const char *refs_snapshot, unsigned flags);
+int write_midx_file_compact(struct odb_source *source,
+ struct multi_pack_index *from,
+ struct multi_pack_index *to,
+ unsigned flags);
void clear_midx_file(struct repository *r);
int verify_midx_file(struct odb_source *source, unsigned flags);
int expire_midx_packs(struct odb_source *source, unsigned flags);
diff --git a/negotiator/default.c b/negotiator/default.c
index 116dedcf83..3cac0476a7 100644
--- a/negotiator/default.c
+++ b/negotiator/default.c
@@ -57,19 +57,19 @@ static int clear_marks(const struct reference *ref, void *cb_data UNUSED)
static void mark_common(struct negotiation_state *ns, struct commit *commit,
int ancestors_only, int dont_parse)
{
- struct prio_queue queue = { NULL };
+ struct commit_stack stack = COMMIT_STACK_INIT;
if (!commit || (commit->object.flags & COMMON))
return;
- prio_queue_put(&queue, commit);
+ commit_stack_push(&stack, commit);
if (!ancestors_only) {
commit->object.flags |= COMMON;
if ((commit->object.flags & SEEN) && !(commit->object.flags & POPPED))
ns->non_common_revs--;
}
- while ((commit = prio_queue_get(&queue))) {
+ while ((commit = commit_stack_pop(&stack))) {
struct object *o = (struct object *)commit;
if (!(o->flags & SEEN))
@@ -94,12 +94,12 @@ static void mark_common(struct negotiation_state *ns, struct commit *commit,
if ((p->object.flags & SEEN) && !(p->object.flags & POPPED))
ns->non_common_revs--;
- prio_queue_put(&queue, parents->item);
+ commit_stack_push(&stack, parents->item);
}
}
}
- clear_prio_queue(&queue);
+ commit_stack_clear(&stack);
}
/*
diff --git a/negotiator/skipping.c b/negotiator/skipping.c
index 0a272130fb..fe4126ca4d 100644
--- a/negotiator/skipping.c
+++ b/negotiator/skipping.c
@@ -91,15 +91,15 @@ static int clear_marks(const struct reference *ref, void *cb_data UNUSED)
*/
static void mark_common(struct data *data, struct commit *seen_commit)
{
- struct prio_queue queue = { NULL };
+ struct commit_stack stack = COMMIT_STACK_INIT;
struct commit *c;
if (seen_commit->object.flags & COMMON)
return;
- prio_queue_put(&queue, seen_commit);
+ commit_stack_push(&stack, seen_commit);
seen_commit->object.flags |= COMMON;
- while ((c = prio_queue_get(&queue))) {
+ while ((c = commit_stack_pop(&stack))) {
struct commit_list *p;
if (!(c->object.flags & POPPED))
@@ -113,11 +113,11 @@ static void mark_common(struct data *data, struct commit *seen_commit)
continue;
p->item->object.flags |= COMMON;
- prio_queue_put(&queue, p->item);
+ commit_stack_push(&stack, p->item);
}
}
- clear_prio_queue(&queue);
+ commit_stack_clear(&stack);
}
/*
diff --git a/notes.c b/notes.c
index 51a7ef9f83..8f315e2a00 100644
--- a/notes.c
+++ b/notes.c
@@ -796,7 +796,7 @@ static int prune_notes_helper(const struct object_id *object_oid,
struct note_delete_list *n;
if (odb_has_object(the_repository->objects, object_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return 0; /* nothing to do for this note */
/* failed to find object => prune this note */
diff --git a/object-file.c b/object-file.c
index c62e5496e0..c353176206 100644
--- a/object-file.c
+++ b/object-file.c
@@ -33,6 +33,9 @@
/* The maximum size for an object header. */
#define MAX_HEADER_LEN 32
+static struct oidtree *odb_source_loose_cache(struct odb_source *source,
+ const struct object_id *oid);
+
static int get_conv_flags(unsigned flags)
{
if (flags & INDEX_RENORMALIZE)
@@ -906,7 +909,7 @@ static int start_loose_object_common(struct odb_source *source,
fd = create_tmpfile(source->odb->repo, tmp_file, filename);
if (fd < 0) {
- if (flags & WRITE_OBJECT_SILENT)
+ if (flags & ODB_WRITE_OBJECT_SILENT)
return -1;
else if (errno == EACCES)
return error(_("insufficient permission for adding "
@@ -1039,7 +1042,7 @@ static int write_loose_object(struct odb_source *source,
utb.actime = mtime;
utb.modtime = mtime;
if (utime(tmp_file.buf, &utb) < 0 &&
- !(flags & WRITE_OBJECT_SILENT))
+ !(flags & ODB_WRITE_OBJECT_SILENT))
warning_errno(_("failed utime() on %s"), tmp_file.buf);
}
@@ -1166,7 +1169,8 @@ cleanup:
int odb_source_loose_write_object(struct odb_source *source,
const void *buf, unsigned long len,
enum object_type type, struct object_id *oid,
- struct object_id *compat_oid_in, unsigned flags)
+ struct object_id *compat_oid_in,
+ enum odb_write_object_flags flags)
{
const struct git_hash_algo *algo = source->odb->repo->hash_algo;
const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
@@ -1279,8 +1283,9 @@ static int index_mem(struct index_state *istate,
}
}
if (flags & INDEX_FORMAT_CHECK) {
- struct fsck_options opts = FSCK_OPTIONS_DEFAULT;
+ struct fsck_options opts;
+ fsck_options_init(&opts, the_repository, FSCK_OPTIONS_DEFAULT);
opts.strict = 1;
opts.error_func = hash_format_check_report;
if (fsck_buffer(null_oid(istate->repo->hash_algo), type, buf, size, &opts))
@@ -1374,7 +1379,7 @@ static int already_written(struct odb_transaction_files *transaction,
{
/* The object may already exist in the repository */
if (odb_has_object(transaction->base.source->odb, oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return 1;
/* Might want to keep the list sorted */
@@ -1845,11 +1850,28 @@ static int for_each_object_wrapper_cb(const struct object_id *oid,
}
}
+static int for_each_prefixed_object_wrapper_cb(const struct object_id *oid,
+ void *cb_data)
+{
+ struct for_each_object_wrapper_data *data = cb_data;
+ if (data->request) {
+ struct object_info oi = *data->request;
+
+ if (odb_source_loose_read_object_info(data->source,
+ oid, &oi, 0) < 0)
+ return -1;
+
+ return data->cb(oid, &oi, data->cb_data);
+ } else {
+ return data->cb(oid, NULL, data->cb_data);
+ }
+}
+
int odb_source_loose_for_each_object(struct odb_source *source,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags)
+ const struct odb_for_each_object_options *opts)
{
struct for_each_object_wrapper_data data = {
.source = source,
@@ -1859,15 +1881,117 @@ int odb_source_loose_for_each_object(struct odb_source *source,
};
/* There are no loose promisor objects, so we can return immediately. */
- if ((flags & ODB_FOR_EACH_OBJECT_PROMISOR_ONLY))
+ if ((opts->flags & ODB_FOR_EACH_OBJECT_PROMISOR_ONLY))
return 0;
- if ((flags & ODB_FOR_EACH_OBJECT_LOCAL_ONLY) && !source->local)
+ if ((opts->flags & ODB_FOR_EACH_OBJECT_LOCAL_ONLY) && !source->local)
return 0;
+ if (opts->prefix)
+ return oidtree_each(odb_source_loose_cache(source, opts->prefix),
+ opts->prefix, opts->prefix_hex_len,
+ for_each_prefixed_object_wrapper_cb, &data);
+
return for_each_loose_file_in_source(source, for_each_object_wrapper_cb,
NULL, NULL, &data);
}
+static int count_loose_object(const struct object_id *oid UNUSED,
+ struct object_info *oi UNUSED,
+ void *payload)
+{
+ unsigned long *count = payload;
+ (*count)++;
+ return 0;
+}
+
+int odb_source_loose_count_objects(struct odb_source *source,
+ enum odb_count_objects_flags flags,
+ unsigned long *out)
+{
+ const unsigned hexsz = source->odb->repo->hash_algo->hexsz - 2;
+ char *path = NULL;
+ DIR *dir = NULL;
+ int ret;
+
+ if (flags & ODB_COUNT_OBJECTS_APPROXIMATE) {
+ unsigned long count = 0;
+ struct dirent *ent;
+
+ path = xstrfmt("%s/17", source->path);
+
+ dir = opendir(path);
+ if (!dir) {
+ if (errno == ENOENT) {
+ *out = 0;
+ ret = 0;
+ goto out;
+ }
+
+ ret = error_errno("cannot open object shard '%s'", path);
+ goto out;
+ }
+
+ while ((ent = readdir(dir)) != NULL) {
+ if (strspn(ent->d_name, "0123456789abcdef") != hexsz ||
+ ent->d_name[hexsz] != '\0')
+ continue;
+ count++;
+ }
+
+ *out = count * 256;
+ ret = 0;
+ } else {
+ struct odb_for_each_object_options opts = { 0 };
+ *out = 0;
+ ret = odb_source_loose_for_each_object(source, NULL, count_loose_object,
+ out, &opts);
+ }
+
+out:
+ if (dir)
+ closedir(dir);
+ free(path);
+ return ret;
+}
+
+struct find_abbrev_len_data {
+ const struct object_id *oid;
+ unsigned len;
+};
+
+static int find_abbrev_len_cb(const struct object_id *oid,
+ struct object_info *oi UNUSED,
+ void *cb_data)
+{
+ struct find_abbrev_len_data *data = cb_data;
+ unsigned len = oid_common_prefix_hexlen(oid, data->oid);
+ if (len != hash_algos[oid->algo].hexsz && len >= data->len)
+ data->len = len + 1;
+ return 0;
+}
+
+int odb_source_loose_find_abbrev_len(struct odb_source *source,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out)
+{
+ struct odb_for_each_object_options opts = {
+ .prefix = oid,
+ .prefix_hex_len = min_len,
+ };
+ struct find_abbrev_len_data data = {
+ .oid = oid,
+ .len = min_len,
+ };
+ int ret;
+
+ ret = odb_source_loose_for_each_object(source, NULL, find_abbrev_len_cb,
+ &data, &opts);
+ *out = data.len;
+
+ return ret;
+}
+
static int append_loose_object(const struct object_id *oid,
const char *path UNUSED,
void *data)
@@ -1876,8 +2000,8 @@ static int append_loose_object(const struct object_id *oid,
return 0;
}
-struct oidtree *odb_source_loose_cache(struct odb_source *source,
- const struct object_id *oid)
+static struct oidtree *odb_source_loose_cache(struct odb_source *source,
+ const struct object_id *oid)
{
struct odb_source_files *files = odb_source_files_downcast(source);
int subdir_nr = oid->hash[0];
diff --git a/object-file.h b/object-file.h
index ff6da65296..5241b8dd5c 100644
--- a/object-file.h
+++ b/object-file.h
@@ -68,20 +68,14 @@ int odb_source_loose_freshen_object(struct odb_source *source,
int odb_source_loose_write_object(struct odb_source *source,
const void *buf, unsigned long len,
enum object_type type, struct object_id *oid,
- struct object_id *compat_oid_in, unsigned flags);
+ struct object_id *compat_oid_in,
+ enum odb_write_object_flags flags);
int odb_source_loose_write_stream(struct odb_source *source,
struct odb_write_stream *stream, size_t len,
struct object_id *oid);
/*
- * Populate and return the loose object cache array corresponding to the
- * given object ID.
- */
-struct oidtree *odb_source_loose_cache(struct odb_source *source,
- const struct object_id *oid);
-
-/*
* Put in `buf` the name of the file in the local object database that
* would be used to store a loose object with the specified oid.
*/
@@ -137,7 +131,33 @@ int odb_source_loose_for_each_object(struct odb_source *source,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags);
+ const struct odb_for_each_object_options *opts);
+
+/*
+ * Count the number of loose objects in this source.
+ *
+ * With the `ODB_COUNT_OBJECTS_APPROXIMATE` flag, the count is estimated by
+ * scanning a single sharding directory for loose objects and extrapolating
+ * the result by a factor of 256. This should generally yield a reasonable
+ * estimate given that the object hash is indistinguishable from random.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int odb_source_loose_count_objects(struct odb_source *source,
+ enum odb_count_objects_flags flags,
+ unsigned long *out);
+
+/*
+ * Find the shortest unique prefix for the given object ID, where `min_len` is
+ * the minimum length that the prefix should have.
+ *
+ * Returns 0 on success, in which case the computed length will be written to
+ * `out`. Otherwise, a negative error code is returned.
+ */
+int odb_source_loose_find_abbrev_len(struct odb_source *source,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out);
/**
* format_object_header() is a thin wrapper around s xsnprintf() that
diff --git a/object-name.c b/object-name.c
index 7b14c3bf9b..21dcdc4a0e 100644
--- a/object-name.c
+++ b/object-name.c
@@ -15,11 +15,9 @@
#include "refs.h"
#include "remote.h"
#include "dir.h"
+#include "odb.h"
#include "oid-array.h"
-#include "oidtree.h"
-#include "packfile.h"
#include "pretty.h"
-#include "object-file.h"
#include "read-cache-ll.h"
#include "repo-settings.h"
#include "repository.h"
@@ -49,30 +47,29 @@ struct disambiguate_state {
unsigned candidate_ok:1;
unsigned disambiguate_fn_used:1;
unsigned ambiguous:1;
- unsigned always_call_fn:1;
};
-static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
+static int update_disambiguate_state(const struct object_id *current,
+ struct object_info *oi UNUSED,
+ void *cb_data)
{
+ struct disambiguate_state *ds = cb_data;
+
/* The hash algorithm of current has already been filtered */
- if (ds->always_call_fn) {
- ds->ambiguous = ds->fn(ds->repo, current, ds->cb_data) ? 1 : 0;
- return;
- }
if (!ds->candidate_exists) {
/* this is the first candidate */
oidcpy(&ds->candidate, current);
ds->candidate_exists = 1;
- return;
+ return 0;
} else if (oideq(&ds->candidate, current)) {
/* the same as what we already have seen */
- return;
+ return 0;
}
if (!ds->fn) {
/* cannot disambiguate between ds->candidate and current */
ds->ambiguous = 1;
- return;
+ return ds->ambiguous;
}
if (!ds->candidate_checked) {
@@ -85,7 +82,7 @@ static void update_candidates(struct disambiguate_state *ds, const struct object
/* discard the candidate; we know it does not satisfy fn */
oidcpy(&ds->candidate, current);
ds->candidate_checked = 0;
- return;
+ return 0;
}
/* if we reach this point, we know ds->candidate satisfies fn */
@@ -96,128 +93,12 @@ static void update_candidates(struct disambiguate_state *ds, const struct object
*/
ds->candidate_ok = 0;
ds->ambiguous = 1;
+ return ds->ambiguous;
}
/* otherwise, current can be discarded and candidate is still good */
-}
-
-static int match_hash(unsigned, const unsigned char *, const unsigned char *);
-
-static enum cb_next match_prefix(const struct object_id *oid, void *arg)
-{
- struct disambiguate_state *ds = arg;
- /* no need to call match_hash, oidtree_each did prefix match */
- update_candidates(ds, oid);
- return ds->ambiguous ? CB_BREAK : CB_CONTINUE;
-}
-
-static void find_short_object_filename(struct disambiguate_state *ds)
-{
- struct odb_source *source;
-
- for (source = ds->repo->objects->sources; source && !ds->ambiguous; source = source->next)
- oidtree_each(odb_source_loose_cache(source, &ds->bin_pfx),
- &ds->bin_pfx, ds->len, match_prefix, ds);
-}
-
-static int match_hash(unsigned len, const unsigned char *a, const unsigned char *b)
-{
- do {
- if (*a != *b)
- return 0;
- a++;
- b++;
- len -= 2;
- } while (len > 1);
- if (len)
- if ((*a ^ *b) & 0xf0)
- return 0;
- return 1;
-}
-
-static void unique_in_midx(struct multi_pack_index *m,
- struct disambiguate_state *ds)
-{
- for (; m; m = m->base_midx) {
- uint32_t num, i, first = 0;
- const struct object_id *current = NULL;
- int len = ds->len > ds->repo->hash_algo->hexsz ?
- ds->repo->hash_algo->hexsz : ds->len;
-
- if (!m->num_objects)
- continue;
-
- num = m->num_objects + m->num_objects_in_base;
-
- bsearch_one_midx(&ds->bin_pfx, m, &first);
-
- /*
- * At this point, "first" is the location of the lowest
- * object with an object name that could match
- * "bin_pfx". See if we have 0, 1 or more objects that
- * actually match(es).
- */
- for (i = first; i < num && !ds->ambiguous; i++) {
- struct object_id oid;
- current = nth_midxed_object_oid(&oid, m, i);
- if (!match_hash(len, ds->bin_pfx.hash, current->hash))
- break;
- update_candidates(ds, current);
- }
- }
-}
-
-static void unique_in_pack(struct packed_git *p,
- struct disambiguate_state *ds)
-{
- uint32_t num, i, first = 0;
- int len = ds->len > ds->repo->hash_algo->hexsz ?
- ds->repo->hash_algo->hexsz : ds->len;
-
- if (p->multi_pack_index)
- return;
-
- if (open_pack_index(p) || !p->num_objects)
- return;
-
- num = p->num_objects;
- bsearch_pack(&ds->bin_pfx, p, &first);
-
- /*
- * At this point, "first" is the location of the lowest object
- * with an object name that could match "bin_pfx". See if we have
- * 0, 1 or more objects that actually match(es).
- */
- for (i = first; i < num && !ds->ambiguous; i++) {
- struct object_id oid;
- nth_packed_object_id(&oid, p, i);
- if (!match_hash(len, ds->bin_pfx.hash, oid.hash))
- break;
- update_candidates(ds, &oid);
- }
-}
-
-static void find_short_packed_object(struct disambiguate_state *ds)
-{
- struct odb_source *source;
- struct packed_git *p;
-
- /* Skip, unless oids from the storage hash algorithm are wanted */
- if (ds->bin_pfx.algo && (&hash_algos[ds->bin_pfx.algo] != ds->repo->hash_algo))
- return;
-
- odb_prepare_alternates(ds->repo->objects);
- for (source = ds->repo->objects->sources; source && !ds->ambiguous; source = source->next) {
- struct multi_pack_index *m = get_multi_pack_index(source);
- if (m)
- unique_in_midx(m, ds);
- }
- repo_for_each_pack(ds->repo, p) {
- if (ds->ambiguous)
- break;
- unique_in_pack(p, ds);
- }
+ return 0;
}
static int finish_object_disambiguation(struct disambiguate_state *ds,
@@ -348,41 +229,57 @@ int set_disambiguate_hint_config(const char *var, const char *value)
return error("unknown hint type for '%s': %s", var, value);
}
+static int parse_oid_prefix(const char *name, int len,
+ const struct git_hash_algo *algo,
+ char *hex_out,
+ struct object_id *oid_out)
+{
+ for (int i = 0; i < len; i++) {
+ unsigned char c = name[i];
+ unsigned char val;
+ if (c >= '0' && c <= '9') {
+ val = c - '0';
+ } else if (c >= 'a' && c <= 'f') {
+ val = c - 'a' + 10;
+ } else if (c >= 'A' && c <='F') {
+ val = c - 'A' + 10;
+ c -= 'A' - 'a';
+ } else {
+ return -1;
+ }
+
+ if (hex_out)
+ hex_out[i] = c;
+ if (oid_out) {
+ if (!(i & 1))
+ val <<= 4;
+ oid_out->hash[i >> 1] |= val;
+ }
+ }
+
+ if (hex_out)
+ hex_out[len] = '\0';
+ if (oid_out)
+ oid_out->algo = algo ? hash_algo_by_ptr(algo) : GIT_HASH_UNKNOWN;
+
+ return 0;
+}
+
static int init_object_disambiguation(struct repository *r,
const char *name, int len,
const struct git_hash_algo *algo,
struct disambiguate_state *ds)
{
- int i;
-
if (len < MINIMUM_ABBREV || len > GIT_MAX_HEXSZ)
return -1;
memset(ds, 0, sizeof(*ds));
- for (i = 0; i < len ;i++) {
- unsigned char c = name[i];
- unsigned char val;
- if (c >= '0' && c <= '9')
- val = c - '0';
- else if (c >= 'a' && c <= 'f')
- val = c - 'a' + 10;
- else if (c >= 'A' && c <='F') {
- val = c - 'A' + 10;
- c -= 'A' - 'a';
- }
- else
- return -1;
- ds->hex_pfx[i] = c;
- if (!(i & 1))
- val <<= 4;
- ds->bin_pfx.hash[i >> 1] |= val;
- }
+ if (parse_oid_prefix(name, len, algo, ds->hex_pfx, &ds->bin_pfx) < 0)
+ return -1;
ds->len = len;
- ds->hex_pfx[len] = '\0';
ds->repo = r;
- ds->bin_pfx.algo = algo ? hash_algo_by_ptr(algo) : GIT_HASH_UNKNOWN;
odb_prepare_alternates(r->objects);
return 0;
}
@@ -510,8 +407,8 @@ static int collect_ambiguous(const struct object_id *oid, void *data)
return 0;
}
-static int repo_collect_ambiguous(struct repository *r UNUSED,
- const struct object_id *oid,
+static int repo_collect_ambiguous(const struct object_id *oid,
+ struct object_info *oi UNUSED,
void *data)
{
return collect_ambiguous(oid, data);
@@ -561,6 +458,7 @@ static enum get_oid_result get_short_oid(struct repository *r,
struct object_id *oid,
unsigned flags)
{
+ struct odb_for_each_object_options opts = { 0 };
int status;
struct disambiguate_state ds;
int quietly = !!(flags & GET_OID_QUIETLY);
@@ -588,8 +486,11 @@ static enum get_oid_result get_short_oid(struct repository *r,
else
ds.fn = default_disambiguate_hint;
- find_short_object_filename(&ds);
- find_short_packed_object(&ds);
+ opts.prefix = &ds.bin_pfx;
+ opts.prefix_hex_len = ds.len;
+
+ odb_for_each_object_ext(r->objects, NULL, update_disambiguate_state,
+ &ds, &opts);
status = finish_object_disambiguation(&ds, oid);
/*
@@ -599,8 +500,8 @@ static enum get_oid_result get_short_oid(struct repository *r,
*/
if (status == MISSING_OBJECT) {
odb_reprepare(r->objects);
- find_short_object_filename(&ds);
- find_short_packed_object(&ds);
+ odb_for_each_object_ext(r->objects, NULL, update_disambiguate_state,
+ &ds, &opts);
status = finish_object_disambiguation(&ds, oid);
}
@@ -648,169 +549,25 @@ int repo_for_each_abbrev(struct repository *r, const char *prefix,
const struct git_hash_algo *algo,
each_abbrev_fn fn, void *cb_data)
{
+ struct object_id prefix_oid = { 0 };
+ struct odb_for_each_object_options opts = {
+ .prefix = &prefix_oid,
+ .prefix_hex_len = strlen(prefix),
+ };
struct oid_array collect = OID_ARRAY_INIT;
- struct disambiguate_state ds;
int ret;
- if (init_object_disambiguation(r, prefix, strlen(prefix), algo, &ds) < 0)
+ if (parse_oid_prefix(prefix, opts.prefix_hex_len, algo, NULL, &prefix_oid) < 0)
return -1;
- ds.always_call_fn = 1;
- ds.fn = repo_collect_ambiguous;
- ds.cb_data = &collect;
- find_short_object_filename(&ds);
- find_short_packed_object(&ds);
+ if (odb_for_each_object_ext(r->objects, NULL, repo_collect_ambiguous, &collect, &opts) < 0)
+ return -1;
ret = oid_array_for_each_unique(&collect, fn, cb_data);
oid_array_clear(&collect);
return ret;
}
-/*
- * Return the slot of the most-significant bit set in "val". There are various
- * ways to do this quickly with fls() or __builtin_clzl(), but speed is
- * probably not a big deal here.
- */
-static unsigned msb(unsigned long val)
-{
- unsigned r = 0;
- while (val >>= 1)
- r++;
- return r;
-}
-
-struct min_abbrev_data {
- unsigned int init_len;
- unsigned int cur_len;
- char *hex;
- struct repository *repo;
- const struct object_id *oid;
-};
-
-static inline char get_hex_char_from_oid(const struct object_id *oid,
- unsigned int pos)
-{
- static const char hex[] = "0123456789abcdef";
-
- if ((pos & 1) == 0)
- return hex[oid->hash[pos >> 1] >> 4];
- else
- return hex[oid->hash[pos >> 1] & 0xf];
-}
-
-static int extend_abbrev_len(const struct object_id *oid,
- struct min_abbrev_data *mad)
-{
- unsigned int i = mad->init_len;
- while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
- i++;
-
- if (mad->hex[i] && i >= mad->cur_len)
- mad->cur_len = i + 1;
-
- return 0;
-}
-
-static int repo_extend_abbrev_len(struct repository *r UNUSED,
- const struct object_id *oid,
- void *cb_data)
-{
- return extend_abbrev_len(oid, cb_data);
-}
-
-static void find_abbrev_len_for_midx(struct multi_pack_index *m,
- struct min_abbrev_data *mad)
-{
- for (; m; m = m->base_midx) {
- int match = 0;
- uint32_t num, first = 0;
- struct object_id oid;
- const struct object_id *mad_oid;
-
- if (!m->num_objects)
- continue;
-
- num = m->num_objects + m->num_objects_in_base;
- mad_oid = mad->oid;
- match = bsearch_one_midx(mad_oid, m, &first);
-
- /*
- * first is now the position in the packfile where we
- * would insert mad->hash if it does not exist (or the
- * position of mad->hash if it does exist). Hence, we
- * consider a maximum of two objects nearby for the
- * abbreviation length.
- */
- mad->init_len = 0;
- if (!match) {
- if (nth_midxed_object_oid(&oid, m, first))
- extend_abbrev_len(&oid, mad);
- } else if (first < num - 1) {
- if (nth_midxed_object_oid(&oid, m, first + 1))
- extend_abbrev_len(&oid, mad);
- }
- if (first > 0) {
- if (nth_midxed_object_oid(&oid, m, first - 1))
- extend_abbrev_len(&oid, mad);
- }
- mad->init_len = mad->cur_len;
- }
-}
-
-static void find_abbrev_len_for_pack(struct packed_git *p,
- struct min_abbrev_data *mad)
-{
- int match = 0;
- uint32_t num, first = 0;
- struct object_id oid;
- const struct object_id *mad_oid;
-
- if (p->multi_pack_index)
- return;
-
- if (open_pack_index(p) || !p->num_objects)
- return;
-
- num = p->num_objects;
- mad_oid = mad->oid;
- match = bsearch_pack(mad_oid, p, &first);
-
- /*
- * first is now the position in the packfile where we would insert
- * mad->hash if it does not exist (or the position of mad->hash if
- * it does exist). Hence, we consider a maximum of two objects
- * nearby for the abbreviation length.
- */
- mad->init_len = 0;
- if (!match) {
- if (!nth_packed_object_id(&oid, p, first))
- extend_abbrev_len(&oid, mad);
- } else if (first < num - 1) {
- if (!nth_packed_object_id(&oid, p, first + 1))
- extend_abbrev_len(&oid, mad);
- }
- if (first > 0) {
- if (!nth_packed_object_id(&oid, p, first - 1))
- extend_abbrev_len(&oid, mad);
- }
- mad->init_len = mad->cur_len;
-}
-
-static void find_abbrev_len_packed(struct min_abbrev_data *mad)
-{
- struct packed_git *p;
-
- odb_prepare_alternates(mad->repo->objects);
- for (struct odb_source *source = mad->repo->objects->sources; source; source = source->next) {
- struct multi_pack_index *m = get_multi_pack_index(source);
- if (m)
- find_abbrev_len_for_midx(m, mad);
- }
-
- repo_for_each_pack(mad->repo, p)
- find_abbrev_len_for_pack(p, mad);
-}
-
void strbuf_repo_add_unique_abbrev(struct strbuf *sb, struct repository *repo,
const struct object_id *oid, int abbrev_len)
{
@@ -827,61 +584,19 @@ void strbuf_add_unique_abbrev(struct strbuf *sb, const struct object_id *oid,
}
int repo_find_unique_abbrev_r(struct repository *r, char *hex,
- const struct object_id *oid, int len)
+ const struct object_id *oid, int min_len)
{
const struct git_hash_algo *algo =
oid->algo ? &hash_algos[oid->algo] : r->hash_algo;
- struct disambiguate_state ds;
- struct min_abbrev_data mad;
- struct object_id oid_ret;
- const unsigned hexsz = algo->hexsz;
+ unsigned len;
- if (len < 0) {
- unsigned long count = repo_approximate_object_count(r);
- /*
- * Add one because the MSB only tells us the highest bit set,
- * not including the value of all the _other_ bits (so "15"
- * is only one off of 2^4, but the MSB is the 3rd bit.
- */
- len = msb(count) + 1;
- /*
- * We now know we have on the order of 2^len objects, which
- * expects a collision at 2^(len/2). But we also care about hex
- * chars, not bits, and there are 4 bits per hex. So all
- * together we need to divide by 2 and round up.
- */
- len = DIV_ROUND_UP(len, 2);
- /*
- * For very small repos, we stick with our regular fallback.
- */
- if (len < FALLBACK_DEFAULT_ABBREV)
- len = FALLBACK_DEFAULT_ABBREV;
- }
+ if (odb_find_abbrev_len(r->objects, oid, min_len, &len) < 0)
+ len = algo->hexsz;
oid_to_hex_r(hex, oid);
- if (len >= hexsz || !len)
- return hexsz;
-
- mad.repo = r;
- mad.init_len = len;
- mad.cur_len = len;
- mad.hex = hex;
- mad.oid = oid;
-
- find_abbrev_len_packed(&mad);
-
- if (init_object_disambiguation(r, hex, mad.cur_len, algo, &ds) < 0)
- return -1;
-
- ds.fn = repo_extend_abbrev_len;
- ds.always_call_fn = 1;
- ds.cb_data = (void *)&mad;
-
- find_short_object_filename(&ds);
- (void)finish_object_disambiguation(&ds, &oid_ret);
+ hex[len] = 0;
- hex[mad.cur_len] = 0;
- return mad.cur_len;
+ return len;
}
const char *repo_find_unique_abbrev(struct repository *r,
@@ -1660,7 +1375,8 @@ static int interpret_empty_at(const char *name, int namelen, int len, struct str
static int reinterpret(struct repository *r,
const char *name, int namelen, int len,
- struct strbuf *buf, unsigned allowed)
+ struct strbuf *buf,
+ enum interpret_branch_kind allowed)
{
/* we have extra data, which might need further processing */
struct strbuf tmp = STRBUF_INIT;
@@ -1692,7 +1408,8 @@ static void set_shortened_ref(struct repository *r, struct strbuf *buf, const ch
free(s);
}
-static int branch_interpret_allowed(const char *refname, unsigned allowed)
+static int branch_interpret_allowed(const char *refname,
+ enum interpret_branch_kind allowed)
{
if (!allowed)
return 1;
diff --git a/object-name.h b/object-name.h
index cda4934cd5..167a9154ea 100644
--- a/object-name.h
+++ b/object-name.h
@@ -101,9 +101,12 @@ int set_disambiguate_hint_config(const char *var, const char *value);
* If the input was ok but there are not N branch switches in the
* reflog, it returns 0.
*/
-#define INTERPRET_BRANCH_LOCAL (1<<0)
-#define INTERPRET_BRANCH_REMOTE (1<<1)
-#define INTERPRET_BRANCH_HEAD (1<<2)
+enum interpret_branch_kind {
+ INTERPRET_BRANCH_LOCAL = (1 << 0),
+ INTERPRET_BRANCH_REMOTE = (1 << 1),
+ INTERPRET_BRANCH_HEAD = (1 << 2),
+};
+
struct interpret_branch_name_options {
/*
* If "allowed" is non-zero, it is a treated as a bitfield of allowable
@@ -111,7 +114,7 @@ struct interpret_branch_name_options {
* ("refs/remotes/"), or "HEAD". If no "allowed" bits are set, any expansion is
* allowed, even ones to refs outside of those namespaces.
*/
- unsigned allowed;
+ enum interpret_branch_kind allowed;
/*
* If ^{upstream} or ^{push} (or equivalent) is requested, and the
diff --git a/odb.c b/odb.c
index 84a31084d3..9b28fe25ef 100644
--- a/odb.c
+++ b/odb.c
@@ -12,6 +12,7 @@
#include "midx.h"
#include "object-file-convert.h"
#include "object-file.h"
+#include "object-name.h"
#include "odb.h"
#include "packfile.h"
#include "path.h"
@@ -871,15 +872,15 @@ void *odb_read_object_peeled(struct object_database *odb,
}
int odb_has_object(struct object_database *odb, const struct object_id *oid,
- enum has_object_flags flags)
+ enum odb_has_object_flags flags)
{
unsigned object_info_flags = 0;
if (!startup_info->have_repository)
return 0;
- if (!(flags & HAS_OBJECT_RECHECK_PACKED))
+ if (!(flags & ODB_HAS_OBJECT_RECHECK_PACKED))
object_info_flags |= OBJECT_INFO_QUICK;
- if (!(flags & HAS_OBJECT_FETCH_PROMISOR))
+ if (!(flags & ODB_HAS_OBJECT_FETCH_PROMISOR))
object_info_flags |= OBJECT_INFO_SKIP_FETCH_OBJECT;
return odb_read_object_info_extended(odb, oid, NULL, object_info_flags) >= 0;
@@ -896,20 +897,20 @@ int odb_freshen_object(struct object_database *odb,
return 0;
}
-int odb_for_each_object(struct object_database *odb,
- const struct object_info *request,
- odb_for_each_object_cb cb,
- void *cb_data,
- unsigned flags)
+int odb_for_each_object_ext(struct object_database *odb,
+ const struct object_info *request,
+ odb_for_each_object_cb cb,
+ void *cb_data,
+ const struct odb_for_each_object_options *opts)
{
int ret;
odb_prepare_alternates(odb);
for (struct odb_source *source = odb->sources; source; source = source->next) {
- if (flags & ODB_FOR_EACH_OBJECT_LOCAL_ONLY && !source->local)
+ if (opts->flags & ODB_FOR_EACH_OBJECT_LOCAL_ONLY && !source->local)
continue;
- ret = odb_source_for_each_object(source, request, cb, cb_data, flags);
+ ret = odb_source_for_each_object(source, request, cb, cb_data, opts);
if (ret)
return ret;
}
@@ -917,6 +918,125 @@ int odb_for_each_object(struct object_database *odb,
return 0;
}
+int odb_for_each_object(struct object_database *odb,
+ const struct object_info *request,
+ odb_for_each_object_cb cb,
+ void *cb_data,
+ enum odb_for_each_object_flags flags)
+{
+ struct odb_for_each_object_options opts = {
+ .flags = flags,
+ };
+ return odb_for_each_object_ext(odb, request, cb, cb_data, &opts);
+}
+
+int odb_count_objects(struct object_database *odb,
+ enum odb_count_objects_flags flags,
+ unsigned long *out)
+{
+ struct odb_source *source;
+ unsigned long count = 0;
+ int ret;
+
+ if (odb->object_count_valid && odb->object_count_flags == flags) {
+ *out = odb->object_count;
+ return 0;
+ }
+
+ odb_prepare_alternates(odb);
+ for (source = odb->sources; source; source = source->next) {
+ unsigned long c;
+
+ ret = odb_source_count_objects(source, flags, &c);
+ if (ret < 0)
+ goto out;
+
+ count += c;
+ }
+
+ odb->object_count = count;
+ odb->object_count_valid = 1;
+ odb->object_count_flags = flags;
+
+ *out = count;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/*
+ * Return the slot of the most-significant bit set in "val". There are various
+ * ways to do this quickly with fls() or __builtin_clzl(), but speed is
+ * probably not a big deal here.
+ */
+static unsigned msb(unsigned long val)
+{
+ unsigned r = 0;
+ while (val >>= 1)
+ r++;
+ return r;
+}
+
+int odb_find_abbrev_len(struct object_database *odb,
+ const struct object_id *oid,
+ int min_length,
+ unsigned *out)
+{
+ const struct git_hash_algo *algo =
+ oid->algo ? &hash_algos[oid->algo] : odb->repo->hash_algo;
+ const unsigned hexsz = algo->hexsz;
+ unsigned len;
+ int ret;
+
+ if (min_length < 0) {
+ unsigned long count;
+
+ if (odb_count_objects(odb, ODB_COUNT_OBJECTS_APPROXIMATE, &count) < 0)
+ count = 0;
+
+ /*
+ * Add one because the MSB only tells us the highest bit set,
+ * not including the value of all the _other_ bits (so "15"
+ * is only one off of 2^4, but the MSB is the 3rd bit).
+ */
+ len = msb(count) + 1;
+ /*
+ * We now know we have on the order of 2^len objects, which
+ * expects a collision at 2^(len/2). But we also care about hex
+ * chars, not bits, and there are 4 bits per hex. So all
+ * together we need to divide by 2 and round up.
+ */
+ len = DIV_ROUND_UP(len, 2);
+ /*
+ * For very small repos, we stick with our regular fallback.
+ */
+ if (len < FALLBACK_DEFAULT_ABBREV)
+ len = FALLBACK_DEFAULT_ABBREV;
+ } else {
+ len = min_length;
+ }
+
+ if (len >= hexsz || !len) {
+ *out = hexsz;
+ ret = 0;
+ goto out;
+ }
+
+ odb_prepare_alternates(odb);
+ for (struct odb_source *source = odb->sources; source; source = source->next) {
+ ret = odb_source_find_abbrev_len(source, oid, len, &len);
+ if (ret)
+ goto out;
+ }
+
+ ret = 0;
+ *out = len;
+
+out:
+ return ret;
+}
+
void odb_assert_oid_type(struct object_database *odb,
const struct object_id *oid, enum object_type expect)
{
@@ -933,7 +1053,7 @@ int odb_write_object_ext(struct object_database *odb,
enum object_type type,
struct object_id *oid,
struct object_id *compat_oid,
- unsigned flags)
+ enum odb_write_object_flags flags)
{
return odb_source_write_object(odb->sources, buf, len, type,
oid, compat_oid, flags);
@@ -1030,7 +1150,7 @@ void odb_reprepare(struct object_database *o)
for (source = o->sources; source; source = source->next)
odb_source_reprepare(source);
- o->approximate_object_count_valid = 0;
+ o->object_count_valid = 0;
obj_read_unlock();
}
diff --git a/odb.h b/odb.h
index 86e0365c24..3a711f6547 100644
--- a/odb.h
+++ b/odb.h
@@ -1,19 +1,17 @@
#ifndef ODB_H
#define ODB_H
-#include "hashmap.h"
#include "object.h"
-#include "odb/source.h"
#include "oidset.h"
#include "oidmap.h"
#include "string-list.h"
#include "thread-utils.h"
-struct oidmap;
-struct oidtree;
-struct strbuf;
+struct cached_object_entry;
+struct packed_git;
struct repository;
-struct multi_pack_index;
+struct strbuf;
+struct strvec;
/*
* Set this to 0 to prevent odb_read_object_info_extended() from fetching missing
@@ -31,10 +29,6 @@ extern int fetch_if_missing;
*/
char *compute_alternate_path(const char *path, struct strbuf *err);
-struct packed_git;
-struct packfile_store;
-struct cached_object_entry;
-
/*
* A transaction may be started for an object database prior to writing new
* objects via odb_transaction_begin(). These objects are not committed until
@@ -110,10 +104,11 @@ struct object_database {
/*
* A fast, rough count of the number of objects in the repository.
* These two fields are not meant for direct access. Use
- * repo_approximate_object_count() instead.
+ * odb_count_objects() instead.
*/
- unsigned long approximate_object_count;
- unsigned approximate_object_count_valid : 1;
+ unsigned long object_count;
+ unsigned object_count_flags;
+ unsigned object_count_valid : 1;
/*
* Submodule source paths that will be added as additional sources to
@@ -339,6 +334,42 @@ struct object_info {
*/
#define OBJECT_INFO_INIT { 0 }
+/* Flags that can be passed to `odb_read_object_info_extended()`. */
+enum object_info_flags {
+ /* Invoke lookup_replace_object() on the given hash. */
+ OBJECT_INFO_LOOKUP_REPLACE = (1 << 0),
+
+ /* Do not reprepare object sources when the first lookup has failed. */
+ OBJECT_INFO_QUICK = (1 << 1),
+
+ /*
+ * Do not attempt to fetch the object if missing (even if fetch_if_missing is
+ * nonzero).
+ */
+ OBJECT_INFO_SKIP_FETCH_OBJECT = (1 << 2),
+
+ /* Die if object corruption (not just an object being missing) was detected. */
+ OBJECT_INFO_DIE_IF_CORRUPT = (1 << 3),
+
+ /*
+ * We have already tried reading the object, but it couldn't be found
+ * via any of the attached sources, and are now doing a second read.
+ * This second read asks the individual sources to also evaluate
+ * whether any on-disk state may have changed that may have caused the
+ * object to appear.
+ *
+ * This flag is for internal use, only. The second read only occurs
+ * when `OBJECT_INFO_QUICK` was not passed.
+ */
+ OBJECT_INFO_SECOND_READ = (1 << 4),
+
+ /*
+ * This is meant for bulk prefetching of missing blobs in a partial
+ * clone. Implies OBJECT_INFO_SKIP_FETCH_OBJECT and OBJECT_INFO_QUICK.
+ */
+ OBJECT_INFO_FOR_PREFETCH = (OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK),
+};
+
/*
* Read object info from the object database and populate the `object_info`
* structure. Returns 0 on success, a negative error code otherwise.
@@ -358,11 +389,11 @@ int odb_read_object_info(struct object_database *odb,
const struct object_id *oid,
unsigned long *sizep);
-enum has_object_flags {
+enum odb_has_object_flags {
/* Retry packed storage after checking packed and loose storage */
- HAS_OBJECT_RECHECK_PACKED = (1 << 0),
+ ODB_HAS_OBJECT_RECHECK_PACKED = (1 << 0),
/* Allow fetching the object in case the repository has a promisor remote. */
- HAS_OBJECT_FETCH_PROMISOR = (1 << 1),
+ ODB_HAS_OBJECT_FETCH_PROMISOR = (1 << 1),
};
/*
@@ -371,7 +402,7 @@ enum has_object_flags {
*/
int odb_has_object(struct object_database *odb,
const struct object_id *oid,
- enum has_object_flags flags);
+ enum odb_has_object_flags flags);
int odb_freshen_object(struct object_database *odb,
const struct object_id *oid);
@@ -433,6 +464,34 @@ enum odb_for_each_object_flags {
};
/*
+ * A callback function that can be used to iterate through objects. If given,
+ * the optional `oi` parameter will be populated the same as if you would call
+ * `odb_read_object_info()`.
+ *
+ * Returning a non-zero error code will cause iteration to abort. The error
+ * code will be propagated.
+ */
+typedef int (*odb_for_each_object_cb)(const struct object_id *oid,
+ struct object_info *oi,
+ void *cb_data);
+
+/*
+ * Options that can be passed to `odb_for_each_object()` and its
+ * backend-specific implementations.
+ */
+struct odb_for_each_object_options {
+ /* A bitfield of `odb_for_each_object_flags`. */
+ enum odb_for_each_object_flags flags;
+
+ /*
+ * If set, only iterate through objects whose first `prefix_hex_len`
+ * hex characters matches the given prefix.
+ */
+ const struct object_id *prefix;
+ size_t prefix_hex_len;
+};
+
+/*
* Iterate through all objects contained in the object database. Note that
* objects may be iterated over multiple times in case they are either stored
* in different backends or in case they are stored in multiple sources.
@@ -446,25 +505,69 @@ enum odb_for_each_object_flags {
* Returns 0 on success, a negative error code in case a failure occurred, or
* an arbitrary non-zero error code returned by the callback itself.
*/
+int odb_for_each_object_ext(struct object_database *odb,
+ const struct object_info *request,
+ odb_for_each_object_cb cb,
+ void *cb_data,
+ const struct odb_for_each_object_options *opts);
+
+/* Same as `odb_for_each_object_ext()` with `opts.flags` set to the given flags. */
int odb_for_each_object(struct object_database *odb,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags);
+ enum odb_for_each_object_flags flags);
+
+enum odb_count_objects_flags {
+ /*
+ * Instead of providing an accurate count, allow the number of objects
+ * to be approximated. Details of how this approximation works are
+ * subject to the specific source's implementation.
+ */
+ ODB_COUNT_OBJECTS_APPROXIMATE = (1 << 0),
+};
+
+/*
+ * Count the number of objects in the given object database. This object count
+ * may double-count objects that are stored in multiple backends, or which are
+ * stored multiple times in a single backend.
+ *
+ * Returns 0 on success, a negative error code otherwise. The number of objects
+ * will be assigned to the `out` pointer on success.
+ */
+int odb_count_objects(struct object_database *odb,
+ enum odb_count_objects_flags flags,
+ unsigned long *out);
+
+/*
+ * Given an object ID, find the minimum required length required to make the
+ * object ID unique across the whole object database.
+ *
+ * The `min_len` determines the minimum abbreviated length that'll be returned
+ * by this function. If `min_len < 0`, then the function will set a sensible
+ * default minimum abbreviation length.
+ *
+ * Returns 0 on success, a negative error code otherwise. The computed length
+ * will be assigned to `*out`.
+ */
+int odb_find_abbrev_len(struct object_database *odb,
+ const struct object_id *oid,
+ int min_len,
+ unsigned *out);
-enum {
+enum odb_write_object_flags {
/*
* By default, `odb_write_object()` does not actually write anything
* into the object store, but only computes the object ID. This flag
* changes that so that the object will be written as a loose object
* and persisted.
*/
- WRITE_OBJECT_PERSIST = (1 << 0),
+ ODB_WRITE_OBJECT_PERSIST = (1 << 0),
/*
* Do not print an error in case something goes wrong.
*/
- WRITE_OBJECT_SILENT = (1 << 1),
+ ODB_WRITE_OBJECT_SILENT = (1 << 1),
};
/*
@@ -480,7 +583,7 @@ int odb_write_object_ext(struct object_database *odb,
enum object_type type,
struct object_id *oid,
struct object_id *compat_oid,
- unsigned flags);
+ enum odb_write_object_flags flags);
static inline int odb_write_object(struct object_database *odb,
const void *buf, unsigned long len,
diff --git a/odb/source-files.c b/odb/source-files.c
index 14cb9adeca..b5abd20e97 100644
--- a/odb/source-files.c
+++ b/odb/source-files.c
@@ -75,24 +75,77 @@ static int odb_source_files_for_each_object(struct odb_source *source,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags)
+ const struct odb_for_each_object_options *opts)
{
struct odb_source_files *files = odb_source_files_downcast(source);
int ret;
- if (!(flags & ODB_FOR_EACH_OBJECT_PROMISOR_ONLY)) {
- ret = odb_source_loose_for_each_object(source, request, cb, cb_data, flags);
+ if (!(opts->flags & ODB_FOR_EACH_OBJECT_PROMISOR_ONLY)) {
+ ret = odb_source_loose_for_each_object(source, request, cb, cb_data, opts);
if (ret)
return ret;
}
- ret = packfile_store_for_each_object(files->packed, request, cb, cb_data, flags);
+ ret = packfile_store_for_each_object(files->packed, request, cb, cb_data, opts);
if (ret)
return ret;
return 0;
}
+static int odb_source_files_count_objects(struct odb_source *source,
+ enum odb_count_objects_flags flags,
+ unsigned long *out)
+{
+ struct odb_source_files *files = odb_source_files_downcast(source);
+ unsigned long count;
+ int ret;
+
+ ret = packfile_store_count_objects(files->packed, flags, &count);
+ if (ret < 0)
+ goto out;
+
+ if (!(flags & ODB_COUNT_OBJECTS_APPROXIMATE)) {
+ unsigned long loose_count;
+
+ ret = odb_source_loose_count_objects(source, flags, &loose_count);
+ if (ret < 0)
+ goto out;
+
+ count += loose_count;
+ }
+
+ *out = count;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int odb_source_files_find_abbrev_len(struct odb_source *source,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out)
+{
+ struct odb_source_files *files = odb_source_files_downcast(source);
+ unsigned len = min_len;
+ int ret;
+
+ ret = packfile_store_find_abbrev_len(files->packed, oid, len, &len);
+ if (ret < 0)
+ goto out;
+
+ ret = odb_source_loose_find_abbrev_len(source, oid, len, &len);
+ if (ret < 0)
+ goto out;
+
+ *out = len;
+ ret = 0;
+
+out:
+ return ret;
+}
+
static int odb_source_files_freshen_object(struct odb_source *source,
const struct object_id *oid)
{
@@ -108,7 +161,7 @@ static int odb_source_files_write_object(struct odb_source *source,
enum object_type type,
struct object_id *oid,
struct object_id *compat_oid,
- unsigned flags)
+ enum odb_write_object_flags flags)
{
return odb_source_loose_write_object(source, buf, len, type,
oid, compat_oid, flags);
@@ -220,6 +273,8 @@ struct odb_source_files *odb_source_files_new(struct object_database *odb,
files->base.read_object_info = odb_source_files_read_object_info;
files->base.read_object_stream = odb_source_files_read_object_stream;
files->base.for_each_object = odb_source_files_for_each_object;
+ files->base.count_objects = odb_source_files_count_objects;
+ files->base.find_abbrev_len = odb_source_files_find_abbrev_len;
files->base.freshen_object = odb_source_files_freshen_object;
files->base.write_object = odb_source_files_write_object;
files->base.write_object_stream = odb_source_files_write_object_stream;
diff --git a/odb/source.h b/odb/source.h
index caac558149..f706e0608a 100644
--- a/odb/source.h
+++ b/odb/source.h
@@ -2,6 +2,7 @@
#define ODB_SOURCE_H
#include "object.h"
+#include "odb.h"
enum odb_source_type {
/*
@@ -14,62 +15,11 @@ enum odb_source_type {
ODB_SOURCE_FILES,
};
-/* Flags that can be passed to `odb_read_object_info_extended()`. */
-enum object_info_flags {
- /* Invoke lookup_replace_object() on the given hash. */
- OBJECT_INFO_LOOKUP_REPLACE = (1 << 0),
-
- /* Do not reprepare object sources when the first lookup has failed. */
- OBJECT_INFO_QUICK = (1 << 1),
-
- /*
- * Do not attempt to fetch the object if missing (even if fetch_is_missing is
- * nonzero).
- */
- OBJECT_INFO_SKIP_FETCH_OBJECT = (1 << 2),
-
- /* Die if object corruption (not just an object being missing) was detected. */
- OBJECT_INFO_DIE_IF_CORRUPT = (1 << 3),
-
- /*
- * We have already tried reading the object, but it couldn't be found
- * via any of the attached sources, and are now doing a second read.
- * This second read asks the individual sources to also evaluate
- * whether any on-disk state may have changed that may have caused the
- * object to appear.
- *
- * This flag is for internal use, only. The second read only occurs
- * when `OBJECT_INFO_QUICK` was not passed.
- */
- OBJECT_INFO_SECOND_READ = (1 << 4),
-
- /*
- * This is meant for bulk prefetching of missing blobs in a partial
- * clone. Implies OBJECT_INFO_SKIP_FETCH_OBJECT and OBJECT_INFO_QUICK.
- */
- OBJECT_INFO_FOR_PREFETCH = (OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK),
-};
-
struct object_id;
-struct object_info;
struct odb_read_stream;
-struct odb_transaction;
-struct odb_write_stream;
struct strvec;
/*
- * A callback function that can be used to iterate through objects. If given,
- * the optional `oi` parameter will be populated the same as if you would call
- * `odb_read_object_info()`.
- *
- * Returning a non-zero error code will cause iteration to abort. The error
- * code will be propagated.
- */
-typedef int (*odb_for_each_object_cb)(const struct object_id *oid,
- struct object_info *oi,
- void *cb_data);
-
-/*
* The source is the part of the object database that stores the actual
* objects. It thus encapsulates the logic to read and write the specific
* on-disk format. An object database can have multiple sources:
@@ -190,7 +140,34 @@ struct odb_source {
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags);
+ const struct odb_for_each_object_options *opts);
+
+ /*
+ * This callback is expected to count objects in the given object
+ * database source. The callback function does not have to guarantee
+ * that only unique objects are counted. The result shall be assigned
+ * to the `out` pointer.
+ *
+ * Accepts `enum odb_count_objects_flag` flags to alter the behaviour.
+ *
+ * The callback is expected to return 0 on success, or a negative error
+ * code otherwise.
+ */
+ int (*count_objects)(struct odb_source *source,
+ enum odb_count_objects_flags flags,
+ unsigned long *out);
+
+ /*
+ * This callback is expected to find the minimum required length to
+ * make the given object ID unique.
+ *
+ * The callback is expected to return a negative error code in case it
+ * failed, 0 otherwise.
+ */
+ int (*find_abbrev_len)(struct odb_source *source,
+ const struct object_id *oid,
+ unsigned min_length,
+ unsigned *out);
/*
* This callback is expected to freshen the given object so that its
@@ -220,7 +197,7 @@ struct odb_source {
enum object_type type,
struct object_id *oid,
struct object_id *compat_oid,
- unsigned flags);
+ enum odb_write_object_flags flags);
/*
* This callback is expected to persist the given object stream into
@@ -378,9 +355,33 @@ static inline int odb_source_for_each_object(struct odb_source *source,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags)
+ const struct odb_for_each_object_options *opts)
+{
+ return source->for_each_object(source, request, cb, cb_data, opts);
+}
+
+/*
+ * Count the number of objects in the given object database source.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static inline int odb_source_count_objects(struct odb_source *source,
+ enum odb_count_objects_flags flags,
+ unsigned long *out)
+{
+ return source->count_objects(source, flags, out);
+}
+
+/*
+ * Determine the minimum required length to make the given object ID unique in
+ * the given source. Returns 0 on success, a negative error code otherwise.
+ */
+static inline int odb_source_find_abbrev_len(struct odb_source *source,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out)
{
- return source->for_each_object(source, request, cb, cb_data, flags);
+ return source->find_abbrev_len(source, oid, min_len, out);
}
/*
@@ -404,7 +405,7 @@ static inline int odb_source_write_object(struct odb_source *source,
enum object_type type,
struct object_id *oid,
struct object_id *compat_oid,
- unsigned flags)
+ enum odb_write_object_flags flags)
{
return source->write_object(source, buf, len, type, oid,
compat_oid, flags);
diff --git a/odb/streaming.c b/odb/streaming.c
index a4355cd245..5927a12954 100644
--- a/odb/streaming.c
+++ b/odb/streaming.c
@@ -7,6 +7,7 @@
#include "environment.h"
#include "repository.h"
#include "odb.h"
+#include "odb/source.h"
#include "odb/streaming.h"
#include "replace-object.h"
diff --git a/oidtree.c b/oidtree.c
index 324de94934..ab9fe7ec7a 100644
--- a/oidtree.c
+++ b/oidtree.c
@@ -6,14 +6,6 @@
#include "oidtree.h"
#include "hash.h"
-struct oidtree_iter_data {
- oidtree_iter fn;
- void *arg;
- size_t *last_nibble_at;
- uint32_t algo;
- uint8_t last_byte;
-};
-
void oidtree_init(struct oidtree *ot)
{
cb_init(&ot->tree);
@@ -54,8 +46,7 @@ void oidtree_insert(struct oidtree *ot, const struct object_id *oid)
cb_insert(&ot->tree, on, sizeof(*oid));
}
-
-int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
+bool oidtree_contains(struct oidtree *ot, const struct object_id *oid)
{
struct object_id k;
size_t klen = sizeof(k);
@@ -69,41 +60,51 @@ int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
klen += BUILD_ASSERT_OR_ZERO(offsetof(struct object_id, hash) <
offsetof(struct object_id, algo));
- return cb_lookup(&ot->tree, (const uint8_t *)&k, klen) ? 1 : 0;
+ return !!cb_lookup(&ot->tree, (const uint8_t *)&k, klen);
}
-static enum cb_next iter(struct cb_node *n, void *arg)
+struct oidtree_each_data {
+ oidtree_each_cb cb;
+ void *cb_data;
+ size_t *last_nibble_at;
+ uint32_t algo;
+ uint8_t last_byte;
+};
+
+static int iter(struct cb_node *n, void *cb_data)
{
- struct oidtree_iter_data *x = arg;
+ struct oidtree_each_data *data = cb_data;
struct object_id k;
/* Copy to provide 4-byte alignment needed by struct object_id. */
memcpy(&k, n->k, sizeof(k));
- if (x->algo != GIT_HASH_UNKNOWN && x->algo != k.algo)
- return CB_CONTINUE;
+ if (data->algo != GIT_HASH_UNKNOWN && data->algo != k.algo)
+ return 0;
- if (x->last_nibble_at) {
- if ((k.hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0)
- return CB_CONTINUE;
+ if (data->last_nibble_at) {
+ if ((k.hash[*data->last_nibble_at] ^ data->last_byte) & 0xf0)
+ return 0;
}
- return x->fn(&k, x->arg);
+ return data->cb(&k, data->cb_data);
}
-void oidtree_each(struct oidtree *ot, const struct object_id *oid,
- size_t oidhexsz, oidtree_iter fn, void *arg)
+int oidtree_each(struct oidtree *ot, const struct object_id *prefix,
+ size_t prefix_hex_len, oidtree_each_cb cb, void *cb_data)
{
- size_t klen = oidhexsz / 2;
- struct oidtree_iter_data x = { 0 };
- assert(oidhexsz <= GIT_MAX_HEXSZ);
+ struct oidtree_each_data data = {
+ .cb = cb,
+ .cb_data = cb_data,
+ .algo = prefix->algo,
+ };
+ size_t klen = prefix_hex_len / 2;
+ assert(prefix_hex_len <= GIT_MAX_HEXSZ);
- x.fn = fn;
- x.arg = arg;
- x.algo = oid->algo;
- if (oidhexsz & 1) {
- x.last_byte = oid->hash[klen];
- x.last_nibble_at = &klen;
+ if (prefix_hex_len & 1) {
+ data.last_byte = prefix->hash[klen];
+ data.last_nibble_at = &klen;
}
- cb_each(&ot->tree, (const uint8_t *)oid, klen, iter, &x);
+
+ return cb_each(&ot->tree, prefix->hash, klen, iter, &data);
}
diff --git a/oidtree.h b/oidtree.h
index 77898f510a..2b7bad2e60 100644
--- a/oidtree.h
+++ b/oidtree.h
@@ -5,18 +5,52 @@
#include "hash.h"
#include "mem-pool.h"
+/*
+ * OID trees are an efficient storage for object IDs that use a critbit tree
+ * internally. Common prefixes are duplicated and object IDs are stored in a
+ * way that allow easy iteration over the objects in lexicographic order. As a
+ * consequence, operations that want to enumerate all object IDs that match a
+ * given prefix can be answered efficiently.
+ *
+ * Note that it is not (yet) possible to store data other than the object IDs
+ * themselves in this tree.
+ */
struct oidtree {
struct cb_tree tree;
struct mem_pool mem_pool;
};
-void oidtree_init(struct oidtree *);
-void oidtree_clear(struct oidtree *);
-void oidtree_insert(struct oidtree *, const struct object_id *);
-int oidtree_contains(struct oidtree *, const struct object_id *);
+/* Initialize the oidtree so that it is ready for use. */
+void oidtree_init(struct oidtree *ot);
-typedef enum cb_next (*oidtree_iter)(const struct object_id *, void *data);
-void oidtree_each(struct oidtree *, const struct object_id *,
- size_t oidhexsz, oidtree_iter, void *data);
+/*
+ * Release all memory associated with the oidtree and reinitialize it for
+ * subsequent use.
+ */
+void oidtree_clear(struct oidtree *ot);
+
+/* Insert the object ID into the tree. */
+void oidtree_insert(struct oidtree *ot, const struct object_id *oid);
+
+/* Check whether the tree contains the given object ID. */
+bool oidtree_contains(struct oidtree *ot, const struct object_id *oid);
+
+/*
+ * Callback function used for `oidtree_each()`. Returning a non-zero exit code
+ * will cause iteration to stop. The exit code will be propagated to the caller
+ * of `oidtree_each()`.
+ */
+typedef int (*oidtree_each_cb)(const struct object_id *oid,
+ void *cb_data);
+
+/*
+ * Iterate through all object IDs in the tree whose prefix matches the given
+ * object ID prefix and invoke the callback function on each of them.
+ *
+ * Returns any non-zero exit code from the provided callback function.
+ */
+int oidtree_each(struct oidtree *ot,
+ const struct object_id *prefix, size_t prefix_hex_len,
+ oidtree_each_cb cb, void *cb_data);
#endif /* OIDTREE_H */
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 22419bfb33..f6ec18d83a 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -441,11 +441,11 @@ char *midx_bitmap_filename(struct multi_pack_index *midx)
struct strbuf buf = STRBUF_INIT;
if (midx->has_chain)
get_split_midx_filename_ext(midx->source, &buf,
- get_midx_checksum(midx),
+ midx_get_checksum_hash(midx),
MIDX_EXT_BITMAP);
else
get_midx_filename_ext(midx->source, &buf,
- get_midx_checksum(midx),
+ midx_get_checksum_hash(midx),
MIDX_EXT_BITMAP);
return strbuf_detach(&buf, NULL);
@@ -502,7 +502,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
if (load_bitmap_header(bitmap_git) < 0)
goto cleanup;
- if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum,
+ if (!hasheq(midx_get_checksum_hash(bitmap_git->midx), bitmap_git->checksum,
bitmap_repo(bitmap_git)->hash_algo)) {
error(_("checksum doesn't match in MIDX and bitmap"));
goto cleanup;
@@ -2819,8 +2819,7 @@ void test_bitmap_walk(struct rev_info *revs)
if (bitmap_is_midx(found))
fprintf_ln(stderr, "Located via MIDX '%s'.",
- hash_to_hex_algop(get_midx_checksum(found->midx),
- revs->repo->hash_algo));
+ midx_get_checksum_hex(found->midx));
else
fprintf_ln(stderr, "Located via pack '%s'.",
hash_to_hex_algop(found->pack->hash,
diff --git a/pack-check.c b/pack-check.c
index 7378c80730..79992bb509 100644
--- a/pack-check.c
+++ b/pack-check.c
@@ -53,6 +53,7 @@ static int verify_packfile(struct repository *r,
struct packed_git *p,
struct pack_window **w_curs,
verify_fn fn,
+ void *fn_data,
struct progress *progress, uint32_t base_count)
{
@@ -161,7 +162,7 @@ static int verify_packfile(struct repository *r,
oid_to_hex(&oid), p->pack_name);
else if (fn) {
int eaten = 0;
- err |= fn(&oid, type, size, data, &eaten);
+ err |= fn(&oid, type, size, data, &eaten, fn_data);
if (eaten)
data = NULL;
}
@@ -192,7 +193,7 @@ int verify_pack_index(struct packed_git *p)
return err;
}
-int verify_pack(struct repository *r, struct packed_git *p, verify_fn fn,
+int verify_pack(struct repository *r, struct packed_git *p, verify_fn fn, void *fn_data,
struct progress *progress, uint32_t base_count)
{
int err = 0;
@@ -202,7 +203,7 @@ int verify_pack(struct repository *r, struct packed_git *p, verify_fn fn,
if (!p->index_data)
return -1;
- err |= verify_packfile(r, p, &w_curs, fn, progress, base_count);
+ err |= verify_packfile(r, p, &w_curs, fn, fn_data, progress, base_count);
unuse_pack(&w_curs);
return err;
diff --git a/pack-revindex.c b/pack-revindex.c
index 1fe0afe899..1b67863606 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -394,11 +394,11 @@ int load_midx_revindex(struct multi_pack_index *m)
if (m->has_chain)
get_split_midx_filename_ext(m->source, &revindex_name,
- get_midx_checksum(m),
+ midx_get_checksum_hash(m),
MIDX_EXT_REV);
else
get_midx_filename_ext(m->source, &revindex_name,
- get_midx_checksum(m),
+ midx_get_checksum_hash(m),
MIDX_EXT_REV);
ret = load_revindex_from_disk(m->source->odb->repo->hash_algo,
diff --git a/pack.h b/pack.h
index ec76472e49..1cde92082b 100644
--- a/pack.h
+++ b/pack.h
@@ -85,7 +85,11 @@ struct pack_idx_entry {
struct progress;
/* Note, the data argument could be NULL if object type is blob */
-typedef int (*verify_fn)(const struct object_id *, enum object_type, unsigned long, void*, int*);
+typedef int (*verify_fn)(const struct object_id *oid,
+ enum object_type type,
+ unsigned long size,
+ void *buffer, int *eaten,
+ void *fn_data);
const char *write_idx_file(struct repository *repo,
const char *index_name,
@@ -95,7 +99,8 @@ const char *write_idx_file(struct repository *repo,
const unsigned char *sha1);
int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
int verify_pack_index(struct packed_git *);
-int verify_pack(struct repository *, struct packed_git *, verify_fn fn, struct progress *, uint32_t);
+int verify_pack(struct repository *, struct packed_git *, verify_fn fn, void *fn_data,
+ struct progress *, uint32_t);
off_t write_pack_header(struct hashfile *f, uint32_t);
void fixup_pack_header_footer(const struct git_hash_algo *, int,
unsigned char *, const char *, uint32_t,
diff --git a/packfile.c b/packfile.c
index 215a23e42b..b012d648ad 100644
--- a/packfile.c
+++ b/packfile.c
@@ -1101,37 +1101,35 @@ struct packfile_list_entry *packfile_store_get_packs(struct packfile_store *stor
return store->packs.head;
}
-/*
- * Give a fast, rough count of the number of objects in the repository. This
- * ignores loose objects completely. If you have a lot of them, then either
- * you should repack because your performance will be awful, or they are
- * all unreachable objects about to be pruned, in which case they're not really
- * interesting as a measure of repo size in the first place.
- */
-unsigned long repo_approximate_object_count(struct repository *r)
+int packfile_store_count_objects(struct packfile_store *store,
+ enum odb_count_objects_flags flags UNUSED,
+ unsigned long *out)
{
- if (!r->objects->approximate_object_count_valid) {
- struct odb_source *source;
- unsigned long count = 0;
- struct packed_git *p;
+ struct packfile_list_entry *e;
+ struct multi_pack_index *m;
+ unsigned long count = 0;
+ int ret;
- odb_prepare_alternates(r->objects);
+ m = get_multi_pack_index(store->source);
+ if (m)
+ count += m->num_objects + m->num_objects_in_base;
- for (source = r->objects->sources; source; source = source->next) {
- struct multi_pack_index *m = get_multi_pack_index(source);
- if (m)
- count += m->num_objects + m->num_objects_in_base;
+ for (e = packfile_store_get_packs(store); e; e = e->next) {
+ if (e->pack->multi_pack_index)
+ continue;
+ if (open_pack_index(e->pack)) {
+ ret = -1;
+ goto out;
}
- repo_for_each_pack(r, p) {
- if (p->multi_pack_index || open_pack_index(p))
- continue;
- count += p->num_objects;
- }
- r->objects->approximate_object_count = count;
- r->objects->approximate_object_count_valid = 1;
+ count += e->pack->num_objects;
}
- return r->objects->approximate_object_count;
+
+ *out = count;
+ ret = 0;
+
+out:
+ return ret;
}
unsigned long unpack_object_header_buffer(const unsigned char *buf,
@@ -2246,7 +2244,8 @@ struct packed_git **packfile_store_get_kept_pack_cache(struct packfile_store *st
struct packed_git *p = e->pack;
if ((p->pack_keep && (flags & KEPT_PACK_ON_DISK)) ||
- (p->pack_keep_in_core && (flags & KEPT_PACK_IN_CORE))) {
+ (p->pack_keep_in_core && (flags & KEPT_PACK_IN_CORE)) ||
+ (p->pack_keep_in_core_open && (flags & KEPT_PACK_IN_CORE_OPEN))) {
ALLOC_GROW(packs, nr + 1, alloc);
packs[nr++] = p;
}
@@ -2301,7 +2300,7 @@ int has_object_kept_pack(struct repository *r, const struct object_id *oid,
int for_each_object_in_pack(struct packed_git *p,
each_packed_object_fn cb, void *data,
- unsigned flags)
+ enum odb_for_each_object_flags flags)
{
uint32_t i;
int r = 0;
@@ -2373,11 +2372,182 @@ static int packfile_store_for_each_object_wrapper(const struct object_id *oid,
}
}
+static int match_hash(unsigned len, const unsigned char *a, const unsigned char *b)
+{
+ do {
+ if (*a != *b)
+ return 0;
+ a++;
+ b++;
+ len -= 2;
+ } while (len > 1);
+ if (len)
+ if ((*a ^ *b) & 0xf0)
+ return 0;
+ return 1;
+}
+
+static int for_each_prefixed_object_in_midx(
+ struct packfile_store *store,
+ struct multi_pack_index *m,
+ const struct odb_for_each_object_options *opts,
+ struct packfile_store_for_each_object_wrapper_data *data)
+{
+ int ret;
+
+ for (; m; m = m->base_midx) {
+ uint32_t num, i, first = 0;
+ int len = opts->prefix_hex_len > m->source->odb->repo->hash_algo->hexsz ?
+ m->source->odb->repo->hash_algo->hexsz : opts->prefix_hex_len;
+
+ if (!m->num_objects)
+ continue;
+
+ num = m->num_objects + m->num_objects_in_base;
+
+ bsearch_one_midx(opts->prefix, m, &first);
+
+ /*
+ * At this point, "first" is the location of the lowest
+ * object with an object name that could match "opts->prefix".
+ * See if we have 0, 1 or more objects that actually match(es).
+ */
+ for (i = first; i < num; i++) {
+ const struct object_id *current = NULL;
+ struct object_id oid;
+
+ current = nth_midxed_object_oid(&oid, m, i);
+
+ if (!match_hash(len, opts->prefix->hash, current->hash))
+ break;
+
+ if (data->request) {
+ struct object_info oi = *data->request;
+
+ ret = packfile_store_read_object_info(store, current,
+ &oi, 0);
+ if (ret)
+ goto out;
+
+ ret = data->cb(&oid, &oi, data->cb_data);
+ if (ret)
+ goto out;
+ } else {
+ ret = data->cb(&oid, NULL, data->cb_data);
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int for_each_prefixed_object_in_pack(
+ struct packfile_store *store,
+ struct packed_git *p,
+ const struct odb_for_each_object_options *opts,
+ struct packfile_store_for_each_object_wrapper_data *data)
+{
+ uint32_t num, i, first = 0;
+ int len = opts->prefix_hex_len > p->repo->hash_algo->hexsz ?
+ p->repo->hash_algo->hexsz : opts->prefix_hex_len;
+ int ret;
+
+ num = p->num_objects;
+ bsearch_pack(opts->prefix, p, &first);
+
+ /*
+ * At this point, "first" is the location of the lowest object
+ * with an object name that could match "bin_pfx". See if we have
+ * 0, 1 or more objects that actually match(es).
+ */
+ for (i = first; i < num; i++) {
+ struct object_id oid;
+
+ nth_packed_object_id(&oid, p, i);
+ if (!match_hash(len, opts->prefix->hash, oid.hash))
+ break;
+
+ if (data->request) {
+ struct object_info oi = *data->request;
+
+ ret = packfile_store_read_object_info(store, &oid, &oi, 0);
+ if (ret)
+ goto out;
+
+ ret = data->cb(&oid, &oi, data->cb_data);
+ if (ret)
+ goto out;
+ } else {
+ ret = data->cb(&oid, NULL, data->cb_data);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int packfile_store_for_each_prefixed_object(
+ struct packfile_store *store,
+ const struct odb_for_each_object_options *opts,
+ struct packfile_store_for_each_object_wrapper_data *data)
+{
+ struct packfile_list_entry *e;
+ struct multi_pack_index *m;
+ bool pack_errors = false;
+ int ret;
+
+ if (opts->flags)
+ BUG("flags unsupported");
+
+ store->skip_mru_updates = true;
+
+ m = get_multi_pack_index(store->source);
+ if (m) {
+ ret = for_each_prefixed_object_in_midx(store, m, opts, data);
+ if (ret)
+ goto out;
+ }
+
+ for (e = packfile_store_get_packs(store); e; e = e->next) {
+ if (e->pack->multi_pack_index)
+ continue;
+
+ if (open_pack_index(e->pack)) {
+ pack_errors = true;
+ continue;
+ }
+
+ if (!e->pack->num_objects)
+ continue;
+
+ ret = for_each_prefixed_object_in_pack(store, e->pack, opts, data);
+ if (ret)
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ store->skip_mru_updates = false;
+ if (!ret && pack_errors)
+ ret = -1;
+ return ret;
+}
+
int packfile_store_for_each_object(struct packfile_store *store,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags)
+ const struct odb_for_each_object_options *opts)
{
struct packfile_store_for_each_object_wrapper_data data = {
.store = store,
@@ -2388,20 +2558,23 @@ int packfile_store_for_each_object(struct packfile_store *store,
struct packfile_list_entry *e;
int pack_errors = 0, ret;
+ if (opts->prefix)
+ return packfile_store_for_each_prefixed_object(store, opts, &data);
+
store->skip_mru_updates = true;
for (e = packfile_store_get_packs(store); e; e = e->next) {
struct packed_git *p = e->pack;
- if ((flags & ODB_FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
+ if ((opts->flags & ODB_FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
- if ((flags & ODB_FOR_EACH_OBJECT_PROMISOR_ONLY) &&
+ if ((opts->flags & ODB_FOR_EACH_OBJECT_PROMISOR_ONLY) &&
!p->pack_promisor)
continue;
- if ((flags & ODB_FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS) &&
+ if ((opts->flags & ODB_FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS) &&
p->pack_keep_in_core)
continue;
- if ((flags & ODB_FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS) &&
+ if ((opts->flags & ODB_FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS) &&
p->pack_keep)
continue;
if (open_pack_index(p)) {
@@ -2410,7 +2583,7 @@ int packfile_store_for_each_object(struct packfile_store *store,
}
ret = for_each_object_in_pack(p, packfile_store_for_each_object_wrapper,
- &data, flags);
+ &data, opts->flags);
if (ret)
goto out;
}
@@ -2425,6 +2598,117 @@ out:
return ret;
}
+static int extend_abbrev_len(const struct object_id *a,
+ const struct object_id *b,
+ unsigned *out)
+{
+ unsigned len = oid_common_prefix_hexlen(a, b);
+ if (len != hash_algos[a->algo].hexsz && len >= *out)
+ *out = len + 1;
+ return 0;
+}
+
+static void find_abbrev_len_for_midx(struct multi_pack_index *m,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out)
+{
+ unsigned len = min_len;
+
+ for (; m; m = m->base_midx) {
+ int match = 0;
+ uint32_t num, first = 0;
+ struct object_id found_oid;
+
+ if (!m->num_objects)
+ continue;
+
+ num = m->num_objects + m->num_objects_in_base;
+ match = bsearch_one_midx(oid, m, &first);
+
+ /*
+ * first is now the position in the packfile where we
+ * would insert the object ID if it does not exist (or the
+ * position of the object ID if it does exist). Hence, we
+ * consider a maximum of two objects nearby for the
+ * abbreviation length.
+ */
+
+ if (!match) {
+ if (nth_midxed_object_oid(&found_oid, m, first))
+ extend_abbrev_len(&found_oid, oid, &len);
+ } else if (first < num - 1) {
+ if (nth_midxed_object_oid(&found_oid, m, first + 1))
+ extend_abbrev_len(&found_oid, oid, &len);
+ }
+ if (first > 0) {
+ if (nth_midxed_object_oid(&found_oid, m, first - 1))
+ extend_abbrev_len(&found_oid, oid, &len);
+ }
+ }
+
+ *out = len;
+}
+
+static void find_abbrev_len_for_pack(struct packed_git *p,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out)
+{
+ int match;
+ uint32_t num, first = 0;
+ struct object_id found_oid;
+ unsigned len = min_len;
+
+ num = p->num_objects;
+ match = bsearch_pack(oid, p, &first);
+
+ /*
+ * first is now the position in the packfile where we would insert
+ * the object ID if it does not exist (or the position of mad->hash if
+ * it does exist). Hence, we consider a maximum of two objects
+ * nearby for the abbreviation length.
+ */
+ if (!match) {
+ if (!nth_packed_object_id(&found_oid, p, first))
+ extend_abbrev_len(&found_oid, oid, &len);
+ } else if (first < num - 1) {
+ if (!nth_packed_object_id(&found_oid, p, first + 1))
+ extend_abbrev_len(&found_oid, oid, &len);
+ }
+ if (first > 0) {
+ if (!nth_packed_object_id(&found_oid, p, first - 1))
+ extend_abbrev_len(&found_oid, oid, &len);
+ }
+
+ *out = len;
+}
+
+int packfile_store_find_abbrev_len(struct packfile_store *store,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out)
+{
+ struct packfile_list_entry *e;
+ struct multi_pack_index *m;
+
+ m = get_multi_pack_index(store->source);
+ if (m)
+ find_abbrev_len_for_midx(m, oid, min_len, &min_len);
+
+ for (e = packfile_store_get_packs(store); e; e = e->next) {
+ if (e->pack->multi_pack_index)
+ continue;
+ if (open_pack_index(e->pack) || !e->pack->num_objects)
+ continue;
+
+ find_abbrev_len_for_pack(e->pack, oid, min_len, &min_len);
+ }
+
+ *out = min_len;
+ return 0;
+}
+
struct add_promisor_object_data {
struct repository *repo;
struct oidset *set;
diff --git a/packfile.h b/packfile.h
index 8b04a258a7..9b647da7dd 100644
--- a/packfile.h
+++ b/packfile.h
@@ -28,6 +28,7 @@ struct packed_git {
unsigned pack_local:1,
pack_keep:1,
pack_keep_in_core:1,
+ pack_keep_in_core_open:1,
freshened:1,
do_not_close:1,
pack_promisor:1,
@@ -266,9 +267,20 @@ int packfile_store_freshen_object(struct packfile_store *store,
enum kept_pack_type {
KEPT_PACK_ON_DISK = (1 << 0),
KEPT_PACK_IN_CORE = (1 << 1),
+ KEPT_PACK_IN_CORE_OPEN = (1 << 2),
};
/*
+ * Count the number of objects contained in the given packfile store. If
+ * successful, the number of objects will be written to the `out` pointer.
+ *
+ * Return 0 on success, a negative error code otherwise.
+ */
+int packfile_store_count_objects(struct packfile_store *store,
+ enum odb_count_objects_flags flags,
+ unsigned long *out);
+
+/*
* Retrieve the cache of kept packs from the given packfile store. Accepts a
* combination of `kept_pack_type` flags. The cache is computed on demand and
* will be recomputed whenever the flags change.
@@ -342,7 +354,7 @@ typedef int each_packed_object_fn(const struct object_id *oid,
void *data);
int for_each_object_in_pack(struct packed_git *p,
each_packed_object_fn, void *data,
- unsigned flags);
+ enum odb_for_each_object_flags flags);
/*
* Iterate through all packed objects in the given packfile store and invoke
@@ -357,7 +369,12 @@ int packfile_store_for_each_object(struct packfile_store *store,
const struct object_info *request,
odb_for_each_object_cb cb,
void *cb_data,
- unsigned flags);
+ const struct odb_for_each_object_options *opts);
+
+int packfile_store_find_abbrev_len(struct packfile_store *store,
+ const struct object_id *oid,
+ unsigned min_len,
+ unsigned *out);
/* A hook to report invalid files in pack directory */
#define PACKDIR_FILE_PACK 1
@@ -365,12 +382,6 @@ int packfile_store_for_each_object(struct packfile_store *store,
#define PACKDIR_FILE_GARBAGE 4
extern void (*report_garbage)(unsigned seen_bits, const char *path);
-/*
- * Give a rough count of objects in the repository. This sacrifices accuracy
- * for speed.
- */
-unsigned long repo_approximate_object_count(struct repository *r);
-
void pack_report(struct repository *repo);
/*
diff --git a/path-walk.c b/path-walk.c
index 364e4cfa19..6e426af433 100644
--- a/path-walk.c
+++ b/path-walk.c
@@ -11,6 +11,7 @@
#include "list-objects.h"
#include "object.h"
#include "oid-array.h"
+#include "path.h"
#include "prio-queue.h"
#include "repository.h"
#include "revision.h"
@@ -62,6 +63,8 @@ struct path_walk_context {
*/
struct prio_queue path_stack;
struct strset path_stack_pushed;
+
+ unsigned exact_pathspecs:1;
};
static int compare_by_type(const void *one, const void *two, void *cb_data)
@@ -171,7 +174,7 @@ static int add_tree_entries(struct path_walk_context *ctx,
if (!o) {
error(_("failed to find object %s"),
- oid_to_hex(&o->oid));
+ oid_to_hex(&entry.oid));
return -1;
}
@@ -206,6 +209,33 @@ static int add_tree_entries(struct path_walk_context *ctx,
match != MATCHED)
continue;
}
+ if (ctx->revs->prune_data.nr && ctx->exact_pathspecs) {
+ struct pathspec *pd = &ctx->revs->prune_data;
+ bool found = false;
+ int did_strip_suffix = strbuf_strip_suffix(&path, "/");
+
+
+ for (int i = 0; i < pd->nr; i++) {
+ struct pathspec_item *item = &pd->items[i];
+
+ /*
+ * Keep the path if either is a directory
+ * prefix of the other.
+ */
+ if (dir_prefix(path.buf, item->match) ||
+ dir_prefix(item->match, path.buf)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (did_strip_suffix)
+ strbuf_addch(&path, '/');
+
+ /* Skip paths that do not match the prefix. */
+ if (!found)
+ continue;
+ }
add_path_to_list(ctx, path.buf, type, &entry.oid,
!(o->flags & UNINTERESTING));
@@ -274,6 +304,13 @@ static int walk_path(struct path_walk_context *ctx,
return 0;
}
+ if (list->type == OBJ_BLOB &&
+ ctx->revs->prune_data.nr &&
+ !match_pathspec(ctx->repo->index, &ctx->revs->prune_data,
+ path, strlen(path), 0,
+ NULL, 0))
+ return 0;
+
/* Evaluate function pointer on this data, if requested. */
if ((list->type == OBJ_TREE && ctx->info->trees) ||
(list->type == OBJ_BLOB && ctx->info->blobs) ||
@@ -481,6 +518,12 @@ int walk_objects_by_path(struct path_walk_info *info)
if (info->tags)
info->revs->tag_objects = 1;
+ if (ctx.revs->prune_data.nr) {
+ if (!ctx.revs->prune_data.has_wildcard &&
+ !ctx.revs->prune_data.magic)
+ ctx.exact_pathspecs = 1;
+ }
+
/* Insert a single list for the root tree into the paths. */
CALLOC_ARRAY(root_tree_list, 1);
root_tree_list->type = OBJ_TREE;
diff --git a/path.c b/path.c
index c285357859..d7e17bf174 100644
--- a/path.c
+++ b/path.c
@@ -56,7 +56,7 @@ static void strbuf_cleanup_path(struct strbuf *sb)
strbuf_remove(sb, 0, path - sb->buf);
}
-static int dir_prefix(const char *buf, const char *dir)
+int dir_prefix(const char *buf, const char *dir)
{
size_t len = strlen(dir);
return !strncmp(buf, dir, len) &&
diff --git a/path.h b/path.h
index cbcad254a0..0434ba5e07 100644
--- a/path.h
+++ b/path.h
@@ -112,6 +112,12 @@ const char *repo_submodule_path_replace(struct repository *repo,
const char *fmt, ...)
__attribute__((format (printf, 4, 5)));
+/*
+ * Given a directory name 'dir' (not ending with a trailing '/'),
+ * determine if 'buf' is equal to 'dir' or has prefix 'dir'+'/'.
+ */
+int dir_prefix(const char *buf, const char *dir);
+
void report_linked_checkout_garbage(struct repository *r);
/*
diff --git a/pretty.c b/pretty.c
index ebf4da4834..814803980b 100644
--- a/pretty.c
+++ b/pretty.c
@@ -1549,6 +1549,21 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
if (!commit->object.parsed)
parse_object(the_repository, &commit->object.oid);
+ if (starts_with(placeholder, "(count)")) {
+ if (!c->pretty_ctx->rev)
+ die(_("%s is not supported by this command"), "%(count)");
+ strbuf_addf(sb, "%0*d", decimal_width(c->pretty_ctx->rev->total),
+ c->pretty_ctx->rev->nr);
+ return 7;
+ }
+
+ if (starts_with(placeholder, "(total)")) {
+ if (!c->pretty_ctx->rev)
+ die(_("%s is not supported by this command"), "%(total)");
+ strbuf_addf(sb, "%d", c->pretty_ctx->rev->total);
+ return 7;
+ }
+
switch (placeholder[0]) {
case 'H': /* commit hash */
strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_COMMIT));
diff --git a/range-diff.c b/range-diff.c
index 57edff40a8..2712a9a107 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -140,7 +140,7 @@ static int read_patches(const char *range, struct string_list *list,
if (eol)
*eol = '\n';
orig_len = len;
- len = parse_git_diff_header(&root, &linenr, 0, line,
+ len = parse_git_diff_header(&root, NULL, &linenr, 0, line,
len, size, &patch);
if (len < 0) {
error(_("could not parse git header '%.*s'"),
diff --git a/read-cache.c b/read-cache.c
index 5049f9baca..38a04b8de3 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -2309,13 +2309,9 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
}
munmap((void *)mmap, mmap_size);
- /*
- * TODO trace2: replace "the_repository" with the actual repo instance
- * that is associated with the given "istate".
- */
- trace2_data_intmax("index", the_repository, "read/version",
+ trace2_data_intmax("index", istate->repo, "read/version",
istate->version);
- trace2_data_intmax("index", the_repository, "read/cache_nr",
+ trace2_data_intmax("index", istate->repo, "read/cache_nr",
istate->cache_nr);
/*
@@ -2360,16 +2356,12 @@ int read_index_from(struct index_state *istate, const char *path,
if (istate->initialized)
return istate->cache_nr;
- /*
- * TODO trace2: replace "the_repository" with the actual repo instance
- * that is associated with the given "istate".
- */
- trace2_region_enter_printf("index", "do_read_index", the_repository,
+ trace2_region_enter_printf("index", "do_read_index", istate->repo,
"%s", path);
trace_performance_enter();
ret = do_read_index(istate, path, 0);
trace_performance_leave("read cache %s", path);
- trace2_region_leave_printf("index", "do_read_index", the_repository,
+ trace2_region_leave_printf("index", "do_read_index", istate->repo,
"%s", path);
split_index = istate->split_index;
@@ -3096,13 +3088,9 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
istate->timestamp.nsec = ST_MTIME_NSEC(st);
trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
- /*
- * TODO trace2: replace "the_repository" with the actual repo instance
- * that is associated with the given "istate".
- */
- trace2_data_intmax("index", the_repository, "write/version",
+ trace2_data_intmax("index", istate->repo, "write/version",
istate->version);
- trace2_data_intmax("index", the_repository, "write/cache_nr",
+ trace2_data_intmax("index", istate->repo, "write/cache_nr",
istate->cache_nr);
ret = 0;
@@ -3144,14 +3132,10 @@ static int do_write_locked_index(struct index_state *istate,
return ret;
}
- /*
- * TODO trace2: replace "the_repository" with the actual repo instance
- * that is associated with the given "istate".
- */
- trace2_region_enter_printf("index", "do_write_index", the_repository,
+ trace2_region_enter_printf("index", "do_write_index", istate->repo,
"%s", get_lock_file_path(lock));
ret = do_write_index(istate, lock->tempfile, write_extensions, flags);
- trace2_region_leave_printf("index", "do_write_index", the_repository,
+ trace2_region_leave_printf("index", "do_write_index", istate->repo,
"%s", get_lock_file_path(lock));
if (was_full)
@@ -4049,6 +4033,7 @@ int add_files_to_cache(struct repository *repo, const char *prefix,
rev.diffopt.format_callback = update_callback;
rev.diffopt.format_callback_data = &data;
rev.diffopt.flags.override_submodule_config = 1;
+ rev.diffopt.detect_rename = 0; /* staging worktree changes does not need renames */
rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
/*
diff --git a/reflog.c b/reflog.c
index 1460ae9d0d..82337078d0 100644
--- a/reflog.c
+++ b/reflog.c
@@ -168,7 +168,7 @@ static int tree_is_complete(const struct object_id *oid)
complete = 1;
while (tree_entry(&desc, &entry)) {
if (!odb_has_object(the_repository->objects, &entry.oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) ||
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR) ||
(S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
tree->object.flags |= INCOMPLETE;
complete = 0;
diff --git a/refs.c b/refs.c
index 6fb8f9d10c..bfcb9c7ac3 100644
--- a/refs.c
+++ b/refs.c
@@ -64,6 +64,9 @@ const char *ref_storage_format_to_name(enum ref_storage_format ref_storage_forma
return be->name;
}
+static const char *abort_by_ref_transaction_hook =
+ N_("in '%s' phase, update aborted by the reference-transaction hook");
+
/*
* How to handle various characters in refnames:
* 0: An acceptable character for refs
@@ -422,7 +425,7 @@ int ref_resolves_to_object(const char *refname,
if (flags & REF_ISBROKEN)
return 0;
if (!odb_has_object(repo->objects, oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR)) {
error(_("%s does not point to a valid object!"), refname);
return 0;
}
@@ -740,7 +743,8 @@ static char *substitute_branch_name(struct repository *r,
return NULL;
}
-void copy_branchname(struct strbuf *sb, const char *name, unsigned allowed)
+void copy_branchname(struct strbuf *sb, const char *name,
+ enum interpret_branch_kind allowed)
{
int len = strlen(name);
struct interpret_branch_name_options options = {
@@ -2591,7 +2595,8 @@ static int transaction_hook_feed_stdin(int hook_stdin_fd, void *pp_cb, void *pp_
static void *transaction_feed_cb_data_alloc(void *feed_pipe_ctx UNUSED)
{
- struct transaction_feed_cb_data *data = xmalloc(sizeof(*data));
+ struct transaction_feed_cb_data *data;
+ CALLOC_ARRAY(data, 1);
strbuf_init(&data->buf, 0);
data->index = 0;
return data;
@@ -2655,6 +2660,13 @@ int ref_transaction_prepare(struct ref_transaction *transaction,
if (ref_update_reject_duplicates(&transaction->refnames, err))
return REF_TRANSACTION_ERROR_GENERIC;
+ /* Preparing checks before locking references */
+ ret = run_transaction_hook(transaction, "preparing");
+ if (ret) {
+ ref_transaction_abort(transaction, err);
+ die(_(abort_by_ref_transaction_hook), "preparing");
+ }
+
ret = refs->be->transaction_prepare(refs, transaction, err);
if (ret)
return ret;
@@ -2662,7 +2674,7 @@ int ref_transaction_prepare(struct ref_transaction *transaction,
ret = run_transaction_hook(transaction, "prepared");
if (ret) {
ref_transaction_abort(transaction, err);
- die(_("ref updates aborted by hook"));
+ die(_(abort_by_ref_transaction_hook), "prepared");
}
return 0;
diff --git a/refs.h b/refs.h
index d98c1fc591..d65de6ab5f 100644
--- a/refs.h
+++ b/refs.h
@@ -1,6 +1,7 @@
#ifndef REFS_H
#define REFS_H
+#include "object-name.h"
#include "commit.h"
#include "repository.h"
#include "repo-settings.h"
@@ -225,7 +226,7 @@ char *repo_default_branch_name(struct repository *r, int quiet);
* repo_interpret_branch_name() for details.
*/
void copy_branchname(struct strbuf *sb, const char *name,
- unsigned allowed);
+ enum interpret_branch_kind allowed);
/*
* Like copy_branchname() above, but confirm that the result is
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 7ce0d57478..0537a72b2a 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -1813,7 +1813,7 @@ static int commit_ref(struct ref_lock *lock)
size_t len = strlen(path);
struct strbuf sb_path = STRBUF_INIT;
- strbuf_attach(&sb_path, path, len, len);
+ strbuf_attach(&sb_path, path, len, len + 1);
/*
* If this fails, commit_lock_file() will also fail
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index b124404663..daea30a5b4 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -366,11 +366,6 @@ static int reftable_be_config(const char *var, const char *value,
return 0;
}
-static int reftable_be_fsync(int fd)
-{
- return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
-}
-
static struct ref_store *reftable_be_init(struct repository *repo,
const char *payload,
const char *gitdir,
@@ -408,7 +403,6 @@ static struct ref_store *reftable_be_init(struct repository *repo,
refs->write_options.disable_auto_compact =
!git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
refs->write_options.lock_timeout_ms = 100;
- refs->write_options.fsync = reftable_be_fsync;
repo_config(the_repository, reftable_be_config, &refs->write_options);
diff --git a/refspec.c b/refspec.c
index 0775358d96..fb89bce1db 100644
--- a/refspec.c
+++ b/refspec.c
@@ -85,7 +85,7 @@ static int parse_refspec(struct refspec_item *item, const char *refspec, int fet
if (!*item->src)
return 0; /* negative refspecs must not be empty */
else if (llen == the_hash_algo->hexsz && !get_oid_hex(item->src, &unused))
- return 0; /* negative refpsecs cannot be exact sha1 */
+ return 0; /* negative refspecs cannot be exact sha1 */
else if (!check_refname_format(item->src, flags))
; /* valid looking ref is ok */
else
diff --git a/reftable/blocksource.c b/reftable/blocksource.c
index 573c81287f..7f7441f751 100644
--- a/reftable/blocksource.c
+++ b/reftable/blocksource.c
@@ -93,13 +93,12 @@ void block_source_from_buf(struct reftable_block_source *bs,
}
struct file_block_source {
- uint64_t size;
- unsigned char *data;
+ struct reftable_mmap mmap;
};
static uint64_t file_size(void *b)
{
- return ((struct file_block_source *)b)->size;
+ return ((struct file_block_source *)b)->mmap.size;
}
static void file_release_data(void *b REFTABLE_UNUSED, struct reftable_block_data *dest REFTABLE_UNUSED)
@@ -109,7 +108,7 @@ static void file_release_data(void *b REFTABLE_UNUSED, struct reftable_block_dat
static void file_close(void *v)
{
struct file_block_source *b = v;
- munmap(b->data, b->size);
+ reftable_munmap(&b->mmap);
reftable_free(b);
}
@@ -117,8 +116,8 @@ static ssize_t file_read_data(void *v, struct reftable_block_data *dest, uint64_
uint32_t size)
{
struct file_block_source *b = v;
- assert(off + size <= b->size);
- dest->data = b->data + off;
+ assert(off + size <= b->mmap.size);
+ dest->data = (unsigned char *) b->mmap.data + off;
dest->len = size;
return size;
}
@@ -156,13 +155,9 @@ int reftable_block_source_from_file(struct reftable_block_source *bs,
goto out;
}
- p->size = st.st_size;
- p->data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
- if (p->data == MAP_FAILED) {
- err = REFTABLE_IO_ERROR;
- p->data = NULL;
+ err = reftable_mmap(&p->mmap, fd, st.st_size);
+ if (err < 0)
goto out;
- }
assert(!bs->ops);
bs->ops = &file_vtable;
diff --git a/reftable/fsck.c b/reftable/fsck.c
index 26b9115b14..8e73fc83f2 100644
--- a/reftable/fsck.c
+++ b/reftable/fsck.c
@@ -63,7 +63,7 @@ static int table_check_name(struct reftable_table *table,
static int table_checks(struct reftable_table *table,
reftable_fsck_report_fn report_fn,
- reftable_fsck_verbose_fn verbose_fn UNUSED,
+ reftable_fsck_verbose_fn verbose_fn REFTABLE_UNUSED,
void *cb_data)
{
table_check_fn table_check_fns[] = {
diff --git a/reftable/reftable-basics.h b/reftable/reftable-basics.h
index 6d73f19c85..dc8622682d 100644
--- a/reftable/reftable-basics.h
+++ b/reftable/reftable-basics.h
@@ -9,7 +9,7 @@
#ifndef REFTABLE_BASICS_H
#define REFTABLE_BASICS_H
-#include <stddef.h>
+#include "reftable-system.h"
/* A buffer that contains arbitrary byte slices. */
struct reftable_buf {
diff --git a/reftable/reftable-block.h b/reftable/reftable-block.h
index 0b05a8f7e3..94c79b5c58 100644
--- a/reftable/reftable-block.h
+++ b/reftable/reftable-block.h
@@ -9,8 +9,7 @@
#ifndef REFTABLE_BLOCK_H
#define REFTABLE_BLOCK_H
-#include <stdint.h>
-
+#include "reftable-system.h"
#include "reftable-basics.h"
#include "reftable-blocksource.h"
#include "reftable-iterator.h"
diff --git a/reftable/reftable-blocksource.h b/reftable/reftable-blocksource.h
index f5ba867bd6..40c1e94646 100644
--- a/reftable/reftable-blocksource.h
+++ b/reftable/reftable-blocksource.h
@@ -9,7 +9,7 @@
#ifndef REFTABLE_BLOCKSOURCE_H
#define REFTABLE_BLOCKSOURCE_H
-#include <stdint.h>
+#include "reftable-system.h"
/*
* Generic wrapper for a seekable readable file.
diff --git a/reftable/reftable-error.h b/reftable/reftable-error.h
index d100e0df92..0535e1478b 100644
--- a/reftable/reftable-error.h
+++ b/reftable/reftable-error.h
@@ -9,6 +9,8 @@
#ifndef REFTABLE_ERROR_H
#define REFTABLE_ERROR_H
+#include "reftable-system.h"
+
/*
* Errors in reftable calls are signaled with negative integer return values. 0
* means success.
diff --git a/reftable/reftable-fsck.h b/reftable/reftable-fsck.h
index 007a392cf9..340fc7762e 100644
--- a/reftable/reftable-fsck.h
+++ b/reftable/reftable-fsck.h
@@ -1,6 +1,7 @@
#ifndef REFTABLE_FSCK_H
#define REFTABLE_FSCK_H
+#include "reftable-system.h"
#include "reftable-stack.h"
enum reftable_fsck_error {
diff --git a/reftable/reftable-iterator.h b/reftable/reftable-iterator.h
index af582028c2..a050cc153b 100644
--- a/reftable/reftable-iterator.h
+++ b/reftable/reftable-iterator.h
@@ -9,6 +9,7 @@
#ifndef REFTABLE_ITERATOR_H
#define REFTABLE_ITERATOR_H
+#include "reftable-system.h"
#include "reftable-record.h"
struct reftable_iterator_vtable;
diff --git a/reftable/reftable-merged.h b/reftable/reftable-merged.h
index e5af846b32..02a9966835 100644
--- a/reftable/reftable-merged.h
+++ b/reftable/reftable-merged.h
@@ -9,6 +9,7 @@
#ifndef REFTABLE_MERGED_H
#define REFTABLE_MERGED_H
+#include "reftable-system.h"
#include "reftable-iterator.h"
/*
diff --git a/reftable/reftable-record.h b/reftable/reftable-record.h
index 385a74cc86..e18c538238 100644
--- a/reftable/reftable-record.h
+++ b/reftable/reftable-record.h
@@ -9,8 +9,8 @@
#ifndef REFTABLE_RECORD_H
#define REFTABLE_RECORD_H
+#include "reftable-system.h"
#include "reftable-basics.h"
-#include <stdint.h>
/*
* Basic data types
diff --git a/reftable/reftable-stack.h b/reftable/reftable-stack.h
index c2415cbc6e..5f7be573fa 100644
--- a/reftable/reftable-stack.h
+++ b/reftable/reftable-stack.h
@@ -9,6 +9,7 @@
#ifndef REFTABLE_STACK_H
#define REFTABLE_STACK_H
+#include "reftable-system.h"
#include "reftable-writer.h"
/*
diff --git a/reftable/reftable-system.h b/reftable/reftable-system.h
new file mode 100644
index 0000000000..76f3e33e90
--- /dev/null
+++ b/reftable/reftable-system.h
@@ -0,0 +1,18 @@
+#ifndef REFTABLE_SYSTEM_H
+#define REFTABLE_SYSTEM_H
+
+/*
+ * This header defines the platform-specific bits required to compile the
+ * reftable library. It should provide an environment that bridges over the
+ * gaps between POSIX and your system, as well as the zlib interfaces. This
+ * header is expected to be changed by the individual project.
+ */
+
+#define MINGW_DONT_HANDLE_IN_USE_ERROR
+#include "compat/posix.h"
+#include "compat/zlib-compat.h"
+
+int reftable_fsync(int fd);
+#define fsync(fd) reftable_fsync(fd)
+
+#endif
diff --git a/reftable/reftable-table.h b/reftable/reftable-table.h
index 5f935d02e3..d7666b53a1 100644
--- a/reftable/reftable-table.h
+++ b/reftable/reftable-table.h
@@ -9,6 +9,7 @@
#ifndef REFTABLE_TABLE_H
#define REFTABLE_TABLE_H
+#include "reftable-system.h"
#include "reftable-iterator.h"
#include "reftable-block.h"
#include "reftable-blocksource.h"
diff --git a/reftable/reftable-writer.h b/reftable/reftable-writer.h
index 1e7003cd69..a66db415c8 100644
--- a/reftable/reftable-writer.h
+++ b/reftable/reftable-writer.h
@@ -9,11 +9,9 @@
#ifndef REFTABLE_WRITER_H
#define REFTABLE_WRITER_H
+#include "reftable-system.h"
#include "reftable-record.h"
-#include <stdint.h>
-#include <unistd.h> /* ssize_t */
-
/* Writing single reftables */
/* reftable_write_options sets options for writing a single reftable. */
@@ -64,12 +62,6 @@ struct reftable_write_options {
long lock_timeout_ms;
/*
- * Optional callback used to fsync files to disk. Falls back to using
- * fsync(3P) when unset.
- */
- int (*fsync)(int fd);
-
- /*
* Callback function to execute whenever the stack is being reloaded.
* This can be used e.g. to discard cached information that relies on
* the old stack's data. The payload data will be passed as argument to
diff --git a/reftable/stack.c b/reftable/stack.c
index 1c9f21dfe1..1fba96ddb3 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -29,13 +29,6 @@ static int stack_filename(struct reftable_buf *dest, struct reftable_stack *st,
return 0;
}
-static int stack_fsync(const struct reftable_write_options *opts, int fd)
-{
- if (opts->fsync)
- return opts->fsync(fd);
- return fsync(fd);
-}
-
static ssize_t reftable_write_data(int fd, const void *data, size_t size)
{
size_t total_written = 0;
@@ -69,7 +62,7 @@ static ssize_t fd_writer_write(void *arg, const void *data, size_t sz)
static int fd_writer_flush(void *arg)
{
struct fd_writer *writer = arg;
- return stack_fsync(writer->opts, writer->fd);
+ return fsync(writer->fd);
}
static int fd_read_lines(int fd, char ***namesp)
@@ -372,45 +365,26 @@ done:
return err;
}
-/* return negative if a before b. */
-static int tv_cmp(struct timeval *a, struct timeval *b)
-{
- time_t diff = a->tv_sec - b->tv_sec;
- int udiff = a->tv_usec - b->tv_usec;
-
- if (diff != 0)
- return diff;
-
- return udiff;
-}
-
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
int reuse_open)
{
char **names = NULL, **names_after = NULL;
- struct timeval deadline;
+ uint64_t deadline;
int64_t delay = 0;
int tries = 0, err;
int fd = -1;
- err = gettimeofday(&deadline, NULL);
- if (err < 0)
- goto out;
- deadline.tv_sec += 3;
+ deadline = reftable_time_ms() + 3000;
while (1) {
- struct timeval now;
-
- err = gettimeofday(&now, NULL);
- if (err < 0)
- goto out;
+ uint64_t now = reftable_time_ms();
/*
* Only look at deadlines after the first few times. This
* simplifies debugging in GDB.
*/
tries++;
- if (tries > 3 && tv_cmp(&now, &deadline) >= 0)
+ if (tries > 3 && now >= deadline)
goto out;
fd = open(st->list_file, O_RDONLY);
@@ -812,7 +786,7 @@ int reftable_addition_commit(struct reftable_addition *add)
goto done;
}
- err = stack_fsync(&add->stack->opts, add->tables_list_lock.fd);
+ err = fsync(add->tables_list_lock.fd);
if (err < 0) {
err = REFTABLE_IO_ERROR;
goto done;
@@ -1480,7 +1454,7 @@ static int stack_compact_range(struct reftable_stack *st,
goto done;
}
- err = stack_fsync(&st->opts, tables_list_lock.fd);
+ err = fsync(tables_list_lock.fd);
if (err < 0) {
err = REFTABLE_IO_ERROR;
unlink(new_table_path.buf);
diff --git a/reftable/system.c b/reftable/system.c
index 725a25844e..9063641f30 100644
--- a/reftable/system.c
+++ b/reftable/system.c
@@ -4,7 +4,9 @@
#include "basics.h"
#include "reftable-error.h"
#include "../lockfile.h"
+#include "../trace.h"
#include "../tempfile.h"
+#include "../write-or-die.h"
uint32_t reftable_rand(void)
{
@@ -131,3 +133,33 @@ int flock_commit(struct reftable_flock *l)
return 0;
}
+
+int reftable_fsync(int fd)
+{
+ return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
+}
+
+uint64_t reftable_time_ms(void)
+{
+ return getnanotime() / 1000000;
+}
+
+int reftable_mmap(struct reftable_mmap *out, int fd, size_t len)
+{
+ void *data = xmmap_gently(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (data == MAP_FAILED)
+ return REFTABLE_IO_ERROR;
+
+ out->data = data;
+ out->size = len;
+
+ return 0;
+}
+
+int reftable_munmap(struct reftable_mmap *mmap)
+{
+ if (munmap(mmap->data, mmap->size) < 0)
+ return REFTABLE_IO_ERROR;
+ memset(mmap, 0, sizeof(*mmap));
+ return 0;
+}
diff --git a/reftable/system.h b/reftable/system.h
index c54ed4cad6..c0e2cbe0ff 100644
--- a/reftable/system.h
+++ b/reftable/system.h
@@ -9,11 +9,14 @@
#ifndef SYSTEM_H
#define SYSTEM_H
-/* This header glues the reftable library to the rest of Git */
+/*
+ * This header defines the platform-agnostic interface that is to be
+ * implemented by the project to make it work on their respective supported
+ * systems, and to integrate it into the project itself. This header is not
+ * expected to be changed by the individual project.
+ */
-#define MINGW_DONT_HANDLE_IN_USE_ERROR
-#include "compat/posix.h"
-#include "compat/zlib-compat.h"
+#include "reftable-system.h"
/*
* Return a random 32 bit integer. This function is expected to return
@@ -108,4 +111,25 @@ int flock_release(struct reftable_flock *l);
*/
int flock_commit(struct reftable_flock *l);
+/* Report the time in milliseconds. */
+uint64_t reftable_time_ms(void);
+
+struct reftable_mmap {
+ void *data;
+ size_t size;
+ void *priv;
+};
+
+/*
+ * Map the file into memory. Returns 0 on success, a reftable error code on
+ * error.
+ */
+int reftable_mmap(struct reftable_mmap *out, int fd, size_t len);
+
+/*
+ * Unmap the file from memory. Returns 0 on success, a reftable error code on
+ * error.
+ */
+int reftable_munmap(struct reftable_mmap *mmap);
+
#endif
diff --git a/remote-curl.c b/remote-curl.c
index 92e40bb682..aba60d5712 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -529,6 +529,17 @@ static struct discovery *discover_refs(const char *service, int for_push)
show_http_message(&type, &charset, &buffer);
die(_("unable to access '%s' with http.pinnedPubkey configuration: %s"),
transport_anonymize_url(url.buf), curl_errorstr);
+ case HTTP_RATE_LIMITED:
+ if (http_options.retry_after > 0) {
+ show_http_message(&type, &charset, &buffer);
+ die(_("rate limited by '%s', please try again in %ld seconds"),
+ transport_anonymize_url(url.buf),
+ http_options.retry_after);
+ } else {
+ show_http_message(&type, &charset, &buffer);
+ die(_("rate limited by '%s', please try again later"),
+ transport_anonymize_url(url.buf));
+ }
default:
show_http_message(&type, &charset, &buffer);
die(_("unable to access '%s': %s"),
@@ -1552,6 +1563,13 @@ int cmd_main(int argc, const char **argv)
goto cleanup;
}
+ /*
+ * yuck, see 9e89dcb66a (builtin/ls-remote: fall back to SHA1 outside
+ * of a repo, 2024-08-02)
+ */
+ if (nongit)
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
+
options.verbosity = 1;
options.progress = !!isatty(2);
options.thin = 1;
diff --git a/remote.c b/remote.c
index 7ca2a6501b..a664cd166a 100644
--- a/remote.c
+++ b/remote.c
@@ -1723,7 +1723,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
if (!reject_reason && !ref->deletion && !is_null_oid(&ref->old_oid)) {
if (starts_with(ref->name, "refs/tags/"))
reject_reason = REF_STATUS_REJECT_ALREADY_EXISTS;
- else if (!odb_has_object(the_repository->objects, &ref->old_oid, HAS_OBJECT_RECHECK_PACKED))
+ else if (!odb_has_object(the_repository->objects, &ref->old_oid, ODB_HAS_OBJECT_RECHECK_PACKED))
reject_reason = REF_STATUS_REJECT_FETCH_FIRST;
else if (!lookup_commit_reference_gently(the_repository, &ref->old_oid, 1) ||
!lookup_commit_reference_gently(the_repository, &ref->new_oid, 1))
diff --git a/replay.c b/replay.c
index b958ddabfa..f96f1f6551 100644
--- a/replay.c
+++ b/replay.c
@@ -254,7 +254,10 @@ static struct commit *mapped_commit(kh_oid_map_t *replayed_commits,
struct commit *commit,
struct commit *fallback)
{
- khint_t pos = kh_get_oid_map(replayed_commits, commit->object.oid);
+ khint_t pos;
+ if (!commit)
+ return fallback;
+ pos = kh_get_oid_map(replayed_commits, commit->object.oid);
if (pos == kh_end(replayed_commits))
return fallback;
return kh_value(replayed_commits, pos);
@@ -271,18 +274,26 @@ static struct commit *pick_regular_commit(struct repository *repo,
struct commit *base, *replayed_base;
struct tree *pickme_tree, *base_tree, *replayed_base_tree;
- base = pickme->parents->item;
- replayed_base = mapped_commit(replayed_commits, base, onto);
+ if (pickme->parents) {
+ base = pickme->parents->item;
+ base_tree = repo_get_commit_tree(repo, base);
+ } else {
+ base = NULL;
+ base_tree = lookup_tree(repo, repo->hash_algo->empty_tree);
+ }
+ replayed_base = mapped_commit(replayed_commits, base, onto);
replayed_base_tree = repo_get_commit_tree(repo, replayed_base);
pickme_tree = repo_get_commit_tree(repo, pickme);
- base_tree = repo_get_commit_tree(repo, base);
if (mode == REPLAY_MODE_PICK) {
/* Cherry-pick: normal order */
merge_opt->branch1 = short_commit_name(repo, replayed_base);
merge_opt->branch2 = short_commit_name(repo, pickme);
- merge_opt->ancestor = xstrfmt("parent of %s", merge_opt->branch2);
+ if (pickme->parents)
+ merge_opt->ancestor = xstrfmt("parent of %s", merge_opt->branch2);
+ else
+ merge_opt->ancestor = xstrdup("empty tree");
merge_incore_nonrecursive(merge_opt,
base_tree,
@@ -387,8 +398,6 @@ int replay_revisions(struct rev_info *revs,
oidcpy(&old_oid, &onto->object.oid);
}
- /* FIXME: Should allow replaying commits with the first as a root commit */
-
if (prepare_revision_walk(revs) < 0) {
ret = error(_("error preparing revisions"));
goto out;
@@ -403,9 +412,7 @@ int replay_revisions(struct rev_info *revs,
khint_t pos;
int hr;
- if (!commit->parents)
- die(_("replaying down from root commit is not supported yet!"));
- if (commit->parents->next)
+ if (commit->parents && commit->parents->next)
die(_("replaying merge commits is not supported yet!"));
last_commit = pick_regular_commit(revs->repo, commit, replayed_commits,
diff --git a/repository.c b/repository.c
index 0b8f7ec200..9e5537f539 100644
--- a/repository.c
+++ b/repository.c
@@ -3,6 +3,7 @@
#include "repository.h"
#include "hook.h"
#include "odb.h"
+#include "odb/source.h"
#include "config.h"
#include "gettext.h"
#include "object.h"
diff --git a/rerere.c b/rerere.c
index 6ec55964e2..0296700f9f 100644
--- a/rerere.c
+++ b/rerere.c
@@ -403,12 +403,8 @@ static int handle_conflict(struct strbuf *out, struct rerere_io *io,
strbuf_addbuf(out, &two);
rerere_strbuf_putconflict(out, '>', marker_size);
if (ctx) {
- git_hash_update(ctx, one.buf ?
- one.buf : "",
- one.len + 1);
- git_hash_update(ctx, two.buf ?
- two.buf : "",
- two.len + 1);
+ git_hash_update(ctx, one.buf, one.len + 1);
+ git_hash_update(ctx, two.buf, two.len + 1);
}
break;
} else if (hunk == RR_SIDE_1)
diff --git a/revision.c b/revision.c
index 31808e3df0..599b3a66c3 100644
--- a/revision.c
+++ b/revision.c
@@ -2038,41 +2038,32 @@ static void prepare_show_merge(struct rev_info *revs)
free(prune);
}
-static int dotdot_missing(const char *arg, char *dotdot,
+static int dotdot_missing(const char *full_name,
struct rev_info *revs, int symmetric)
{
if (revs->ignore_missing)
return 0;
- /* de-munge so we report the full argument */
- *dotdot = '.';
die(symmetric
? "Invalid symmetric difference expression %s"
- : "Invalid revision range %s", arg);
+ : "Invalid revision range %s", full_name);
}
-static int handle_dotdot_1(const char *arg, char *dotdot,
+static int handle_dotdot_1(const char *a_name, const char *b_name,
+ const char *full_name, int symmetric,
struct rev_info *revs, int flags,
int cant_be_filename,
struct object_context *a_oc,
struct object_context *b_oc)
{
- const char *a_name, *b_name;
struct object_id a_oid, b_oid;
struct object *a_obj, *b_obj;
unsigned int a_flags, b_flags;
- int symmetric = 0;
unsigned int flags_exclude = flags ^ (UNINTERESTING | BOTTOM);
unsigned int oc_flags = GET_OID_COMMITTISH | GET_OID_RECORD_PATH;
- a_name = arg;
if (!*a_name)
a_name = "HEAD";
- b_name = dotdot + 2;
- if (*b_name == '.') {
- symmetric = 1;
- b_name++;
- }
if (!*b_name)
b_name = "HEAD";
@@ -2081,15 +2072,13 @@ static int handle_dotdot_1(const char *arg, char *dotdot,
return -1;
if (!cant_be_filename) {
- *dotdot = '.';
- verify_non_filename(revs->prefix, arg);
- *dotdot = '\0';
+ verify_non_filename(revs->prefix, full_name);
}
a_obj = parse_object(revs->repo, &a_oid);
b_obj = parse_object(revs->repo, &b_oid);
if (!a_obj || !b_obj)
- return dotdot_missing(arg, dotdot, revs, symmetric);
+ return dotdot_missing(full_name, revs, symmetric);
if (!symmetric) {
/* just A..B */
@@ -2103,7 +2092,7 @@ static int handle_dotdot_1(const char *arg, char *dotdot,
a = lookup_commit_reference(revs->repo, &a_obj->oid);
b = lookup_commit_reference(revs->repo, &b_obj->oid);
if (!a || !b)
- return dotdot_missing(arg, dotdot, revs, symmetric);
+ return dotdot_missing(full_name, revs, symmetric);
if (repo_get_merge_bases(the_repository, a, b, &exclude) < 0) {
commit_list_free(exclude);
@@ -2132,16 +2121,23 @@ static int handle_dotdot(const char *arg,
int cant_be_filename)
{
struct object_context a_oc = {0}, b_oc = {0};
- char *dotdot = strstr(arg, "..");
+ const char *dotdot = strstr(arg, "..");
+ char *tmp;
+ int symmetric = 0;
int ret;
if (!dotdot)
return -1;
- *dotdot = '\0';
- ret = handle_dotdot_1(arg, dotdot, revs, flags, cant_be_filename,
- &a_oc, &b_oc);
- *dotdot = '.';
+ tmp = xmemdupz(arg, dotdot - arg);
+ dotdot += 2;
+ if (*dotdot == '.') {
+ symmetric = 1;
+ dotdot++;
+ }
+ ret = handle_dotdot_1(tmp, dotdot, arg, symmetric, revs, flags,
+ cant_be_filename, &a_oc, &b_oc);
+ free(tmp);
object_context_release(&a_oc);
object_context_release(&b_oc);
@@ -2151,7 +2147,10 @@ static int handle_dotdot(const char *arg,
static int handle_revision_arg_1(const char *arg_, struct rev_info *revs, int flags, unsigned revarg_opt)
{
struct object_context oc = {0};
- char *mark;
+ const char *mark;
+ char *arg_minus_at = NULL;
+ char *arg_minus_excl = NULL;
+ char *arg_minus_dash = NULL;
struct object *object;
struct object_id oid;
int local_flags;
@@ -2178,18 +2177,17 @@ static int handle_revision_arg_1(const char *arg_, struct rev_info *revs, int fl
mark = strstr(arg, "^@");
if (mark && !mark[2]) {
- *mark = 0;
- if (add_parents_only(revs, arg, flags, 0)) {
+ arg_minus_at = xmemdupz(arg, mark - arg);
+ if (add_parents_only(revs, arg_minus_at, flags, 0)) {
ret = 0;
goto out;
}
- *mark = '^';
}
mark = strstr(arg, "^!");
if (mark && !mark[2]) {
- *mark = 0;
- if (!add_parents_only(revs, arg, flags ^ (UNINTERESTING | BOTTOM), 0))
- *mark = '^';
+ arg_minus_excl = xmemdupz(arg, mark - arg);
+ if (add_parents_only(revs, arg_minus_excl, flags ^ (UNINTERESTING | BOTTOM), 0))
+ arg = arg_minus_excl;
}
mark = strstr(arg, "^-");
if (mark) {
@@ -2203,9 +2201,9 @@ static int handle_revision_arg_1(const char *arg_, struct rev_info *revs, int fl
}
}
- *mark = 0;
- if (!add_parents_only(revs, arg, flags ^ (UNINTERESTING | BOTTOM), exclude_parent))
- *mark = '^';
+ arg_minus_dash = xmemdupz(arg, mark - arg);
+ if (add_parents_only(revs, arg_minus_dash, flags ^ (UNINTERESTING | BOTTOM), exclude_parent))
+ arg = arg_minus_dash;
}
local_flags = 0;
@@ -2240,6 +2238,9 @@ static int handle_revision_arg_1(const char *arg_, struct rev_info *revs, int fl
out:
object_context_release(&oc);
+ free(arg_minus_at);
+ free(arg_minus_excl);
+ free(arg_minus_dash);
return ret;
}
@@ -3128,6 +3129,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
if (want_ancestry(revs))
revs->limited = 1;
revs->topo_order = 1;
+ if (!revs->diffopt.output_format)
+ revs->diffopt.output_format = DIFF_FORMAT_PATCH;
}
if (revs->topo_order && !generation_numbers_enabled(the_repository))
diff --git a/revision.h b/revision.h
index 69242ecb18..584f1338b5 100644
--- a/revision.h
+++ b/revision.h
@@ -4,6 +4,7 @@
#include "commit.h"
#include "grep.h"
#include "notes.h"
+#include "object-name.h"
#include "oidset.h"
#include "pretty.h"
#include "diff.h"
diff --git a/shallow.c b/shallow.c
index 7a3dd56795..a8ad92e303 100644
--- a/shallow.c
+++ b/shallow.c
@@ -360,7 +360,7 @@ static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
return 0;
if (data->flags & QUICK) {
if (!odb_has_object(the_repository->objects, &graft->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
return 0;
} else if (data->flags & SEEN_ONLY) {
struct commit *c = lookup_commit(the_repository, &graft->oid);
@@ -528,7 +528,7 @@ void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
ALLOC_ARRAY(info->theirs, sa->nr);
for (size_t i = 0; i < sa->nr; i++) {
if (odb_has_object(the_repository->objects, sa->oid + i,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR)) {
struct commit_graft *graft;
graft = lookup_commit_graft(the_repository,
&sa->oid[i]);
@@ -567,7 +567,7 @@ void remove_nonexistent_theirs_shallow(struct shallow_info *info)
if (i != dst)
info->theirs[dst] = info->theirs[i];
if (odb_has_object(the_repository->objects, oid + info->theirs[i],
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR))
dst++;
}
info->nr_theirs = dst;
diff --git a/split-index.c b/split-index.c
index 4c74c4adda..6ba210738c 100644
--- a/split-index.c
+++ b/split-index.c
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "git-compat-util.h"
@@ -6,6 +5,7 @@
#include "hash.h"
#include "mem-pool.h"
#include "read-cache-ll.h"
+#include "repository.h"
#include "split-index.h"
#include "strbuf.h"
#include "ewah/ewok.h"
@@ -25,16 +25,17 @@ struct split_index *init_split_index(struct index_state *istate)
int read_link_extension(struct index_state *istate,
const void *data_, unsigned long sz)
{
+ const struct git_hash_algo *algo = istate->repo->hash_algo;
const unsigned char *data = data_;
struct split_index *si;
int ret;
- if (sz < the_hash_algo->rawsz)
+ if (sz < algo->rawsz)
return error("corrupt link extension (too short)");
si = init_split_index(istate);
- oidread(&si->base_oid, data, the_repository->hash_algo);
- data += the_hash_algo->rawsz;
- sz -= the_hash_algo->rawsz;
+ oidread(&si->base_oid, data, algo);
+ data += algo->rawsz;
+ sz -= algo->rawsz;
if (!sz)
return 0;
si->delete_bitmap = ewah_new();
@@ -56,7 +57,7 @@ int write_link_extension(struct strbuf *sb,
struct index_state *istate)
{
struct split_index *si = istate->split_index;
- strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
+ strbuf_add(sb, si->base_oid.hash, istate->repo->hash_algo->rawsz);
if (!si->delete_bitmap && !si->replace_bitmap)
return 0;
ewah_serialize_strbuf(si->delete_bitmap, sb);
diff --git a/strbuf.c b/strbuf.c
index 3939863cf3..3e04addc22 100644
--- a/strbuf.c
+++ b/strbuf.c
@@ -168,7 +168,7 @@ int strbuf_reencode(struct strbuf *sb, const char *from, const char *to)
if (!out)
return -1;
- strbuf_attach(sb, out, len, len);
+ strbuf_attach(sb, out, len, len + 1);
return 0;
}
diff --git a/string-list.c b/string-list.c
index fffa2ad4b6..d260b873c8 100644
--- a/string-list.c
+++ b/string-list.c
@@ -281,6 +281,15 @@ void unsorted_string_list_delete_item(struct string_list *list, int i, int free_
list->nr--;
}
+void unsorted_string_list_remove(struct string_list *list, const char *str,
+ int free_util)
+{
+ struct string_list_item *item = unsorted_string_list_lookup(list, str);
+ if (item)
+ unsorted_string_list_delete_item(list, item - list->items,
+ free_util);
+}
+
/*
* append a substring [p..end] to list; return number of things it
* appended to the list.
diff --git a/string-list.h b/string-list.h
index 3ad862a187..b86ee7c099 100644
--- a/string-list.h
+++ b/string-list.h
@@ -266,6 +266,14 @@ struct string_list_item *unsorted_string_list_lookup(struct string_list *list,
void unsorted_string_list_delete_item(struct string_list *list, int i, int free_util);
/**
+ * Remove the first item matching `str` from an unsorted string_list.
+ * No-op if `str` is not found. If `free_util` is non-zero, the `util`
+ * pointer of the removed item is freed before deletion.
+ */
+void unsorted_string_list_remove(struct string_list *list, const char *str,
+ int free_util);
+
+/**
* Split string into substrings on characters in `delim` and append the
* substrings to `list`. The input string is not modified.
* list->strdup_strings must be set, as new memory needs to be
diff --git a/submodule-config.c b/submodule-config.c
index 1f19fe2077..72a46b7a54 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -14,6 +14,7 @@
#include "strbuf.h"
#include "object-name.h"
#include "odb.h"
+#include "odb/source.h"
#include "parse-options.h"
#include "thread-utils.h"
#include "tree-walk.h"
diff --git a/submodule.c b/submodule.c
index e20537ba8d..b1a0363f9d 100644
--- a/submodule.c
+++ b/submodule.c
@@ -1828,7 +1828,6 @@ int fetch_submodules(struct repository *r,
int default_option,
int quiet, int max_parallel_jobs)
{
- int i;
struct submodule_parallel_fetch spf = SPF_INIT;
const struct run_process_parallel_opts opts = {
.tr2_category = "submodule",
@@ -1855,8 +1854,7 @@ int fetch_submodules(struct repository *r,
die(_("index file corrupt"));
strvec_push(&spf.args, "fetch");
- for (i = 0; i < options->nr; i++)
- strvec_push(&spf.args, options->v[i]);
+ strvec_pushv(&spf.args, options->v);
strvec_push(&spf.args, "--recurse-submodules-default");
/* default value, "--submodule-prefix" and its value are added later */
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
index 6de5d1665a..388d29e2b5 100644
--- a/t/helper/test-read-midx.c
+++ b/t/helper/test-read-midx.c
@@ -26,18 +26,22 @@ static int read_midx_file(const char *object_dir, const char *checksum,
int show_objects)
{
uint32_t i;
- struct multi_pack_index *m;
+ struct multi_pack_index *m, *tip;
+ int ret = 0;
- m = setup_midx(object_dir);
+ m = tip = setup_midx(object_dir);
if (!m)
return 1;
if (checksum) {
- while (m && strcmp(hash_to_hex(get_midx_checksum(m)), checksum))
+ while (m && strcmp(midx_get_checksum_hex(m), checksum))
m = m->base_midx;
- if (!m)
- return 1;
+ if (!m) {
+ ret = error(_("could not find MIDX with checksum %s"),
+ checksum);
+ goto out;
+ }
}
printf("header: %08x %d %d %d %d\n",
@@ -82,9 +86,10 @@ static int read_midx_file(const char *object_dir, const char *checksum,
}
}
- close_midx(m);
+out:
+ close_midx(tip);
- return 0;
+ return ret;
}
static int read_midx_checksum(const char *object_dir)
@@ -94,7 +99,7 @@ static int read_midx_checksum(const char *object_dir)
m = setup_midx(object_dir);
if (!m)
return 1;
- printf("%s\n", hash_to_hex(get_midx_checksum(m)));
+ printf("%s\n", midx_get_checksum_hex(m));
close_midx(m);
return 0;
diff --git a/t/lib-httpd.sh b/t/lib-httpd.sh
index 5f42c311c2..4c76e813e3 100644
--- a/t/lib-httpd.sh
+++ b/t/lib-httpd.sh
@@ -167,6 +167,7 @@ prepare_httpd() {
install_script error.sh
install_script apply-one-time-script.sh
install_script nph-custom-auth.sh
+ install_script http-429.sh
ln -s "$LIB_HTTPD_MODULE_PATH" "$HTTPD_ROOT_PATH/modules"
diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf
index 6b8c50a51a..40a690b0bb 100644
--- a/t/lib-httpd/apache.conf
+++ b/t/lib-httpd/apache.conf
@@ -139,6 +139,10 @@ SetEnv PERL_PATH ${PERL_PATH}
SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
SetEnv GIT_HTTP_EXPORT_ALL
</LocationMatch>
+<LocationMatch /http_429/>
+ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
+ SetEnv GIT_HTTP_EXPORT_ALL
+</LocationMatch>
<LocationMatch /smart_v0/>
SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
SetEnv GIT_HTTP_EXPORT_ALL
@@ -160,6 +164,7 @@ ScriptAlias /broken_smart/ broken-smart-http.sh/
ScriptAlias /error_smart/ error-smart-http.sh/
ScriptAlias /error/ error.sh/
ScriptAliasMatch /one_time_script/(.*) apply-one-time-script.sh/$1
+ScriptAliasMatch /http_429/(.*) http-429.sh/$1
ScriptAliasMatch /custom_auth/(.*) nph-custom-auth.sh/$1
<Directory ${GIT_EXEC_PATH}>
Options FollowSymlinks
@@ -185,6 +190,9 @@ ScriptAliasMatch /custom_auth/(.*) nph-custom-auth.sh/$1
<Files apply-one-time-script.sh>
Options ExecCGI
</Files>
+<Files http-429.sh>
+ Options ExecCGI
+</Files>
<Files ${GIT_EXEC_PATH}/git-http-backend>
Options ExecCGI
</Files>
diff --git a/t/lib-httpd/http-429.sh b/t/lib-httpd/http-429.sh
new file mode 100644
index 0000000000..c97b16145b
--- /dev/null
+++ b/t/lib-httpd/http-429.sh
@@ -0,0 +1,98 @@
+#!/bin/sh
+
+# Script to return HTTP 429 Too Many Requests responses for testing retry logic.
+# Usage: /http_429/<test-context>/<retry-after-value>/<repo-path>
+#
+# The test-context is a unique identifier for each test to isolate state files.
+# The retry-after-value can be:
+# - A number (e.g., "1", "2", "100") - sets Retry-After header to that many seconds
+# - "none" - no Retry-After header
+# - "invalid" - invalid Retry-After format
+# - "permanent" - always return 429 (never succeed)
+# - An HTTP-date string (RFC 1123 format, per RFC 9110 "Retry-After") - sets Retry-After to that date
+#
+# On first call, returns 429. On subsequent calls (after retry), forwards to git-http-backend
+# unless retry-after-value is "permanent".
+
+# Extract test context, retry-after value and repo path from PATH_INFO
+# PATH_INFO format: /<test-context>/<retry-after-value>/<repo-path>
+path_info="${PATH_INFO#/}" # Remove leading slash
+test_context="${path_info%%/*}" # Get first component (test context)
+remaining="${path_info#*/}" # Get rest
+retry_after="${remaining%%/*}" # Get second component (retry-after value)
+repo_path="${remaining#*/}" # Get rest (repo path)
+
+# Extract repository name from repo_path (e.g., "repo.git" from "repo.git/info/refs")
+# The repo name is the first component before any "/"
+repo_name="${repo_path%%/*}"
+
+# Use current directory (HTTPD_ROOT_PATH) for state file
+# Create a safe filename from test_context, retry_after and repo_name
+# This ensures all requests for the same test context share the same state file
+safe_name=$(echo "${test_context}-${retry_after}-${repo_name}" | tr '/' '_' | tr -cd 'a-zA-Z0-9_-')
+state_file="http-429-state-${safe_name}"
+
+# Check if this is the first call (no state file exists)
+if test -f "$state_file"
+then
+ # Already returned 429 once, forward to git-http-backend
+ # Set PATH_INFO to just the repo path (without retry-after value)
+ # Set GIT_PROJECT_ROOT so git-http-backend can find the repository
+ # Use exec to replace this process so git-http-backend gets the updated environment
+ PATH_INFO="/$repo_path"
+ export PATH_INFO
+ # GIT_PROJECT_ROOT points to the document root where repositories are stored
+ # The script runs from HTTPD_ROOT_PATH, and www/ is the document root
+ if test -z "$GIT_PROJECT_ROOT"
+ then
+ # Construct path: current directory (HTTPD_ROOT_PATH) + /www
+ GIT_PROJECT_ROOT="$(pwd)/www"
+ export GIT_PROJECT_ROOT
+ fi
+ exec "$GIT_EXEC_PATH/git-http-backend"
+fi
+
+# Mark that we've returned 429
+touch "$state_file"
+
+# Output HTTP 429 response
+printf "Status: 429 Too Many Requests\r\n"
+
+# Set Retry-After header based on retry_after value
+case "$retry_after" in
+ none)
+ # No Retry-After header
+ ;;
+ invalid)
+ printf "Retry-After: invalid-format-123abc\r\n"
+ ;;
+ permanent)
+ # Always return 429: remove the state marker so subsequent requests also get 429
+ rm -f "$state_file"
+ printf "Retry-After: 1\r\n"
+ printf "Content-Type: text/plain\r\n"
+ printf "\r\n"
+ printf "Permanently rate limited\n"
+ exit 0
+ ;;
+ *)
+ # Check if it's a number
+ case "$retry_after" in
+ [0-9]*)
+ # Numeric value
+ printf "Retry-After: %s\r\n" "$retry_after"
+ ;;
+ *)
+ # Assume it's an HTTP-date format (passed as-is, URL decoded)
+ # Apache may URL-encode the path, so decode common URL-encoded characters
+ # %20 = space, %2C = comma, %3A = colon
+ retry_value=$(echo "$retry_after" | sed -e 's/%20/ /g' -e 's/%2C/,/g' -e 's/%3A/:/g')
+ printf "Retry-After: %s\r\n" "$retry_value"
+ ;;
+ esac
+ ;;
+esac
+
+printf "Content-Type: text/plain\r\n"
+printf "\r\n"
+printf "Rate limited\n"
diff --git a/t/meson.build b/t/meson.build
index bafb8e63a9..7528e5cda5 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -626,6 +626,7 @@ integration_tests = [
't5332-multi-pack-reuse.sh',
't5333-pseudo-merge-bitmaps.sh',
't5334-incremental-multi-pack-index.sh',
+ 't5335-compact-multi-pack-index.sh',
't5351-unpack-large-objects.sh',
't5400-send-pack.sh',
't5401-update-hooks.sh',
@@ -712,6 +713,7 @@ integration_tests = [
't5581-http-curl-verbose.sh',
't5582-fetch-negative-refspec.sh',
't5583-push-branches.sh',
+ 't5584-http-429-retry.sh',
't5600-clone-fail-cleanup.sh',
't5601-clone.sh',
't5602-clone-remote-exec.sh',
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index 2f77fde0d9..60cfe65979 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -287,16 +287,8 @@ test_expect_success MINGW 'can spawn .bat with argv[0] containing spaces' '
rm -f out &&
echo "echo %* >>out" >"$bat" &&
- # Ask git to invoke .bat; clone will fail due to fake SSH helper
- test_must_fail env GIT_SSH="$bat" git clone myhost:src ssh-clone &&
-
- # Spawning .bat can fail if there are two quoted cmd.exe arguments.
- # .bat itself is first (due to spaces in name), so just one more is
- # needed to verify. GIT_SSH will invoke .bat multiple times:
- # 1) -G myhost
- # 2) myhost "git-upload-pack src"
- # First invocation will always succeed. Test the second one.
- grep "git-upload-pack" out
+ test-tool run-command run-command "$bat" "arg with spaces" &&
+ test_grep "arg with spaces" out
'
test_done
diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
index 07aa834d33..64ead1571a 100755
--- a/t/t0300-credentials.sh
+++ b/t/t0300-credentials.sh
@@ -675,7 +675,9 @@ test_expect_success 'match percent-encoded values' '
test_expect_success 'match percent-encoded UTF-8 values in path' '
test_config credential.https://example.com.useHttpPath true &&
test_config credential.https://example.com/perú.git.helper "$HELPER" &&
- check fill <<-\EOF
+ # NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+ # that contain multibyte chars.
+ check fill <<-EOF
url=https://example.com/per%C3%BA.git
--
protocol=https
diff --git a/t/t0450/adoc-help-mismatches b/t/t0450/adoc-help-mismatches
index 8ee2d3f7c8..e8d6c13ccd 100644
--- a/t/t0450/adoc-help-mismatches
+++ b/t/t0450/adoc-help-mismatches
@@ -33,7 +33,6 @@ merge
merge-file
merge-index
merge-one-file
-multi-pack-index
name-rev
notes
push
diff --git a/t/t1416-ref-transaction-hooks.sh b/t/t1416-ref-transaction-hooks.sh
index d91dd3a3b5..4fe9d9b234 100755
--- a/t/t1416-ref-transaction-hooks.sh
+++ b/t/t1416-ref-transaction-hooks.sh
@@ -20,6 +20,7 @@ test_expect_success 'hook allows updating ref if successful' '
echo "$*" >>actual
EOF
cat >expect <<-EOF &&
+ preparing
prepared
committed
EOF
@@ -27,6 +28,18 @@ test_expect_success 'hook allows updating ref if successful' '
test_cmp expect actual
'
+test_expect_success 'hook aborts updating ref in preparing state' '
+ git reset --hard PRE &&
+ test_hook reference-transaction <<-\EOF &&
+ if test "$1" = preparing
+ then
+ exit 1
+ fi
+ EOF
+ test_must_fail git update-ref HEAD POST 2>err &&
+ test_grep "in '\''preparing'\'' phase, update aborted by the reference-transaction hook" err
+'
+
test_expect_success 'hook aborts updating ref in prepared state' '
git reset --hard PRE &&
test_hook reference-transaction <<-\EOF &&
@@ -36,7 +49,7 @@ test_expect_success 'hook aborts updating ref in prepared state' '
fi
EOF
test_must_fail git update-ref HEAD POST 2>err &&
- test_grep "ref updates aborted by hook" err
+ test_grep "in '\''prepared'\'' phase, update aborted by the reference-transaction hook" err
'
test_expect_success 'hook gets all queued updates in prepared state' '
@@ -121,6 +134,7 @@ test_expect_success 'interleaving hook calls succeed' '
cat >expect <<-EOF &&
hooks/update refs/tags/PRE $ZERO_OID $PRE_OID
hooks/update refs/tags/POST $ZERO_OID $POST_OID
+ hooks/reference-transaction preparing
hooks/reference-transaction prepared
hooks/reference-transaction committed
EOF
@@ -143,6 +157,8 @@ test_expect_success 'hook captures git-symbolic-ref updates' '
git symbolic-ref refs/heads/symref refs/heads/main &&
cat >expect <<-EOF &&
+ preparing
+ $ZERO_OID ref:refs/heads/main refs/heads/symref
prepared
$ZERO_OID ref:refs/heads/main refs/heads/symref
committed
@@ -171,14 +187,20 @@ test_expect_success 'hook gets all queued symref updates' '
# In the files backend, "delete" also triggers an additional transaction
# update on the packed-refs backend, which constitutes additional reflog
# entries.
+ cat >expect <<-EOF &&
+ preparing
+ ref:refs/heads/main $ZERO_OID refs/heads/symref
+ ref:refs/heads/main $ZERO_OID refs/heads/symrefd
+ $ZERO_OID ref:refs/heads/main refs/heads/symrefc
+ ref:refs/heads/main ref:refs/heads/branch refs/heads/symrefu
+ EOF
+
if test_have_prereq REFFILES
then
- cat >expect <<-EOF
+ cat >>expect <<-EOF
aborted
$ZERO_OID $ZERO_OID refs/heads/symrefd
EOF
- else
- >expect
fi &&
cat >>expect <<-EOF &&
diff --git a/t/t1800-hook.sh b/t/t1800-hook.sh
index b1583e9ef9..33decc66c0 100755
--- a/t/t1800-hook.sh
+++ b/t/t1800-hook.sh
@@ -6,16 +6,16 @@ test_description='git-hook command and config-managed multihooks'
. "$TEST_DIRECTORY"/lib-terminal.sh
setup_hooks () {
- test_config hook.ghi.command "/path/ghi"
- test_config hook.ghi.event pre-commit --add
- test_config hook.ghi.event test-hook --add
- test_config_global hook.def.command "/path/def"
+ test_config hook.ghi.command "/path/ghi" &&
+ test_config hook.ghi.event pre-commit --add &&
+ test_config hook.ghi.event test-hook --add &&
+ test_config_global hook.def.command "/path/def" &&
test_config_global hook.def.event pre-commit --add
}
setup_hookdir () {
- mkdir .git/hooks
- write_script .git/hooks/pre-commit <<-EOF
+ mkdir -p .git/hooks &&
+ write_script .git/hooks/pre-commit <<-EOF &&
echo \"Legacy Hook\"
EOF
test_when_finished rm -rf .git/hooks
@@ -25,18 +25,47 @@ test_expect_success 'git hook usage' '
test_expect_code 129 git hook &&
test_expect_code 129 git hook run &&
test_expect_code 129 git hook run -h &&
- test_expect_code 129 git hook list -h &&
test_expect_code 129 git hook run --unknown 2>err &&
test_expect_code 129 git hook list &&
test_expect_code 129 git hook list -h &&
grep "unknown option" err
'
+test_expect_success 'git hook list: unknown hook name is rejected' '
+ test_must_fail git hook list prereceive 2>err &&
+ test_grep "unknown hook event" err
+'
+
+test_expect_success 'git hook run: unknown hook name is rejected' '
+ test_must_fail git hook run prereceive 2>err &&
+ test_grep "unknown hook event" err
+'
+
+test_expect_success 'git hook list: known hook name is accepted' '
+ test_must_fail git hook list pre-receive 2>err &&
+ test_grep ! "unknown hook event" err
+'
+
+test_expect_success 'git hook run: known hook name is accepted' '
+ git hook run --ignore-missing pre-receive 2>err &&
+ test_grep ! "unknown hook event" err
+'
+
+test_expect_success 'git hook run: --allow-unknown-hook-name overrides rejection' '
+ git hook run --allow-unknown-hook-name --ignore-missing custom-hook 2>err &&
+ test_grep ! "unknown hook event" err
+'
+
+test_expect_success 'git hook list: --allow-unknown-hook-name overrides rejection' '
+ test_must_fail git hook list --allow-unknown-hook-name custom-hook 2>err &&
+ test_grep ! "unknown hook event" err
+'
+
test_expect_success 'git hook list: nonexistent hook' '
cat >stderr.expect <<-\EOF &&
- warning: No hooks found for event '\''test-hook'\''
+ warning: no hooks found for event '\''test-hook'\''
EOF
- test_expect_code 1 git hook list test-hook 2>stderr.actual &&
+ test_expect_code 1 git hook list --allow-unknown-hook-name test-hook 2>stderr.actual &&
test_cmp stderr.expect stderr.actual
'
@@ -48,7 +77,7 @@ test_expect_success 'git hook list: traditional hook from hookdir' '
cat >expect <<-\EOF &&
hook from hookdir
EOF
- git hook list test-hook >actual &&
+ git hook list --allow-unknown-hook-name test-hook >actual &&
test_cmp expect actual
'
@@ -57,7 +86,7 @@ test_expect_success 'git hook list: configured hook' '
test_config hook.myhook.event test-hook --add &&
echo "myhook" >expect &&
- git hook list test-hook >actual &&
+ git hook list --allow-unknown-hook-name test-hook >actual &&
test_cmp expect actual
'
@@ -69,7 +98,7 @@ test_expect_success 'git hook list: -z shows NUL-terminated output' '
test_config hook.myhook.event test-hook --add &&
printf "myhookQhook from hookdirQ" >expect &&
- git hook list -z test-hook >actual.raw &&
+ git hook list --allow-unknown-hook-name -z test-hook >actual.raw &&
nul_to_q <actual.raw >actual &&
test_cmp expect actual
'
@@ -78,12 +107,12 @@ test_expect_success 'git hook run: nonexistent hook' '
cat >stderr.expect <<-\EOF &&
error: cannot find a hook named test-hook
EOF
- test_expect_code 1 git hook run test-hook 2>stderr.actual &&
+ test_expect_code 1 git hook run --allow-unknown-hook-name test-hook 2>stderr.actual &&
test_cmp stderr.expect stderr.actual
'
test_expect_success 'git hook run: nonexistent hook with --ignore-missing' '
- git hook run --ignore-missing does-not-exist 2>stderr.actual &&
+ git hook run --allow-unknown-hook-name --ignore-missing does-not-exist 2>stderr.actual &&
test_must_be_empty stderr.actual
'
@@ -95,7 +124,7 @@ test_expect_success 'git hook run: basic' '
cat >expect <<-\EOF &&
Test hook
EOF
- git hook run test-hook 2>actual &&
+ git hook run --allow-unknown-hook-name test-hook 2>actual &&
test_cmp expect actual
'
@@ -109,7 +138,7 @@ test_expect_success 'git hook run: stdout and stderr both write to our stderr' '
Will end up on stderr
Will end up on stderr
EOF
- git hook run test-hook >stdout.actual 2>stderr.actual &&
+ git hook run --allow-unknown-hook-name test-hook >stdout.actual 2>stderr.actual &&
test_cmp stderr.expect stderr.actual &&
test_must_be_empty stdout.actual
'
@@ -121,12 +150,12 @@ do
exit $code
EOF
- test_expect_code $code git hook run test-hook
+ test_expect_code $code git hook run --allow-unknown-hook-name test-hook
'
done
test_expect_success 'git hook run arg u ments without -- is not allowed' '
- test_expect_code 129 git hook run test-hook arg u ments
+ test_expect_code 129 git hook run --allow-unknown-hook-name test-hook arg u ments
'
test_expect_success 'git hook run -- pass arguments' '
@@ -140,7 +169,7 @@ test_expect_success 'git hook run -- pass arguments' '
u ments
EOF
- git hook run test-hook -- arg "u ments" 2>actual &&
+ git hook run --allow-unknown-hook-name test-hook -- arg "u ments" 2>actual &&
test_cmp expect actual
'
@@ -149,12 +178,12 @@ test_expect_success 'git hook run: out-of-repo runs execute global hooks' '
test_config_global hook.global-hook.command "echo no repo no problems" --add &&
echo "global-hook" >expect &&
- nongit git hook list test-hook >actual &&
+ nongit git hook list --allow-unknown-hook-name test-hook >actual &&
test_cmp expect actual &&
echo "no repo no problems" >expect &&
- nongit git hook run test-hook 2>actual &&
+ nongit git hook run --allow-unknown-hook-name test-hook 2>actual &&
test_cmp expect actual
'
@@ -179,11 +208,11 @@ test_expect_success 'git -c core.hooksPath=<PATH> hook run' '
# Test various ways of specifying the path. See also
# t1350-config-hooks-path.sh
>actual &&
- git hook run test-hook -- ignored 2>>actual &&
- git -c core.hooksPath=my-hooks hook run test-hook -- one 2>>actual &&
- git -c core.hooksPath=my-hooks/ hook run test-hook -- two 2>>actual &&
- git -c core.hooksPath="$PWD/my-hooks" hook run test-hook -- three 2>>actual &&
- git -c core.hooksPath="$PWD/my-hooks/" hook run test-hook -- four 2>>actual &&
+ git hook run --allow-unknown-hook-name test-hook -- ignored 2>>actual &&
+ git -c core.hooksPath=my-hooks hook run --allow-unknown-hook-name test-hook -- one 2>>actual &&
+ git -c core.hooksPath=my-hooks/ hook run --allow-unknown-hook-name test-hook -- two 2>>actual &&
+ git -c core.hooksPath="$PWD/my-hooks" hook run --allow-unknown-hook-name test-hook -- three 2>>actual &&
+ git -c core.hooksPath="$PWD/my-hooks/" hook run --allow-unknown-hook-name test-hook -- four 2>>actual &&
test_cmp expect actual
'
@@ -263,7 +292,7 @@ test_expect_success 'hook can be configured for multiple events' '
# 'ghi' should be included in both 'pre-commit' and 'test-hook'
git hook list pre-commit >actual &&
grep "ghi" actual &&
- git hook list test-hook >actual &&
+ git hook list --allow-unknown-hook-name test-hook >actual &&
grep "ghi" actual
'
@@ -337,15 +366,15 @@ test_expect_success 'stdin to multiple hooks' '
b3
EOF
- git hook run --to-stdin=input test-hook 2>actual &&
+ git hook run --allow-unknown-hook-name --to-stdin=input test-hook 2>actual &&
test_cmp expected actual
'
test_expect_success 'rejects hooks with no commands configured' '
test_config hook.broken.event "test-hook" &&
- test_must_fail git hook list test-hook 2>actual &&
+ test_must_fail git hook list --allow-unknown-hook-name test-hook 2>actual &&
test_grep "hook.broken.command" actual &&
- test_must_fail git hook run test-hook 2>actual &&
+ test_must_fail git hook run --allow-unknown-hook-name test-hook 2>actual &&
test_grep "hook.broken.command" actual
'
@@ -354,11 +383,19 @@ test_expect_success 'disabled hook is not run' '
test_config hook.skipped.command "echo \"Should not run\"" &&
test_config hook.skipped.enabled false &&
- git hook run --ignore-missing test-hook 2>actual &&
+ git hook run --allow-unknown-hook-name --ignore-missing test-hook 2>actual &&
test_must_be_empty actual
'
-test_expect_success 'disabled hook does not appear in git hook list' '
+test_expect_success 'disabled hook with no command warns' '
+ test_config hook.nocommand.event "pre-commit" &&
+ test_config hook.nocommand.enabled false &&
+
+ git hook list pre-commit 2>actual &&
+ test_grep "disabled hook.*nocommand.*no command configured" actual
+'
+
+test_expect_success 'disabled hook appears as disabled in git hook list' '
test_config hook.active.event "pre-commit" &&
test_config hook.active.command "echo active" &&
test_config hook.inactive.event "pre-commit" &&
@@ -366,8 +403,27 @@ test_expect_success 'disabled hook does not appear in git hook list' '
test_config hook.inactive.enabled false &&
git hook list pre-commit >actual &&
- test_grep "active" actual &&
- test_grep ! "inactive" actual
+ test_grep "^active$" actual &&
+ test_grep "^disabled inactive$" actual
+'
+
+test_expect_success 'disabled hook shows scope with --show-scope' '
+ test_config hook.myhook.event "pre-commit" &&
+ test_config hook.myhook.command "echo hi" &&
+ test_config hook.myhook.enabled false &&
+
+ git hook list --show-scope pre-commit >actual &&
+ test_grep "^local disabled myhook$" actual
+'
+
+test_expect_success 'disabled configured hook is not reported as existing by hook_exists' '
+ test_when_finished "rm -f git-bugreport-hook-exists-test.txt" &&
+ test_config hook.linter.event "pre-commit" &&
+ test_config hook.linter.command "echo lint" &&
+ test_config hook.linter.enabled false &&
+
+ git bugreport -s hook-exists-test &&
+ test_grep ! "pre-commit" git-bugreport-hook-exists-test.txt
'
test_expect_success 'globally disabled hook can be re-enabled locally' '
@@ -377,10 +433,59 @@ test_expect_success 'globally disabled hook can be re-enabled locally' '
test_config hook.global-hook.enabled true &&
echo "global-hook ran" >expected &&
- git hook run test-hook 2>actual &&
+ git hook run --allow-unknown-hook-name test-hook 2>actual &&
test_cmp expected actual
'
+test_expect_success 'configured hooks run before hookdir hook' '
+ setup_hookdir &&
+ test_config hook.first.event "pre-commit" &&
+ test_config hook.first.command "echo first" &&
+ test_config hook.second.event "pre-commit" &&
+ test_config hook.second.command "echo second" &&
+
+ cat >expected <<-\EOF &&
+ first
+ second
+ hook from hookdir
+ EOF
+
+ git hook list pre-commit >actual &&
+ test_cmp expected actual &&
+
+ # "Legacy Hook" is the output of the hookdir pre-commit script
+ # written by setup_hookdir() above.
+ cat >expected <<-\EOF &&
+ first
+ second
+ "Legacy Hook"
+ EOF
+
+ git hook run pre-commit 2>actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'git hook list --show-scope shows config scope' '
+ setup_hookdir &&
+ test_config_global hook.global-hook.command "echo global" &&
+ test_config_global hook.global-hook.event pre-commit --add &&
+ test_config hook.local-hook.command "echo local" &&
+ test_config hook.local-hook.event pre-commit --add &&
+
+ cat >expected <<-\EOF &&
+ global global-hook
+ local local-hook
+ hook from hookdir
+ EOF
+ git hook list --show-scope pre-commit >actual &&
+ test_cmp expected actual &&
+
+ # without --show-scope the scope must not appear
+ git hook list pre-commit >actual &&
+ test_grep ! "^global " actual &&
+ test_grep ! "^local " actual
+'
+
test_expect_success 'git hook run a hook with a bad shebang' '
test_when_finished "rm -rf bad-hooks" &&
mkdir bad-hooks &&
@@ -388,7 +493,7 @@ test_expect_success 'git hook run a hook with a bad shebang' '
test_expect_code 1 git \
-c core.hooksPath=bad-hooks \
- hook run test-hook >out 2>err &&
+ hook run --allow-unknown-hook-name test-hook >out 2>err &&
test_must_be_empty out &&
# TODO: We should emit the same (or at least a more similar)
@@ -412,7 +517,7 @@ test_expect_success 'stdin to hooks' '
EOF
echo hello >input &&
- git hook run --to-stdin=input test-hook 2>actual &&
+ git hook run --allow-unknown-hook-name --to-stdin=input test-hook 2>actual &&
test_cmp expect actual
'
diff --git a/t/t1900-repo-info.sh b/t/t1900-repo-info.sh
index a9eb07abe8..39bb77dda0 100755
--- a/t/t1900-repo-info.sh
+++ b/t/t1900-repo-info.sh
@@ -149,4 +149,10 @@ test_expect_success 'git repo info --keys uses lines as its default output forma
test_cmp expect actual
'
+test_expect_success 'git repo info -h shows only repo info usage' '
+ test_must_fail git repo info -h >actual &&
+ test_grep "git repo info" actual &&
+ test_grep ! "git repo structure" actual
+'
+
test_done
diff --git a/t/t1901-repo-structure.sh b/t/t1901-repo-structure.sh
index 98921ce1cb..10050abd70 100755
--- a/t/t1901-repo-structure.sh
+++ b/t/t1901-repo-structure.sh
@@ -224,4 +224,10 @@ test_expect_success 'progress meter option' '
)
'
+test_expect_success 'git repo structure -h shows only repo structure usage' '
+ test_must_fail git repo structure -h >actual &&
+ test_grep "git repo structure" actual &&
+ test_grep ! "git repo info" actual
+'
+
test_done
diff --git a/t/t2000-conflict-when-checking-files-out.sh b/t/t2000-conflict-when-checking-files-out.sh
index f18616ad2b..af199d8191 100755
--- a/t/t2000-conflict-when-checking-files-out.sh
+++ b/t/t2000-conflict-when-checking-files-out.sh
@@ -35,30 +35,30 @@ show_files() {
sed -e 's/^\([0-9]*\) [^ ]* [0-9a-f]* /tr: \1 /'
}
-date >path0
-mkdir path1
-date >path1/file1
-
-test_expect_success \
- 'git update-index --add various paths.' \
- 'git update-index --add path0 path1/file1'
-
-rm -fr path0 path1
-mkdir path0
-date >path0/file0
-date >path1
+test_expect_success 'prepare files path0 and path1/file1' '
+ date >path0 &&
+ mkdir path1 &&
+ date >path1/file1 &&
+ git update-index --add path0 path1/file1
+'
-test_expect_success \
- 'git checkout-index without -f should fail on conflicting work tree.' \
- 'test_must_fail git checkout-index -a'
+test_expect_success 'prepare working tree files with D/F conflicts' '
+ rm -fr path0 path1 &&
+ mkdir path0 &&
+ date >path0/file0 &&
+ date >path1
+'
-test_expect_success \
- 'git checkout-index with -f should succeed.' \
- 'git checkout-index -f -a'
+test_expect_success 'git checkout-index without -f should fail on conflicting work tree.' '
+ test_must_fail git checkout-index -a
+'
-test_expect_success \
- 'git checkout-index conflicting paths.' \
- 'test -f path0 && test -d path1 && test -f path1/file1'
+test_expect_success 'git checkout-index with -f should succeed.' '
+ git checkout-index -f -a &&
+ test_path_is_file path0 &&
+ test_path_is_dir path1 &&
+ test_path_is_file path1/file1
+'
test_expect_success SYMLINKS 'checkout-index -f twice with --prefix' '
mkdir -p tar/get &&
@@ -83,53 +83,63 @@ test_expect_success SYMLINKS 'checkout-index -f twice with --prefix' '
# path path3 is occupied by a non-directory. With "-f" it should remove
# the symlink path3 and create directory path3 and file path3/file1.
-mkdir path2
-date >path2/file0
-test_expect_success \
- 'git update-index --add path2/file0' \
- 'git update-index --add path2/file0'
-test_expect_success \
- 'writing tree out with git write-tree' \
- 'tree1=$(git write-tree)'
+test_expect_success 'prepare path2/file0 and index' '
+ mkdir path2 &&
+ date >path2/file0 &&
+ git update-index --add path2/file0
+'
+
+test_expect_success 'write tree with path2/file0' '
+ tree1=$(git write-tree)
+'
+
test_debug 'show_files $tree1'
-mkdir path3
-date >path3/file1
-test_expect_success \
- 'git update-index --add path3/file1' \
- 'git update-index --add path3/file1'
-test_expect_success \
- 'writing tree out with git write-tree' \
- 'tree2=$(git write-tree)'
+test_expect_success 'prepare path3/file1 and index' '
+ mkdir path3 &&
+ date >path3/file1 &&
+ git update-index --add path3/file1
+'
+
+test_expect_success 'write tree with path3/file1' '
+ tree2=$(git write-tree)
+'
+
test_debug 'show_files $tree2'
-rm -fr path3
-test_expect_success \
- 'read previously written tree and checkout.' \
- 'git read-tree -m $tree1 && git checkout-index -f -a'
+test_expect_success 'read previously written tree and checkout.' '
+ rm -fr path3 &&
+ git read-tree -m $tree1 &&
+ git checkout-index -f -a
+'
+
test_debug 'show_files $tree1'
-test_expect_success \
- 'add a symlink' \
- 'test_ln_s_add path2 path3'
-test_expect_success \
- 'writing tree out with git write-tree' \
- 'tree3=$(git write-tree)'
+test_expect_success 'add a symlink' '
+ test_ln_s_add path2 path3
+'
+
+test_expect_success 'write tree with symlink path3' '
+ tree3=$(git write-tree)
+'
+
test_debug 'show_files $tree3'
# Morten says "Got that?" here.
# Test begins.
-test_expect_success \
- 'read previously written tree and checkout.' \
- 'git read-tree $tree2 && git checkout-index -f -a'
+test_expect_success 'read previously written tree and checkout.' '
+ git read-tree $tree2 &&
+ git checkout-index -f -a
+'
+
test_debug 'show_files $tree2'
-test_expect_success \
- 'checking out conflicting path with -f' \
- 'test ! -h path2 && test -d path2 &&
- test ! -h path3 && test -d path3 &&
- test ! -h path2/file0 && test -f path2/file0 &&
- test ! -h path3/file1 && test -f path3/file1'
+test_expect_success 'checking out conflicting path with -f' '
+ test_path_is_dir_not_symlink path2 &&
+ test_path_is_dir_not_symlink path3 &&
+ test_path_is_file_not_symlink path2/file0 &&
+ test_path_is_file_not_symlink path3/file1
+'
test_done
diff --git a/t/t2107-update-index-basic.sh b/t/t2107-update-index-basic.sh
index cc72ead79f..3bffe5da8a 100755
--- a/t/t2107-update-index-basic.sh
+++ b/t/t2107-update-index-basic.sh
@@ -86,7 +86,7 @@ test_expect_success '.lock files cleaned up' '
# the_index.cache_changed is zero, rollback_lock_file fails
git update-index --refresh --verbose >out &&
test_must_be_empty out &&
- ! test -f .git/index.lock
+ test_path_is_missing .git/index.lock
)
'
diff --git a/t/t2200-add-update.sh b/t/t2200-add-update.sh
index 06e83d3333..0a96655cfe 100755
--- a/t/t2200-add-update.sh
+++ b/t/t2200-add-update.sh
@@ -200,6 +200,44 @@ test_expect_success 'add -u resolves unmerged paths' '
test_cmp expect actual
'
+test_expect_success 'add -u avoids rename pairing on unmerged paths' '
+ test_create_repo rename-crash &&
+ (
+ cd rename-crash &&
+ test_seq 1 100 |
+ sed "s/.*/line &: same text/" >conflict.txt &&
+ cp conflict.txt bystander.txt &&
+ git add conflict.txt bystander.txt &&
+ git commit -m "initial: two files with identical content" &&
+ main_branch=$(git symbolic-ref --short HEAD) &&
+ git checkout -b feature &&
+ sed "s/^line 50:.*/line 50: FEATURE/" \
+ conflict.txt >conflict.txt.tmp &&
+ mv conflict.txt.tmp conflict.txt &&
+ git add conflict.txt &&
+ git commit -m "feature: modify line 50" &&
+ git checkout "$main_branch" &&
+ sed "s/^line 50:.*/line 50: MAIN/" \
+ conflict.txt >conflict.txt.tmp &&
+ mv conflict.txt.tmp conflict.txt &&
+ git add conflict.txt &&
+ git commit -m "main: modify line 50 differently" &&
+ test_must_fail git merge feature &&
+ rm bystander.txt &&
+ git add -u >out &&
+ test_must_be_empty out &&
+ git ls-files -u >actual &&
+ test_must_be_empty actual &&
+ git ls-files bystander.txt conflict.txt >actual &&
+ cat >expect <<-\EOF &&
+ conflict.txt
+ EOF
+ test_cmp expect actual &&
+ git diff-files --name-only >actual &&
+ test_must_be_empty actual
+ )
+'
+
test_expect_success '"add -u non-existent" should fail' '
test_must_fail git add -u non-existent &&
git ls-files >actual &&
diff --git a/t/t2203-add-intent.sh b/t/t2203-add-intent.sh
index 192ad14b5f..44c1936e4d 100755
--- a/t/t2203-add-intent.sh
+++ b/t/t2203-add-intent.sh
@@ -16,7 +16,8 @@ test_expect_success 'intent to add' '
'
test_expect_success 'git status' '
- git status --porcelain | grep -v actual >actual &&
+ git status --porcelain >actual.raw &&
+ grep -v actual actual.raw >actual &&
cat >expect <<-\EOF &&
DA 1.t
A elif
@@ -26,7 +27,8 @@ test_expect_success 'git status' '
'
test_expect_success 'git status with porcelain v2' '
- git status --porcelain=v2 | grep -v "^?" >actual &&
+ git status --porcelain=v2 >actual.raw &&
+ grep -v "^?" actual.raw >actual &&
nam1=$(echo 1 | git hash-object --stdin) &&
nam2=$(git hash-object elif) &&
cat >expect <<-EOF &&
@@ -171,17 +173,20 @@ test_expect_success 'rename detection finds the right names' '
mv first third &&
git add -N third &&
- git status | grep -v "^?" >actual.1 &&
+ git status >actual.raw.1 &&
+ grep -v "^?" actual.raw.1 >actual.1 &&
test_grep "renamed: *first -> third" actual.1 &&
- git status --porcelain | grep -v "^?" >actual.2 &&
+ git status --porcelain >actual.raw.2 &&
+ grep -v "^?" actual.raw.2 >actual.2 &&
cat >expected.2 <<-\EOF &&
R first -> third
EOF
test_cmp expected.2 actual.2 &&
hash=$(git hash-object third) &&
- git status --porcelain=v2 | grep -v "^?" >actual.3 &&
+ git status --porcelain=v2 >actual.raw.3 &&
+ grep -v "^?" actual.raw.3 >actual.3 &&
cat >expected.3 <<-EOF &&
2 .R N... 100644 100644 100644 $hash $hash R100 third first
EOF
@@ -211,11 +216,13 @@ test_expect_success 'double rename detection in status' '
mv second third &&
git add -N third &&
- git status | grep -v "^?" >actual.1 &&
+ git status >actual.raw.1 &&
+ grep -v "^?" actual.raw.1 >actual.1 &&
test_grep "renamed: *first -> second" actual.1 &&
test_grep "renamed: *second -> third" actual.1 &&
- git status --porcelain | grep -v "^?" >actual.2 &&
+ git status --porcelain >actual.raw.2 &&
+ grep -v "^?" actual.raw.2 >actual.2 &&
cat >expected.2 <<-\EOF &&
R first -> second
R second -> third
@@ -223,7 +230,8 @@ test_expect_success 'double rename detection in status' '
test_cmp expected.2 actual.2 &&
hash=$(git hash-object third) &&
- git status --porcelain=v2 | grep -v "^?" >actual.3 &&
+ git status --porcelain=v2 >actual.raw.3 &&
+ grep -v "^?" actual.raw.3 >actual.3 &&
cat >expected.3 <<-EOF &&
2 R. N... 100644 100644 100644 $hash $hash R100 second first
2 .R N... 100644 100644 100644 $hash $hash R100 third second
diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh
index 023e1301c8..58b4445cc4 100755
--- a/t/t2400-worktree-add.sh
+++ b/t/t2400-worktree-add.sh
@@ -987,7 +987,7 @@ test_dwim_orphan () {
then
test_must_be_empty actual
else
- grep "$info_text" actual
+ test_grep "$info_text" actual
fi
elif [ "$outcome" = "no_infer" ]
then
@@ -996,39 +996,35 @@ test_dwim_orphan () {
then
test_must_be_empty actual
else
- ! grep "$info_text" actual
+ test_grep ! "$info_text" actual
fi
elif [ "$outcome" = "fetch_error" ]
then
test_must_fail git $dashc_args worktree add $args 2>actual &&
- grep "$fetch_error_text" actual
+ test_grep "$fetch_error_text" actual
elif [ "$outcome" = "fatal_orphan_bad_combo" ]
then
test_must_fail git $dashc_args worktree add $args 2>actual &&
if [ $use_quiet -eq 1 ]
then
- ! grep "$info_text" actual
+ test_grep ! "$info_text" actual
else
- grep "$info_text" actual
+ test_grep "$info_text" actual
fi &&
- grep "$bad_combo_regex" actual
+ test_grep "$bad_combo_regex" actual
elif [ "$outcome" = "warn_bad_head" ]
then
test_must_fail git $dashc_args worktree add $args 2>actual &&
if [ $use_quiet -eq 1 ]
then
- grep "$invalid_ref_regex" actual &&
- ! grep "$orphan_hint" actual
+ test_grep "$invalid_ref_regex" actual &&
+ test_grep ! "$orphan_hint" actual
else
- headpath=$(git $dashc_args rev-parse --path-format=absolute --git-path HEAD) &&
- headcontents=$(cat "$headpath") &&
- grep "HEAD points to an invalid (or orphaned) reference" actual &&
- grep "HEAD path: .$headpath." actual &&
- grep "HEAD contents: .$headcontents." actual &&
- grep "$orphan_hint" actual &&
- ! grep "$info_text" actual
+ test_grep "HEAD points to an invalid (or orphaned) reference" actual &&
+ test_grep "$orphan_hint" actual &&
+ test_grep ! "$info_text" actual
fi &&
- grep "$invalid_ref_regex" actual
+ test_grep "$invalid_ref_regex" actual
else
# Unreachable
false
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
index cc627e34a7..84b2d0e664 100755
--- a/t/t3430-rebase-merges.sh
+++ b/t/t3430-rebase-merges.sh
@@ -507,9 +507,11 @@ test_expect_success 'octopus merges' '
git rebase -i --force-rebase -r HEAD^^ &&
test "Hank" = "$(git show -s --format=%an HEAD)" &&
test "$before" != $(git rev-parse HEAD) &&
- test_cmp_graph HEAD^^.. <<-\EOF
+ # NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+ # that contain multibyte chars.
+ test_cmp_graph HEAD^^.. <<-EOF
*-. Tüntenfüsch
- |\ \
+ |\\ \\
| | * three
| * | two
| |/
diff --git a/t/t3650-replay-basics.sh b/t/t3650-replay-basics.sh
index d5c7dd1bf4..3353bc4a4d 100755
--- a/t/t3650-replay-basics.sh
+++ b/t/t3650-replay-basics.sh
@@ -81,9 +81,13 @@ test_expect_success 'exactly one of --onto, --advance, or --revert is required'
test_cmp expect actual
'
-test_expect_success 'no base or negative ref gives no-replaying down to root error' '
- echo "fatal: replaying down from root commit is not supported yet!" >expect &&
- test_must_fail git replay --onto=topic1 topic2 2>actual &&
+test_expect_success 'replay down to root onto another branch' '
+ git replay --ref-action=print --onto main topic2 >result &&
+
+ test_line_count = 1 result &&
+
+ git log --format=%s $(cut -f 3 -d " " result) >actual &&
+ test_write_lines E D C M L B A >expect &&
test_cmp expect actual
'
diff --git a/t/t3902-quoted.sh b/t/t3902-quoted.sh
index f528008c36..8660ec5cb0 100755
--- a/t/t3902-quoted.sh
+++ b/t/t3902-quoted.sh
@@ -60,16 +60,18 @@ With SP in it
"\346\277\261\351\207\216\347\264\224"
EOF
-cat >expect.raw <<\EOF
+# NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+# that contain multibyte chars.
+cat >expect.raw <<EOF
Name
-"Name and a\nLF"
-"Name and an\tHT"
-"Name\""
+"Name and a\\nLF"
+"Name and an\\tHT"
+"Name\\""
With SP in it
-"濱野\t純"
-"濱野\n純"
+"濱野\\t純"
+"濱野\\n純"
濱野 純
-"濱野\"純"
+"濱野\\"純"
濱野/file
濱野純
EOF
diff --git a/t/t4012-diff-binary.sh b/t/t4012-diff-binary.sh
index d1d30ac2a9..97b5ac0407 100755
--- a/t/t4012-diff-binary.sh
+++ b/t/t4012-diff-binary.sh
@@ -68,7 +68,7 @@ test_expect_success 'apply detecting corrupt patch correctly' '
sed -e "s/-CIT/xCIT/" <output >broken &&
test_must_fail git apply --stat --summary broken 2>detected &&
detected=$(cat detected) &&
- detected=$(expr "$detected" : "error.*at line \\([0-9]*\\)\$") &&
+ detected=$(expr "$detected" : "error.*broken:\\([0-9]*\\)\$") &&
detected=$(sed -ne "${detected}p" broken) &&
test "$detected" = xCIT
'
@@ -77,7 +77,7 @@ test_expect_success 'apply detecting corrupt patch correctly' '
git diff --binary | sed -e "s/-CIT/xCIT/" >broken &&
test_must_fail git apply --stat --summary broken 2>detected &&
detected=$(cat detected) &&
- detected=$(expr "$detected" : "error.*at line \\([0-9]*\\)\$") &&
+ detected=$(expr "$detected" : "error.*broken:\\([0-9]*\\)\$") &&
detected=$(sed -ne "${detected}p" broken) &&
test "$detected" = xCIT
'
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index bcdb944017..0b89d127b5 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -380,6 +380,131 @@ test_expect_success 'filename limit applies only to basename' '
done
'
+test_expect_success 'cover letter with subject, author and count' '
+ rm -rf patches &&
+ test_when_finished "git reset --hard HEAD~1" &&
+ test_when_finished "rm -rf patches test_file" &&
+ touch test_file &&
+ git add test_file &&
+ git commit -m "This is a subject" &&
+ git format-patch --commit-list-format="log:[%(count)/%(total)] %s (%an)" \
+ -o patches HEAD~1 &&
+ test_grep "^\[1/1\] This is a subject (A U Thor)$" patches/0000-cover-letter.patch
+'
+
+test_expect_success 'cover letter with custom format no prefix' '
+ rm -rf patches &&
+ test_when_finished "git reset --hard HEAD~1" &&
+ test_when_finished "rm -rf patches test_file" &&
+ touch test_file &&
+ git add test_file &&
+ git commit -m "This is a subject" &&
+ git format-patch --commit-list-format="[%(count)/%(total)] %s (%an)" \
+ -o patches HEAD~1 &&
+ test_grep "^\[1/1\] This is a subject (A U Thor)$" patches/0000-cover-letter.patch
+'
+
+test_expect_success 'cover letter fail when no prefix and no placeholder' '
+ rm -rf patches &&
+ test_when_finished "git reset --hard HEAD~1" &&
+ test_when_finished "rm -rf patches test_file err" &&
+ touch test_file &&
+ git add test_file &&
+ git commit -m "This is a subject" &&
+ test_must_fail git format-patch --commit-list-format="this should fail" \
+ -o patches HEAD~1 2>err &&
+ test_grep "is not a valid format string" err
+'
+
+test_expect_success 'cover letter modern format' '
+ test_when_finished "git reset --hard HEAD~1" &&
+ test_when_finished "rm -rf patches test_file" &&
+ touch test_file &&
+ git add test_file &&
+ git commit -m "This is a subject" &&
+ git format-patch --commit-list-format="modern" -o patches HEAD~1 &&
+ test_grep "^\[1/1\] This is a subject$" patches/0000-cover-letter.patch
+'
+
+test_expect_success 'cover letter shortlog format' '
+ test_when_finished "git reset --hard HEAD~1" &&
+ test_when_finished "rm -rf expect patches result test_file" &&
+ cat >expect <<-"EOF" &&
+ A U Thor (1):
+ This is a subject
+ EOF
+ touch test_file &&
+ git add test_file &&
+ git commit -m "This is a subject" &&
+ git format-patch --commit-list-format=shortlog -o patches HEAD~1 &&
+ grep -E -A 1 "^A U Thor \([[:digit:]]+\):$" patches/0000-cover-letter.patch >result &&
+ cat result &&
+ test_cmp expect result
+'
+
+test_expect_success 'no cover letter but with format specified' '
+ test_when_finished "git reset --hard HEAD~1" &&
+ test_when_finished "rm -rf patches result test_file" &&
+ touch test_file &&
+ git add test_file &&
+ git commit -m "This is a subject" &&
+ git format-patch --no-cover-letter --commit-list-format="[%(count)] %s" -o patches HEAD~1 &&
+ test_path_is_missing patches/0000-cover-letter.patch
+'
+
+test_expect_success 'cover letter config with count, subject and author' '
+ test_when_finished "rm -rf patches result" &&
+ test_when_finished "git config unset format.coverletter" &&
+ test_when_finished "git config unset format.commitlistformat" &&
+ git config set format.coverletter true &&
+ git config set format.commitlistformat "log:[%(count)/%(total)] %s (%an)" &&
+ git format-patch -o patches HEAD~2 &&
+ grep -E "^[[[:digit:]]+/[[:digit:]]+] .* \(A U Thor\)" patches/0000-cover-letter.patch >result &&
+ test_line_count = 2 result
+'
+
+test_expect_success 'cover letter config with count and author' '
+ test_when_finished "rm -rf patches result" &&
+ test_when_finished "git config unset format.coverletter" &&
+ test_when_finished "git config unset format.commitlistformat" &&
+ git config set format.coverletter true &&
+ git config set format.commitlistformat "log:[%(count)/%(total)] (%an)" &&
+ git format-patch -o patches HEAD~2 &&
+ grep -E "^[[[:digit:]]+/[[:digit:]]+] \(A U Thor\)" patches/0000-cover-letter.patch >result &&
+ test_line_count = 2 result
+'
+
+test_expect_success 'cover letter config commitlistformat set to modern' '
+ test_when_finished "rm -rf patches result" &&
+ test_when_finished "git config unset format.coverletter" &&
+ test_when_finished "git config unset format.commitlistformat" &&
+ git config set format.coverletter true &&
+ git config set format.commitlistformat modern &&
+ git format-patch -o patches HEAD~2 &&
+ grep -E "^[[[:digit:]]+/[[:digit:]]+] .*$" patches/0000-cover-letter.patch >result &&
+ test_line_count = 2 result
+'
+
+test_expect_success 'cover letter config commitlistformat set to shortlog' '
+ test_when_finished "rm -rf patches result" &&
+ test_when_finished "git config unset format.coverletter" &&
+ test_when_finished "git config unset format.commitlistformat" &&
+ git config set format.coverletter true &&
+ git config set format.commitlistformat shortlog &&
+ git format-patch -o patches HEAD~2 &&
+ grep -E "^A U Thor \([[:digit:]]+\)" patches/0000-cover-letter.patch >result &&
+ test_line_count = 1 result
+'
+
+test_expect_success 'cover letter config commitlistformat not set' '
+ test_when_finished "rm -rf patches result" &&
+ test_when_finished "git config unset format.coverletter" &&
+ git config set format.coverletter true &&
+ git format-patch -o patches HEAD~2 &&
+ grep -E "^A U Thor \([[:digit:]]+\)" patches/0000-cover-letter.patch >result &&
+ test_line_count = 1 result
+'
+
test_expect_success 'reroll count' '
rm -fr patches &&
git format-patch -o patches --cover-letter --reroll-count 4 main..side >list &&
@@ -1285,7 +1410,9 @@ test_expect_success 'format-patch wraps extremely long from-header (rfc2047)' '
check_author "Foö Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar"
'
-cat >expect <<'EOF'
+# NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+# that contain multibyte chars.
+cat >expect <<EOF
From: Foö Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar
Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo
Bar Foo Bar Foo Bar Foo Bar <author@example.com>
@@ -1300,7 +1427,9 @@ test_expect_success 'format-patch wraps extremely long from-header (non-ASCII wi
test_cmp expect actual
'
-cat >expect <<'EOF'
+# NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+# that contain multibyte chars.
+cat >expect <<EOF
Subject: [PATCH] Foö
EOF
test_expect_success 'subject lines are unencoded with --no-encode-email-headers' '
@@ -1312,7 +1441,9 @@ test_expect_success 'subject lines are unencoded with --no-encode-email-headers'
test_cmp expect actual
'
-cat >expect <<'EOF'
+# NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+# that contain multibyte chars.
+cat >expect <<EOF
Subject: [PATCH] Foö
EOF
test_expect_success 'subject lines are unencoded with format.encodeEmailHeaders=false' '
@@ -1531,7 +1662,9 @@ test_expect_success 'in-body headers trigger content encoding' '
test_env GIT_AUTHOR_NAME="éxötìc" test_commit exotic &&
test_when_finished "git reset --hard HEAD^" &&
git format-patch -1 --stdout --from >patch &&
- cat >expect <<-\EOF &&
+ # NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+ # that contain multibyte chars.
+ cat >expect <<-EOF &&
From: C O Mitter <committer@example.com>
Content-Type: text/plain; charset=UTF-8
diff --git a/t/t4100-apply-stat.sh b/t/t4100-apply-stat.sh
index a5664f3eb3..8393076469 100755
--- a/t/t4100-apply-stat.sh
+++ b/t/t4100-apply-stat.sh
@@ -48,7 +48,93 @@ test_expect_success 'applying a hunk header which overflows fails' '
+b
EOF
test_must_fail git apply patch 2>err &&
- echo "error: corrupt patch at line 4" >expect &&
+ echo "error: corrupt patch at patch:4" >expect &&
+ test_cmp expect err
+'
+
+test_expect_success 'applying a hunk header which overflows from stdin fails' '
+ cat >patch <<-\EOF &&
+ diff -u a/file b/file
+ --- a/file
+ +++ b/file
+ @@ -98765432109876543210 +98765432109876543210 @@
+ -a
+ +b
+ EOF
+ test_must_fail git apply <patch 2>err &&
+ echo "error: corrupt patch at <stdin>:4" >expect &&
+ test_cmp expect err
+'
+
+test_expect_success 'applying multiple patches reports the corrupted input' '
+ cat >good.patch <<-\EOF &&
+ diff -u a/file b/file
+ --- a/file
+ +++ b/file
+ @@ -1 +1 @@
+ -a
+ +b
+ EOF
+ cat >bad.patch <<-\EOF &&
+ diff -u a/file b/file
+ --- a/file
+ +++ b/file
+ @@ -98765432109876543210 +98765432109876543210 @@
+ -a
+ +b
+ EOF
+ test_must_fail git apply --stat --summary good.patch bad.patch 2>err &&
+ echo "error: corrupt patch at bad.patch:4" >expect &&
+ test_cmp expect err
+'
+
+test_expect_success 'applying a patch without a header reports the input' '
+ cat >fragment.patch <<-\EOF &&
+ @@ -1 +1 @@
+ -a
+ +b
+ EOF
+ test_must_fail git apply fragment.patch 2>err &&
+ echo "error: patch fragment without header at fragment.patch:1: @@ -1 +1 @@" >expect &&
+ test_cmp expect err
+'
+
+test_expect_success 'applying a patch with a missing filename reports the input' '
+ cat >missing.patch <<-\EOF &&
+ diff --git a/f b/f
+ index 7898192..6178079 100644
+ --- a/f
+ @@ -1 +1 @@
+ -a
+ +b
+ EOF
+ test_must_fail git apply missing.patch 2>err &&
+ echo "error: git diff header lacks filename information at missing.patch:4" >expect &&
+ test_cmp expect err
+'
+
+test_expect_success 'applying a patch with an invalid mode reports the input' '
+ cat >mode.patch <<-\EOF &&
+ diff --git a/f b/f
+ old mode 10x644
+ EOF
+ test_must_fail git apply mode.patch 2>err &&
+ cat >expect <<-\EOF &&
+ error: invalid mode at mode.patch:2: 10x644
+
+ EOF
+ test_cmp expect err
+'
+
+test_expect_success 'applying a patch with only garbage reports the input' '
+ cat >garbage.patch <<-\EOF &&
+ diff --git a/f b/f
+ --- a/f
+ +++ b/f
+ this is garbage
+ EOF
+ test_must_fail git apply garbage.patch 2>err &&
+ echo "error: patch with only garbage at garbage.patch:4" >expect &&
test_cmp expect err
'
test_done
diff --git a/t/t4103-apply-binary.sh b/t/t4103-apply-binary.sh
index 8e302a5a57..f2d41e06bc 100755
--- a/t/t4103-apply-binary.sh
+++ b/t/t4103-apply-binary.sh
@@ -179,6 +179,24 @@ test_expect_success PERL_TEST_HELPERS 'reject truncated binary diff' '
" <patch >patch.trunc &&
do_reset &&
- test_must_fail git apply patch.trunc
+ test_must_fail git apply patch.trunc 2>err &&
+ line=$(awk "END { print NR + 1 }" patch.trunc) &&
+ grep "error: corrupt binary patch at patch.trunc:$line: " err
+'
+
+test_expect_success 'reject unrecognized binary diff' '
+ cat >patch.bad <<-\EOF &&
+ diff --git a/f b/f
+ new file mode 100644
+ index 0000000..7898192
+ GIT binary patch
+ bogus
+ EOF
+ test_must_fail git apply patch.bad 2>err &&
+ cat >expect <<-\EOF &&
+ error: unrecognized binary patch at patch.bad:4
+ error: No valid patches in input (allow with "--allow-empty")
+ EOF
+ test_cmp expect err
'
test_done
diff --git a/t/t4120-apply-popt.sh b/t/t4120-apply-popt.sh
index 697e86c0ff..c960fdf622 100755
--- a/t/t4120-apply-popt.sh
+++ b/t/t4120-apply-popt.sh
@@ -23,6 +23,47 @@ test_expect_success setup '
rmdir süb
'
+test_expect_success 'git apply -p 1 patch' '
+ cat >patch <<-\EOF &&
+ From 90ad11d5b2d437e82d4d992f72fb44c2227798b5 Mon Sep 17 00:00:00 2001
+ From: Mroik <mroik@delayed.space>
+ Date: Mon, 9 Mar 2026 23:25:00 +0100
+ Subject: [PATCH] Test
+
+ ---
+ t/test/test | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+ create mode 100644 t/test/test
+
+ diff --git a/t/test/test b/t/test/test
+ new file mode 100644
+ index 0000000000..e69de29bb2
+ --
+ 2.53.0.851.ga537e3e6e9
+ EOF
+ test_when_finished "rm -rf t" &&
+ git apply -p 1 patch &&
+ test_path_is_dir t
+'
+
+test_expect_success 'apply fails due to non-num -p' '
+ test_when_finished "rm -rf t test err" &&
+ test_must_fail git apply -p malformed patch 2>err &&
+ test_grep "option -p expects a non-negative integer" err
+'
+
+test_expect_success 'apply fails due to trailing non-digit in -p' '
+ test_when_finished "rm -rf t test err" &&
+ test_must_fail git apply -p 2q patch 2>err &&
+ test_grep "option -p expects a non-negative integer" err
+'
+
+test_expect_success 'apply fails due to negative number in -p' '
+ test_when_finished "rm -rf t test err patch" &&
+	test_must_fail git apply -p -1 patch 2>err &&
+ test_grep "option -p expects a non-negative integer" err
+'
+
test_expect_success 'apply git diff with -p2' '
cp file1.saved file1 &&
git apply -p2 patch.file
diff --git a/t/t4124-apply-ws-rule.sh b/t/t4124-apply-ws-rule.sh
index 29ea7d4268..205d86d05e 100755
--- a/t/t4124-apply-ws-rule.sh
+++ b/t/t4124-apply-ws-rule.sh
@@ -561,6 +561,22 @@ test_expect_success 'check incomplete lines (setup)' '
git config core.whitespace incomplete-line
'
+test_expect_success 'no incomplete context line (not an error)' '
+ test_when_finished "rm -f sample*-i patch patch-new target" &&
+ test_write_lines 1 2 3 "" 4 5 >sample-i &&
+ test_write_lines 1 2 3 "" 0 5 >sample2-i &&
+ cat sample-i >target &&
+ git add target &&
+ cat sample2-i >target &&
+ git diff-files -p target >patch &&
+ sed -e "s/^ $//" <patch >patch-new &&
+
+ cat sample-i >target &&
+ git apply --whitespace=fix <patch-new 2>error &&
+ test_cmp sample2-i target &&
+ test_must_be_empty error
+'
+
test_expect_success 'incomplete context line (not an error)' '
(test_write_lines 1 2 3 4 5 && printf 6) >sample-i &&
(test_write_lines 1 2 3 0 5 && printf 6) >sample2-i &&
diff --git a/t/t4201-shortlog.sh b/t/t4201-shortlog.sh
index 5f23fc147b..9f41d56d9a 100755
--- a/t/t4201-shortlog.sh
+++ b/t/t4201-shortlog.sh
@@ -105,7 +105,9 @@ test_expect_success 'output from user-defined format is re-wrapped' '
'
test_expect_success !MINGW,ICONV 'shortlog wrapping' '
- cat >expect <<\EOF &&
+ # NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+ # that contain multibyte chars.
+ cat >expect <<EOF &&
A U Thor (5):
Test
This is a very, very long first line for the commit message to see if
diff --git a/t/t4211-line-log.sh b/t/t4211-line-log.sh
index 0a7c3ca42f..aaf197d2ed 100755
--- a/t/t4211-line-log.sh
+++ b/t/t4211-line-log.sh
@@ -129,7 +129,7 @@ test_expect_success '-L with --output' '
git checkout parallel-change &&
git log --output=log -L :main:b.c >output &&
test_must_be_empty output &&
- test_line_count = 70 log
+ test_line_count = 75 log
'
test_expect_success 'range_set_union' '
@@ -339,14 +339,106 @@ test_expect_success 'zero-width regex .* matches any function name' '
test_cmp expect actual
'
+test_expect_success 'setup for diff pipeline tests' '
+ git checkout parent-oids &&
+
+ head_blob_old=$(git rev-parse --short HEAD^:file.c) &&
+ head_blob_new=$(git rev-parse --short HEAD:file.c) &&
+ root_blob=$(git rev-parse --short HEAD~4:file.c) &&
+ null_blob=$(test_oid zero | cut -c1-7) &&
+ head_blob_old_full=$(git rev-parse HEAD^:file.c) &&
+ head_blob_new_full=$(git rev-parse HEAD:file.c) &&
+ root_blob_full=$(git rev-parse HEAD~4:file.c) &&
+ null_blob_full=$(test_oid zero)
+'
+
+test_expect_success '-L diff output includes index and new file mode' '
+ git log -L:func2:file.c --format= >actual &&
+
+ # Output should contain index headers (not present in old code path)
+ grep "^index $head_blob_old\.\.$head_blob_new 100644" actual &&
+
+ # Root commit should show new file mode and null index
+ grep "^new file mode 100644" actual &&
+ grep "^index $null_blob\.\.$root_blob$" actual &&
+
+ # Hunk headers should include funcname context
+ grep "^@@ .* @@ int func1()" actual
+'
+
+test_expect_success '-L with --word-diff' '
+ cat >expect <<-\EOF &&
+
+ diff --git a/file.c b/file.c
+ --- a/file.c
+ +++ b/file.c
+ @@ -6,4 +6,4 @@ int func1()
+ int func2()
+ {
+ return [-F2;-]{+F2 + 2;+}
+ }
+
+ diff --git a/file.c b/file.c
+ new file mode 100644
+ --- /dev/null
+ +++ b/file.c
+ @@ -0,0 +6,4 @@
+ {+int func2()+}
+ {+{+}
+ {+ return F2;+}
+ {+}+}
+ EOF
+ git log -L:func2:file.c --word-diff --format= >actual &&
+ grep -v "^index " actual >actual.filtered &&
+ grep -v "^index " expect >expect.filtered &&
+ test_cmp expect.filtered actual.filtered
+'
+
+test_expect_success '-L with --no-prefix' '
+ git log -L:func2:file.c --no-prefix --format= >actual &&
+ grep "^diff --git file.c file.c" actual &&
+ grep "^--- file.c" actual &&
+ ! grep "^--- a/" actual
+'
+
+test_expect_success '-L with --full-index' '
+ git log -L:func2:file.c --full-index --format= >actual &&
+ grep "^index $head_blob_old_full\.\.$head_blob_new_full 100644" actual &&
+ grep "^index $null_blob_full\.\.$root_blob_full$" actual
+'
+
+test_expect_success 'setup -L with whitespace change' '
+ git checkout -b ws-change parent-oids &&
+ sed "s/ return F2 + 2;/ return F2 + 2;/" file.c >tmp &&
+ mv tmp file.c &&
+ git commit -a -m "Whitespace change in func2()"
+'
+
+test_expect_success '-L with --ignore-all-space suppresses whitespace-only diff' '
+ git log -L:func2:file.c --format= >without_w &&
+ git log -L:func2:file.c --format= -w >with_w &&
+
+ # Without -w: three commits produce diffs (whitespace, modify, root)
+ test $(grep -c "^diff --git" without_w) = 3 &&
+
+ # With -w: whitespace-only commit produces no hunk, so only two diffs
+ test $(grep -c "^diff --git" with_w) = 2
+'
+
test_expect_success 'show line-log with graph' '
+ git checkout parent-oids &&
+ head_blob_old=$(git rev-parse --short HEAD^:file.c) &&
+ head_blob_new=$(git rev-parse --short HEAD:file.c) &&
+ root_blob=$(git rev-parse --short HEAD~4:file.c) &&
+ null_blob=$(test_oid zero | cut -c1-7) &&
qz_to_tab_space >expect <<-EOF &&
* $head_oid Modify func2() in file.c
|Z
| diff --git a/file.c b/file.c
+ | index $head_blob_old..$head_blob_new 100644
| --- a/file.c
| +++ b/file.c
- | @@ -6,4 +6,4 @@
+ | @@ -6,4 +6,4 @@ int func1()
| int func2()
| {
| - return F2;
@@ -355,6 +447,8 @@ test_expect_success 'show line-log with graph' '
* $root_oid Add func1() and func2() in file.c
ZZ
diff --git a/file.c b/file.c
+ new file mode 100644
+ index $null_blob..$root_blob
--- /dev/null
+++ b/file.c
@@ -0,0 +6,4 @@
@@ -367,4 +461,254 @@ test_expect_success 'show line-log with graph' '
test_cmp expect actual
'
+test_expect_success 'setup for -L with -G/-S/--find-object and a merge with rename' '
+ git checkout --orphan pickaxe-rename &&
+ git reset --hard &&
+
+ echo content >file &&
+ git add file &&
+ git commit -m "add file" &&
+
+ git checkout -b pickaxe-rename-side &&
+ git mv file renamed-file &&
+ git commit -m "rename file" &&
+
+ git checkout pickaxe-rename &&
+ git commit --allow-empty -m "diverge" &&
+ git merge --no-edit pickaxe-rename-side &&
+
+ git mv renamed-file file &&
+ git commit -m "rename back"
+'
+
+test_expect_success '-L -G does not crash with merge and rename' '
+ git log --format="%s" --no-patch -L 1,1:file -G "." >actual
+'
+
+test_expect_success '-L -S does not crash with merge and rename' '
+ git log --format="%s" --no-patch -L 1,1:file -S content >actual
+'
+
+test_expect_success '-L --find-object does not crash with merge and rename' '
+ git log --format="%s" --no-patch -L 1,1:file \
+ --find-object=$(git rev-parse HEAD:file) >actual
+'
+
+# Commit-level filtering with pickaxe does not yet work for -L.
+# show_log() prints the commit header before diffcore_std() runs
+# pickaxe, so commits cannot be suppressed even when no diff pairs
+# survive filtering. Fixing this would require deferring show_log()
+# until after diffcore_std(), which is a larger restructuring of the
+# log-tree output pipeline.
+test_expect_failure '-L -G should filter commits by pattern' '
+ git log --format="%s" --no-patch -L 1,1:file -G "nomatch" >actual &&
+ test_must_be_empty actual
+'
+
+test_expect_failure '-L -S should filter commits by pattern' '
+ git log --format="%s" --no-patch -L 1,1:file -S "nomatch" >actual &&
+ test_must_be_empty actual
+'
+
+test_expect_failure '-L --find-object should filter commits by object' '
+ git log --format="%s" --no-patch -L 1,1:file \
+ --find-object=$ZERO_OID >actual &&
+ test_must_be_empty actual
+'
+
+test_expect_success '-L with --word-diff-regex' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c --word-diff \
+ --word-diff-regex="[a-zA-Z0-9_]+" --format= >actual &&
+ # Word-diff markers must be present
+ grep "{+" actual &&
+ grep "+}" actual &&
+ # No line-level +/- markers (word-diff replaces them);
+ # exclude --- header lines from the check
+ ! grep "^+[^+]" actual &&
+ ! grep "^-[^-]" actual
+'
+
+test_expect_success '-L with --src-prefix and --dst-prefix' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c --src-prefix=old/ --dst-prefix=new/ \
+ --format= >actual &&
+ grep "^diff --git old/file.c new/file.c" actual &&
+ grep "^--- old/file.c" actual &&
+ grep "^+++ new/file.c" actual &&
+ ! grep "^--- a/" actual
+'
+
+test_expect_success '-L with --abbrev' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c --abbrev=4 --format= -1 >actual &&
+ # 4-char abbreviated hashes on index line
+ grep "^index [0-9a-f]\{4\}\.\.[0-9a-f]\{4\}" actual
+'
+
+test_expect_success '-L with -b suppresses whitespace-only diff' '
+ git checkout ws-change &&
+ git log -L:func2:file.c --format= >without_b &&
+ git log -L:func2:file.c --format= -b >with_b &&
+ test $(grep -c "^diff --git" without_b) = 3 &&
+ test $(grep -c "^diff --git" with_b) = 2
+'
+
+test_expect_success '-L with --output-indicator-*' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c --output-indicator-new=">" \
+ --output-indicator-old="<" --output-indicator-context="|" \
+ --format= -1 >actual &&
+ grep "^>" actual &&
+ grep "^<" actual &&
+ grep "^|" actual &&
+ # No standard +/-/space content markers; exclude ---/+++ headers
+ ! grep "^+[^+]" actual &&
+ ! grep "^-[^-]" actual &&
+ ! grep "^ " actual
+'
+
+test_expect_success '-L with -R reverses diff' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c -R --format= -1 >actual &&
+ grep "^diff --git b/file.c a/file.c" actual &&
+ grep "^--- b/file.c" actual &&
+ grep "^+++ a/file.c" actual &&
+ # The modification added "F2 + 2", so reversed it is removed
+ grep "^-.*F2 + 2" actual &&
+ grep "^+.*return F2;" actual
+'
+
+test_expect_success 'setup for color-moved test' '
+ git checkout -b color-moved-test parent-oids &&
+ cat >big.c <<-\EOF &&
+ int bigfunc()
+ {
+ int a = 1;
+ int b = 2;
+ int c = 3;
+ return a + b + c;
+ }
+ EOF
+ git add big.c &&
+ git commit -m "add bigfunc" &&
+ sed "s/ / /" big.c >tmp && mv tmp big.c &&
+ git commit -a -m "reindent bigfunc"
+'
+
+test_expect_success '-L with --color-moved' '
+ git log -L:bigfunc:big.c --color-moved=zebra \
+ --color-moved-ws=ignore-all-space \
+ --color=always --format= -1 >actual.raw &&
+ test_decode_color <actual.raw >actual &&
+ # Old moved lines: bold magenta; new moved lines: bold cyan
+ grep "BOLD;MAGENTA" actual &&
+ grep "BOLD;CYAN" actual
+'
+
+test_expect_success 'setup for no-newline-at-eof tests' '
+ git checkout --orphan no-newline &&
+ git reset --hard &&
+ printf "int top()\n{\n return 1;\n}\n\nint bot()\n{\n return 2;\n}" >noeol.c &&
+ git add noeol.c &&
+ test_tick &&
+ git commit -m "add noeol.c (no trailing newline)" &&
+ sed "s/return 2/return 22/" noeol.c >tmp && mv tmp noeol.c &&
+ git commit -a -m "modify bot()" &&
+ printf "int top()\n{\n return 1;\n}\n\nint bot()\n{\n return 33;\n}\n" >noeol.c &&
+ git commit -a -m "modify bot() and add trailing newline"
+'
+
+# When the tracked function is at the end of a file with no trailing
+# newline, the "\ No newline at end of file" marker should appear.
+test_expect_success '-L no-newline-at-eof appears in tracked range' '
+ git log -L:bot:noeol.c --format= -1 HEAD~1 >actual &&
+ grep "No newline at end of file" actual
+'
+
+# When tracking a function that ends before the no-newline content,
+# the marker should not appear in the output.
+test_expect_success '-L no-newline-at-eof suppressed outside range' '
+ git log -L:top:noeol.c --format= >actual &&
+ ! grep "No newline at end of file" actual
+'
+
+# When a commit removes a no-newline last line and replaces it with
+# a newline-terminated line, the marker should still appear (on the
+# old side of the diff).
+test_expect_success '-L no-newline-at-eof marker with deleted line' '
+ git log -L:bot:noeol.c --format= -1 >actual &&
+ grep "No newline at end of file" actual
+'
+
+test_expect_success 'setup for range boundary deletion test' '
+ git checkout --orphan range-boundary &&
+ git reset --hard &&
+ cat >boundary.c <<-\EOF &&
+ void above()
+ {
+ return;
+ }
+
+ void tracked()
+ {
+ int x = 1;
+ int y = 2;
+ }
+
+ void below()
+ {
+ return;
+ }
+ EOF
+ git add boundary.c &&
+ test_tick &&
+ git commit -m "add boundary.c" &&
+ cat >boundary.c <<-\EOF &&
+ void above()
+ {
+ return;
+ }
+
+ void tracked()
+ {
+ int x = 1;
+ int y = 2;
+ }
+
+ void below_renamed()
+ {
+ return 0;
+ }
+ EOF
+ git commit -a -m "modify below() only"
+'
+
+# When only a function below the tracked range is modified, the
+# tracked function should not produce a diff.
+test_expect_success '-L suppresses deletions outside tracked range' '
+ git log -L:tracked:boundary.c --format= >actual &&
+ test $(grep -c "^diff --git" actual) = 1
+'
+
+test_expect_success '-L with -S filters to string-count changes' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c -S "F2 + 2" --format= >actual &&
+ # -S searches the whole file, not just the tracked range;
+ # combined with the -L range walk, this selects commits that
+ # both touch func2 and change the count of "F2 + 2" in the file.
+ test $(grep -c "^diff --git" actual) = 1 &&
+ grep "F2 + 2" actual
+'
+
+test_expect_success '-L with -G filters to diff-text matches' '
+ git checkout parent-oids &&
+ git log -L:func2:file.c -G "F2 [+] 2" --format= >actual &&
+ # -G greps the whole-file diff text, not just the tracked range;
+ # combined with -L, this selects commits that both touch func2
+ # and have "F2 + 2" in their diff.
+ test $(grep -c "^diff --git" actual) = 1 &&
+ grep "F2 + 2" actual
+'
+
test_done
diff --git a/t/t4211/sha1/expect.beginning-of-file b/t/t4211/sha1/expect.beginning-of-file
index 91b4054898..52c90afb3a 100644
--- a/t/t4211/sha1/expect.beginning-of-file
+++ b/t/t4211/sha1/expect.beginning-of-file
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:47:40 2013 +0100
change at very beginning
diff --git a/a.c b/a.c
+index bdb2bb1..5e709a1 100644
--- a/a.c
+++ b/a.c
@@ -1,3 +1,4 @@
@@ -20,6 +21,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -1,3 +1,3 @@
@@ -35,6 +37,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +1,3 @@
diff --git a/t/t4211/sha1/expect.end-of-file b/t/t4211/sha1/expect.end-of-file
index bd25bb2f59..c40036899a 100644
--- a/t/t4211/sha1/expect.end-of-file
+++ b/t/t4211/sha1/expect.end-of-file
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -20,3 +20,5 @@
+@@ -20,3 +20,5 @@ long f(long x)
printf("%ld\n", f(15));
return 0;
-}
@@ -23,9 +24,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -20,3 +20,3 @@
+@@ -20,3 +20,3 @@ int main ()
printf("%ld\n", f(15));
return 0;
-}
@@ -39,9 +41,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
-@@ -19,3 +19,3 @@
+@@ -19,3 +19,3 @@ int f(int x)
- printf("%d\n", f(15));
+ printf("%ld\n", f(15));
return 0;
@@ -54,6 +57,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +18,3 @@
diff --git a/t/t4211/sha1/expect.move-support-f b/t/t4211/sha1/expect.move-support-f
index c905e01bc2..ead6500d4d 100644
--- a/t/t4211/sha1/expect.move-support-f
+++ b/t/t4211/sha1/expect.move-support-f
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:49:50 2013 +0100
another simple change
diff --git a/b.c b/b.c
+index 5de3ea4..bf79c2f 100644
--- a/b.c
+++ b/b.c
@@ -4,9 +4,9 @@
@@ -26,6 +27,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -47,6 +49,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -67,6 +70,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha1/expect.multiple b/t/t4211/sha1/expect.multiple
index 1eee8a7801..a41851a51d 100644
--- a/t/t4211/sha1/expect.multiple
+++ b/t/t4211/sha1/expect.multiple
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,7 @@
+@@ -18,5 +18,7 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -25,9 +26,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -43,6 +45,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -71,6 +74,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -91,6 +95,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha1/expect.multiple-overlapping b/t/t4211/sha1/expect.multiple-overlapping
index d930b6eec4..0ec9990eab 100644
--- a/t/t4211/sha1/expect.multiple-overlapping
+++ b/t/t4211/sha1/expect.multiple-overlapping
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,21 @@
@@ -39,6 +40,7 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,19 @@
@@ -71,6 +73,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index e51de13..bdb2bb1 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -102,6 +105,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -134,6 +138,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,18 +3,19 @@
@@ -164,6 +169,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,18 @@
diff --git a/t/t4211/sha1/expect.multiple-superset b/t/t4211/sha1/expect.multiple-superset
index d930b6eec4..0ec9990eab 100644
--- a/t/t4211/sha1/expect.multiple-superset
+++ b/t/t4211/sha1/expect.multiple-superset
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,21 @@
@@ -39,6 +40,7 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,19 @@
@@ -71,6 +73,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index e51de13..bdb2bb1 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -102,6 +105,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -134,6 +138,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,18 +3,19 @@
@@ -164,6 +169,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,18 @@
diff --git a/t/t4211/sha1/expect.no-assertion-error b/t/t4211/sha1/expect.no-assertion-error
index 994c37db1e..54c568f273 100644
--- a/t/t4211/sha1/expect.no-assertion-error
+++ b/t/t4211/sha1/expect.no-assertion-error
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:50:24 2013 +0100
move within the file
diff --git a/b.c b/b.c
+index bf79c2f..27c829c 100644
--- a/b.c
+++ b/b.c
@@ -25,0 +18,9 @@
@@ -25,9 +26,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,7 @@
+@@ -18,5 +18,7 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -45,9 +47,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -63,9 +66,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
-@@ -17,5 +17,5 @@
+@@ -17,5 +17,5 @@ int f(int x)
int main ()
{
- printf("%d\n", f(15));
@@ -80,6 +84,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +16,5 @@
diff --git a/t/t4211/sha1/expect.parallel-change-f-to-main b/t/t4211/sha1/expect.parallel-change-f-to-main
index 052def8074..65a8cc673a 100644
--- a/t/t4211/sha1/expect.parallel-change-f-to-main
+++ b/t/t4211/sha1/expect.parallel-change-f-to-main
@@ -13,6 +13,7 @@ Date: Thu Feb 28 10:49:50 2013 +0100
another simple change
diff --git a/b.c b/b.c
+index 5de3ea4..bf79c2f 100644
--- a/b.c
+++ b/b.c
@@ -4,14 +4,14 @@
@@ -39,6 +40,7 @@ Date: Fri Apr 12 16:15:57 2013 +0200
change on another line of history while rename happens
diff --git a/a.c b/a.c
+index 5de3ea4..01b5b65 100644
--- a/a.c
+++ b/a.c
@@ -4,14 +4,14 @@
@@ -65,6 +67,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index e51de13..bdb2bb1 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -91,6 +94,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -117,6 +121,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,13 +3,14 @@
@@ -142,6 +147,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,13 @@
diff --git a/t/t4211/sha1/expect.simple-f b/t/t4211/sha1/expect.simple-f
index a1f5bc49c8..b24ae40e03 100644
--- a/t/t4211/sha1/expect.simple-f
+++ b/t/t4211/sha1/expect.simple-f
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -26,6 +27,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -46,6 +48,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha1/expect.simple-f-to-main b/t/t4211/sha1/expect.simple-f-to-main
index a475768710..cd92100dfc 100644
--- a/t/t4211/sha1/expect.simple-f-to-main
+++ b/t/t4211/sha1/expect.simple-f-to-main
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index e51de13..bdb2bb1 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -31,6 +32,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -57,6 +59,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,13 +3,14 @@
@@ -82,6 +85,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,13 @@
diff --git a/t/t4211/sha1/expect.simple-main b/t/t4211/sha1/expect.simple-main
index 39ce39bebe..ff31291d34 100644
--- a/t/t4211/sha1/expect.simple-main
+++ b/t/t4211/sha1/expect.simple-main
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -23,9 +24,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -41,9 +43,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
-@@ -17,5 +17,5 @@
+@@ -17,5 +17,5 @@ int f(int x)
int main ()
{
- printf("%d\n", f(15));
@@ -58,6 +61,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +16,5 @@
diff --git a/t/t4211/sha1/expect.simple-main-to-end b/t/t4211/sha1/expect.simple-main-to-end
index 8480bd9cc4..4bef21e657 100644
--- a/t/t4211/sha1/expect.simple-main-to-end
+++ b/t/t4211/sha1/expect.simple-main-to-end
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,7 @@
+@@ -18,5 +18,7 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -25,9 +26,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -43,9 +45,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
-@@ -17,5 +17,5 @@
+@@ -17,5 +17,5 @@ int f(int x)
int main ()
{
- printf("%d\n", f(15));
@@ -60,6 +63,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +16,5 @@
diff --git a/t/t4211/sha1/expect.two-ranges b/t/t4211/sha1/expect.two-ranges
index c5164f3be3..aed01522e3 100644
--- a/t/t4211/sha1/expect.two-ranges
+++ b/t/t4211/sha1/expect.two-ranges
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -23,9 +24,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -41,6 +43,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 3233403..e51de13 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -69,6 +72,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 444e415..3233403 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -89,6 +93,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha1/expect.vanishes-early b/t/t4211/sha1/expect.vanishes-early
index 1f7cd06941..a413ad3659 100644
--- a/t/t4211/sha1/expect.vanishes-early
+++ b/t/t4211/sha1/expect.vanishes-early
@@ -5,11 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index 0b9cae5..5de3ea4 100644
--- a/a.c
+++ b/a.c
-@@ -22,1 +24,1 @@
--}
-\ No newline at end of file
+@@ -23,0 +24,1 @@ int main ()
+/* incomplete lines are bad! */
commit 100b61a6f2f720f812620a9d10afb3a960ccb73c
@@ -19,9 +18,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index 5e709a1..0b9cae5 100644
--- a/a.c
+++ b/a.c
-@@ -22,1 +22,1 @@
+@@ -22,1 +22,1 @@ int main ()
-}
+}
\ No newline at end of file
@@ -33,6 +33,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..444e415
--- /dev/null
+++ b/a.c
@@ -0,0 +20,1 @@
diff --git a/t/t4211/sha256/expect.beginning-of-file b/t/t4211/sha256/expect.beginning-of-file
index 5adfdfc1a1..e8d62328cf 100644
--- a/t/t4211/sha256/expect.beginning-of-file
+++ b/t/t4211/sha256/expect.beginning-of-file
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:47:40 2013 +0100
change at very beginning
diff --git a/a.c b/a.c
+index 3a78aaf..d325124 100644
--- a/a.c
+++ b/a.c
@@ -1,3 +1,4 @@
@@ -20,6 +21,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -1,3 +1,3 @@
@@ -35,6 +37,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +1,3 @@
diff --git a/t/t4211/sha256/expect.end-of-file b/t/t4211/sha256/expect.end-of-file
index 03ab5c1784..3b2e2384da 100644
--- a/t/t4211/sha256/expect.end-of-file
+++ b/t/t4211/sha256/expect.end-of-file
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -20,3 +20,5 @@
+@@ -20,3 +20,5 @@ long f(long x)
printf("%ld\n", f(15));
return 0;
-}
@@ -23,9 +24,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -20,3 +20,3 @@
+@@ -20,3 +20,3 @@ int main ()
printf("%ld\n", f(15));
return 0;
-}
@@ -39,9 +41,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
-@@ -19,3 +19,3 @@
+@@ -19,3 +19,3 @@ int f(int x)
- printf("%d\n", f(15));
+ printf("%ld\n", f(15));
return 0;
@@ -54,6 +57,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +18,3 @@
diff --git a/t/t4211/sha256/expect.move-support-f b/t/t4211/sha256/expect.move-support-f
index 223b4ed2a0..f49abcea3e 100644
--- a/t/t4211/sha256/expect.move-support-f
+++ b/t/t4211/sha256/expect.move-support-f
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:49:50 2013 +0100
another simple change
diff --git a/b.c b/b.c
+index 62c1fc2..69cb69c 100644
--- a/b.c
+++ b/b.c
@@ -4,9 +4,9 @@
@@ -26,6 +27,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -47,6 +49,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -67,6 +70,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha256/expect.multiple b/t/t4211/sha256/expect.multiple
index dbd987b74a..0dee50ffb7 100644
--- a/t/t4211/sha256/expect.multiple
+++ b/t/t4211/sha256/expect.multiple
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,7 @@
+@@ -18,5 +18,7 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -25,9 +26,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -43,6 +45,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -71,6 +74,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -91,6 +95,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha256/expect.multiple-overlapping b/t/t4211/sha256/expect.multiple-overlapping
index 9015a45a25..b8c260e8ae 100644
--- a/t/t4211/sha256/expect.multiple-overlapping
+++ b/t/t4211/sha256/expect.multiple-overlapping
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,21 @@
@@ -39,6 +40,7 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,19 @@
@@ -71,6 +73,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index 75c0119..3a78aaf 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -102,6 +105,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -134,6 +138,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,18 +3,19 @@
@@ -164,6 +169,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,18 @@
diff --git a/t/t4211/sha256/expect.multiple-superset b/t/t4211/sha256/expect.multiple-superset
index 9015a45a25..b8c260e8ae 100644
--- a/t/t4211/sha256/expect.multiple-superset
+++ b/t/t4211/sha256/expect.multiple-superset
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,21 @@
@@ -39,6 +40,7 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
@@ -4,19 +4,19 @@
@@ -71,6 +73,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index 75c0119..3a78aaf 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -102,6 +105,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,19 +3,19 @@
@@ -134,6 +138,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,18 +3,19 @@
@@ -164,6 +169,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,18 @@
diff --git a/t/t4211/sha256/expect.no-assertion-error b/t/t4211/sha256/expect.no-assertion-error
index 36ed12aa9c..c25f2ce19c 100644
--- a/t/t4211/sha256/expect.no-assertion-error
+++ b/t/t4211/sha256/expect.no-assertion-error
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:50:24 2013 +0100
move within the file
diff --git a/b.c b/b.c
+index 69cb69c..a0d566e 100644
--- a/b.c
+++ b/b.c
@@ -25,0 +18,9 @@
@@ -25,9 +26,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,7 @@
+@@ -18,5 +18,7 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -45,9 +47,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -63,9 +66,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
-@@ -17,5 +17,5 @@
+@@ -17,5 +17,5 @@ int f(int x)
int main ()
{
- printf("%d\n", f(15));
@@ -80,6 +84,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +16,5 @@
diff --git a/t/t4211/sha256/expect.parallel-change-f-to-main b/t/t4211/sha256/expect.parallel-change-f-to-main
index e68f8928ea..3178989253 100644
--- a/t/t4211/sha256/expect.parallel-change-f-to-main
+++ b/t/t4211/sha256/expect.parallel-change-f-to-main
@@ -13,6 +13,7 @@ Date: Thu Feb 28 10:49:50 2013 +0100
another simple change
diff --git a/b.c b/b.c
+index 62c1fc2..69cb69c 100644
--- a/b.c
+++ b/b.c
@@ -4,14 +4,14 @@
@@ -39,6 +40,7 @@ Date: Fri Apr 12 16:15:57 2013 +0200
change on another line of history while rename happens
diff --git a/a.c b/a.c
+index 62c1fc2..e1e8475 100644
--- a/a.c
+++ b/a.c
@@ -4,14 +4,14 @@
@@ -65,6 +67,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index 75c0119..3a78aaf 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -91,6 +94,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -117,6 +121,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,13 +3,14 @@
@@ -142,6 +147,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,13 @@
diff --git a/t/t4211/sha256/expect.simple-f b/t/t4211/sha256/expect.simple-f
index 65508d7c0b..983c711fe3 100644
--- a/t/t4211/sha256/expect.simple-f
+++ b/t/t4211/sha256/expect.simple-f
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -26,6 +27,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -46,6 +48,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha256/expect.simple-f-to-main b/t/t4211/sha256/expect.simple-f-to-main
index 77b721c196..e67fa017a7 100644
--- a/t/t4211/sha256/expect.simple-f-to-main
+++ b/t/t4211/sha256/expect.simple-f-to-main
@@ -5,6 +5,7 @@ Date: Thu Feb 28 10:45:41 2013 +0100
touch comment
diff --git a/a.c b/a.c
+index 75c0119..3a78aaf 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -31,6 +32,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,14 +3,14 @@
@@ -57,6 +59,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,13 +3,14 @@
@@ -82,6 +85,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,13 @@
diff --git a/t/t4211/sha256/expect.simple-main b/t/t4211/sha256/expect.simple-main
index d20708c9f9..0792b27cad 100644
--- a/t/t4211/sha256/expect.simple-main
+++ b/t/t4211/sha256/expect.simple-main
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -23,9 +24,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -41,9 +43,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
-@@ -17,5 +17,5 @@
+@@ -17,5 +17,5 @@ int f(int x)
int main ()
{
- printf("%d\n", f(15));
@@ -58,6 +61,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +16,5 @@
diff --git a/t/t4211/sha256/expect.simple-main-to-end b/t/t4211/sha256/expect.simple-main-to-end
index 617cdf3481..d3bd7c7bc6 100644
--- a/t/t4211/sha256/expect.simple-main-to-end
+++ b/t/t4211/sha256/expect.simple-main-to-end
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,7 @@
+@@ -18,5 +18,7 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -25,9 +26,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -43,9 +45,10 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
-@@ -17,5 +17,5 @@
+@@ -17,5 +17,5 @@ int f(int x)
int main ()
{
- printf("%d\n", f(15));
@@ -60,6 +63,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +16,5 @@
diff --git a/t/t4211/sha256/expect.two-ranges b/t/t4211/sha256/expect.two-ranges
index 6a94d3b9cb..7735b19723 100644
--- a/t/t4211/sha256/expect.two-ranges
+++ b/t/t4211/sha256/expect.two-ranges
@@ -5,9 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -23,9 +24,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -18,5 +18,5 @@
+@@ -18,5 +18,5 @@ long f(long x)
int main ()
{
printf("%ld\n", f(15));
@@ -41,6 +43,7 @@ Date: Thu Feb 28 10:45:16 2013 +0100
touch both functions
diff --git a/a.c b/a.c
+index 7a296b9..75c0119 100644
--- a/a.c
+++ b/a.c
@@ -3,9 +3,9 @@
@@ -69,6 +72,7 @@ Date: Thu Feb 28 10:44:55 2013 +0100
change f()
diff --git a/a.c b/a.c
+index 9f550c3..7a296b9 100644
--- a/a.c
+++ b/a.c
@@ -3,8 +3,9 @@
@@ -89,6 +93,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +3,8 @@
diff --git a/t/t4211/sha256/expect.vanishes-early b/t/t4211/sha256/expect.vanishes-early
index 11ec9bdecf..bc33b963dc 100644
--- a/t/t4211/sha256/expect.vanishes-early
+++ b/t/t4211/sha256/expect.vanishes-early
@@ -5,11 +5,10 @@ Date: Thu Feb 28 10:48:43 2013 +0100
change back to complete line
diff --git a/a.c b/a.c
+index e4fa1d8..62c1fc2 100644
--- a/a.c
+++ b/a.c
-@@ -22,1 +24,1 @@
--}
-\ No newline at end of file
+@@ -23,0 +24,1 @@ int main ()
+/* incomplete lines are bad! */
commit 29f32ac3141c48b22803e5c4127b719917b67d0f8ca8c5248bebfa2a19f7da10
@@ -19,9 +18,10 @@ Date: Thu Feb 28 10:48:10 2013 +0100
change to an incomplete line at end
diff --git a/a.c b/a.c
+index d325124..e4fa1d8 100644
--- a/a.c
+++ b/a.c
-@@ -22,1 +22,1 @@
+@@ -22,1 +22,1 @@ int main ()
-}
+}
\ No newline at end of file
@@ -33,6 +33,8 @@ Date: Thu Feb 28 10:44:48 2013 +0100
initial
diff --git a/a.c b/a.c
+new file mode 100644
+index 0000000..9f550c3
--- /dev/null
+++ b/a.c
@@ -0,0 +20,1 @@
diff --git a/t/t4254-am-corrupt.sh b/t/t4254-am-corrupt.sh
index ae0a56cf5e..96ddf3c53a 100755
--- a/t/t4254-am-corrupt.sh
+++ b/t/t4254-am-corrupt.sh
@@ -65,9 +65,8 @@ test_expect_success setup '
test_expect_success 'try to apply corrupted patch' '
test_when_finished "git am --abort" &&
test_must_fail git -c advice.amWorkDir=false -c advice.mergeConflict=false am bad-patch.diff 2>actual &&
- echo "error: git diff header lacks filename information (line 4)" >expected &&
test_path_is_file f &&
- test_cmp expected actual
+ test_grep "error: git diff header lacks filename information at .*rebase-apply/patch:4" actual
'
test_expect_success "NUL in commit message's body" '
diff --git a/t/t5315-pack-objects-compression.sh b/t/t5315-pack-objects-compression.sh
index 8bacd96275..d0feab17b4 100755
--- a/t/t5315-pack-objects-compression.sh
+++ b/t/t5315-pack-objects-compression.sh
@@ -10,7 +10,7 @@ test_expect_success setup '
# make sure it resulted in a loose object
ob=$(sed -e "s/\(..\).*/\1/" object-name) &&
ject=$(sed -e "s/..\(.*\)/\1/" object-name) &&
- test -f .git/objects/$ob/$ject
+ test_path_is_file .git/objects/$ob/$ject
'
while read expect config
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
index 98c6910963..1c40f904f8 100755
--- a/t/t5318-commit-graph.sh
+++ b/t/t5318-commit-graph.sh
@@ -417,6 +417,26 @@ test_expect_success TIME_IS_64BIT,TIME_T_IS_64BIT 'lower layers have overflow ch
test_cmp full/.git/objects/info/commit-graph commit-graph-upgraded
'
+test_expect_success TIME_IS_64BIT,TIME_T_IS_64BIT 'overflow chunk when replacing commit-graph' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ cat >commit <<-EOF &&
+ tree $(test_oid empty_tree)
+ author Example <committer@example.com> 9223372036854775 +0000
+ committer Example <committer@example.com> 9223372036854775 +0000
+
+ Weird commit date
+ EOF
+ commit_id=$(git hash-object -t commit -w commit) &&
+ git reset --hard "$commit_id" &&
+ git commit-graph write --reachable &&
+ git commit-graph write --reachable --split=replace &&
+ git log
+ )
+'
+
# the verify tests below expect the commit-graph to contain
# exactly the commits reachable from the commits/8 branch.
# If the file changes the set of commits in the list, then the
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index a7c79225f6..58e0b685b1 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -21,7 +21,7 @@ midx_read_expect () {
EXTRA_CHUNKS="$5"
{
cat <<-EOF &&
- header: 4d494458 1 $HASH_LEN $NUM_CHUNKS $NUM_PACKS
+ header: 4d494458 2 $HASH_LEN $NUM_CHUNKS $NUM_PACKS
chunks: pack-names oid-fanout oid-lookup object-offsets$EXTRA_CHUNKS
num_objects: $NUM_OBJECTS
packs:
@@ -512,12 +512,7 @@ test_expect_success 'verify invalid chunk offset' '
"improper chunk offset(s)"
'
-test_expect_success 'verify packnames out of order' '
- corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "z" $objdir \
- "pack names out of order"
-'
-
-test_expect_success 'verify packnames out of order' '
+test_expect_success 'verify missing pack' '
corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "a" $objdir \
"failed to load pack"
'
@@ -578,6 +573,15 @@ test_expect_success 'verify incorrect checksum' '
$objdir "incorrect checksum"
'
+test_expect_success 'setup for v1-specific fsck tests' '
+ git -c midx.version=1 multi-pack-index write
+'
+
+test_expect_success 'verify packnames out of order (v1)' '
+ corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "z" $objdir \
+ "pack names out of order"
+'
+
test_expect_success 'repack progress off for redirected stderr' '
GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir repack 2>err &&
test_line_count = 0 err
diff --git a/t/t5331-pack-objects-stdin.sh b/t/t5331-pack-objects-stdin.sh
index 7eb79bc2cd..c74b5861af 100755
--- a/t/t5331-pack-objects-stdin.sh
+++ b/t/t5331-pack-objects-stdin.sh
@@ -415,4 +415,109 @@ test_expect_success '--stdin-packs=follow tolerates missing commits' '
stdin_packs__follow_with_only HEAD HEAD^{tree}
'
+test_expect_success '--stdin-packs=follow with open-excluded packs' '
+ test_when_finished "rm -fr repo" &&
+
+ git init repo &&
+ (
+ cd repo &&
+ git config set maintenance.auto false &&
+
+ git branch -M main &&
+
+ # Create the following commit structure:
+ #
+ # A <-- B <-- D (main)
+ # ^
+ # \
+ # C (other)
+ test_commit A &&
+ test_commit B &&
+ git checkout -B other &&
+ test_commit C &&
+ git checkout main &&
+ test_commit D &&
+
+ A="$(echo A | git pack-objects --revs $packdir/pack)" &&
+ B="$(echo A..B | git pack-objects --revs $packdir/pack)" &&
+ C="$(echo B..C | git pack-objects --revs $packdir/pack)" &&
+ D="$(echo B..D | git pack-objects --revs $packdir/pack)" &&
+
+ C_ONLY="$(git rev-parse other | git pack-objects $packdir/pack)" &&
+
+ git prune-packed &&
+
+ # Create a pack using --stdin-packs=follow where:
+ #
+ # - pack D is included,
+ # - pack C_ONLY is excluded, but open,
+ # - pack B is excluded, but closed, and
+ # - packs A and C are unknown
+ #
+ # The resulting pack should therefore contain:
+ #
+ # - objects from the included pack D,
+ # - A.t (rescued via D^{tree}), and
+ # - C^{tree} and C.t (rescued via pack C_ONLY)
+ #
+ # , but should omit:
+ #
+ # - C (excluded via C_ONLY),
+ # - objects from pack B (trivially excluded-closed)
+ # - A and A^{tree} (ancestors of B)
+ P=$(git pack-objects --stdin-packs=follow $packdir/pack <<-EOF
+ pack-$D.pack
+ !pack-$C_ONLY.pack
+ ^pack-$B.pack
+ EOF
+ ) &&
+
+ {
+ objects_in_packs $D &&
+ git rev-parse A:A.t "C^{tree}" C:C.t
+ } >expect.raw &&
+ sort expect.raw >expect &&
+
+ objects_in_packs $P >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success '--stdin-packs with !-delimited pack without follow' '
+ test_when_finished "rm -fr repo" &&
+
+ git init repo &&
+ (
+ test_commit A &&
+ test_commit B &&
+ test_commit C &&
+
+ A="$(echo A | git pack-objects --revs $packdir/pack)" &&
+ B="$(echo A..B | git pack-objects --revs $packdir/pack)" &&
+ C="$(echo B..C | git pack-objects --revs $packdir/pack)" &&
+
+ cat >in <<-EOF &&
+ !pack-$A.pack
+ pack-$B.pack
+ pack-$C.pack
+ EOF
+
+ # Without --stdin-packs=follow, we treat the first
+ # line of input as a literal packfile name, and thus
+ # expect pack-objects to complain of a missing pack
+ test_must_fail git pack-objects --stdin-packs --stdout \
+ >/dev/null <in 2>err &&
+ test_grep "could not find pack .!pack-$A.pack." err &&
+
+ # With --stdin-packs=follow, we treat the second line
+ # of input as indicating pack-$A.pack is an excluded
+ # open pack, and thus expect pack-objects to succeed
+ P=$(git pack-objects --stdin-packs=follow $packdir/pack <in) &&
+
+ objects_in_packs $B $C >expect &&
+ objects_in_packs $P >actual &&
+ test_cmp expect actual
+ )
+'
+
test_done
diff --git a/t/t5335-compact-multi-pack-index.sh b/t/t5335-compact-multi-pack-index.sh
new file mode 100755
index 0000000000..40f3844282
--- /dev/null
+++ b/t/t5335-compact-multi-pack-index.sh
@@ -0,0 +1,293 @@
+#!/bin/sh
+
+test_description='multi-pack-index compaction'
+
+. ./test-lib.sh
+
+GIT_TEST_MULTI_PACK_INDEX=0
+GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
+GIT_TEST_MULTI_PACK_INDEX_WRITE_INCREMENTAL=0
+
+objdir=.git/objects
+packdir=$objdir/pack
+midxdir=$packdir/multi-pack-index.d
+midx_chain=$midxdir/multi-pack-index-chain
+
+nth_line() {
+ local n="$1"
+ shift
+ awk "NR==$n" "$@"
+}
+
+write_packs () {
+ for c in "$@"
+ do
+ test_commit "$c" &&
+
+ git pack-objects --all --unpacked $packdir/pack-$c &&
+ git prune-packed &&
+
+ git multi-pack-index write --incremental --bitmap || return 1
+ done
+}
+
+test_midx_layer_packs () {
+ local checksum="$1" &&
+ shift &&
+
+ test-tool read-midx $objdir "$checksum" >out &&
+
+ printf "%s\n" "$@" >expect &&
+ # NOTE: do *not* pipe through sort here, we want to ensure the
+ # order of packs is preserved during compaction.
+ grep "^pack-" out | cut -d"-" -f2 >actual &&
+
+ test_cmp expect actual
+}
+
+test_midx_layer_object_uniqueness () {
+ : >objs.all
+ while read layer
+ do
+ test-tool read-midx --show-objects $objdir "$layer" >out &&
+ grep "\.pack$" out | cut -d" " -f1 | sort >objs.layer &&
+ test_stdout_line_count = 0 comm -12 objs.all objs.layer &&
+ cat objs.all objs.layer | sort >objs.tmp &&
+ mv objs.tmp objs.all || return 1
+ done <$midx_chain
+}
+
+test_expect_success 'MIDX compaction with lex-ordered pack names' '
+ git init midx-compact-lex-order &&
+ (
+ cd midx-compact-lex-order &&
+
+ git config maintenance.auto false &&
+
+ write_packs A B C D E &&
+ test_line_count = 5 $midx_chain &&
+
+ git multi-pack-index compact --incremental --bitmap \
+ "$(nth_line 2 "$midx_chain")" \
+ "$(nth_line 4 "$midx_chain")" &&
+ test_line_count = 3 $midx_chain &&
+
+ test_midx_layer_packs "$(nth_line 1 "$midx_chain")" A &&
+ test_midx_layer_packs "$(nth_line 2 "$midx_chain")" B C D &&
+ test_midx_layer_packs "$(nth_line 3 "$midx_chain")" E &&
+
+ test_midx_layer_object_uniqueness
+ )
+'
+
+test_expect_success 'MIDX compaction with non-lex-ordered pack names' '
+ git init midx-compact-non-lex-order &&
+ (
+ cd midx-compact-non-lex-order &&
+
+ git config maintenance.auto false &&
+
+ write_packs D C A B E &&
+ test_line_count = 5 $midx_chain &&
+
+ git multi-pack-index compact --incremental --bitmap \
+ "$(nth_line 2 "$midx_chain")" \
+ "$(nth_line 4 "$midx_chain")" &&
+ test_line_count = 3 $midx_chain &&
+
+ test_midx_layer_packs "$(nth_line 1 "$midx_chain")" D &&
+ test_midx_layer_packs "$(nth_line 2 "$midx_chain")" C A B &&
+ test_midx_layer_packs "$(nth_line 3 "$midx_chain")" E &&
+
+ test_midx_layer_object_uniqueness
+ )
+'
+
+test_expect_success 'setup for bogus MIDX compaction scenarios' '
+ git init midx-compact-bogus &&
+ (
+ cd midx-compact-bogus &&
+
+ git config maintenance.auto false &&
+
+ write_packs A B C
+ )
+'
+
+test_expect_success 'MIDX compaction with missing endpoints' '
+ (
+ cd midx-compact-bogus &&
+
+ test_must_fail git multi-pack-index compact --incremental \
+ "<missing>" "<missing>" 2>err &&
+ test_grep "could not find MIDX: <missing>" err &&
+
+ test_must_fail git multi-pack-index compact --incremental \
+ "<missing>" "$(nth_line 2 "$midx_chain")" 2>err &&
+ test_grep "could not find MIDX: <missing>" err &&
+
+ test_must_fail git multi-pack-index compact --incremental \
+ "$(nth_line 2 "$midx_chain")" "<missing>" 2>err &&
+ test_grep "could not find MIDX: <missing>" err
+ )
+'
+
+test_expect_success 'MIDX compaction with reversed endpoints' '
+ (
+ cd midx-compact-bogus &&
+
+ from="$(nth_line 3 "$midx_chain")" &&
+ to="$(nth_line 1 "$midx_chain")" &&
+
+ test_must_fail git multi-pack-index compact --incremental \
+ "$from" "$to" 2>err &&
+
+ test_grep "MIDX $from must be an ancestor of $to" err
+ )
+'
+
+test_expect_success 'MIDX compaction with identical endpoints' '
+ (
+ cd midx-compact-bogus &&
+
+ from="$(nth_line 3 "$midx_chain")" &&
+ to="$(nth_line 3 "$midx_chain")" &&
+
+ test_must_fail git multi-pack-index compact --incremental \
+ "$from" "$to" 2>err &&
+
+ test_grep "MIDX compaction endpoints must be unique" err
+ )
+'
+
+test_expect_success 'MIDX compaction with midx.version=1' '
+ (
+ cd midx-compact-bogus &&
+
+ test_must_fail git -c midx.version=1 multi-pack-index compact \
+ "$(nth_line 1 "$midx_chain")" \
+ "$(nth_line 2 "$midx_chain")" 2>err &&
+
+ test_grep "fatal: cannot perform MIDX compaction with v1 format" err
+ )
+'
+
+midx_objs_by_pack () {
+ awk '/\.pack$/ { split($3, a, "-"); print a[2], $1 }' | sort
+}
+
+tag_objs_from_pack () {
+ objs="$(git rev-list --objects --no-object-names "$2")" &&
+ printf "$1 %s\n" $objs | sort
+}
+
+test_expect_success 'MIDX compaction preserves pack object selection' '
+ git init midx-compact-preserve-selection &&
+ (
+ cd midx-compact-preserve-selection &&
+
+ git config maintenance.auto false &&
+
+ test_commit A &&
+ test_commit B &&
+
+ # Create two packs, one containing just the objects from
+ # A, and another containing all objects from the
+ # repository.
+ p1="$(echo A | git pack-objects --revs --delta-base-offset \
+ $packdir/pack-1)" &&
+ p0="$(echo B | git pack-objects --revs --delta-base-offset \
+ $packdir/pack-0)" &&
+
+ echo "pack-1-$p1.idx" | git multi-pack-index write \
+ --incremental --bitmap --stdin-packs &&
+ echo "pack-0-$p0.idx" | git multi-pack-index write \
+ --incremental --bitmap --stdin-packs &&
+
+ write_packs C &&
+
+ git multi-pack-index compact --incremental --bitmap \
+ "$(nth_line 1 "$midx_chain")" \
+ "$(nth_line 2 "$midx_chain")" &&
+
+
+ test-tool read-midx --show-objects $objdir \
+ "$(nth_line 1 "$midx_chain")" >AB.info &&
+ test-tool read-midx --show-objects $objdir \
+ "$(nth_line 2 "$midx_chain")" >C.info &&
+
+ midx_objs_by_pack <AB.info >AB.actual &&
+ midx_objs_by_pack <C.info >C.actual &&
+
+ {
+ tag_objs_from_pack 1 A &&
+ tag_objs_from_pack 0 A..B
+ } | sort >AB.expect &&
+ tag_objs_from_pack C B..C >C.expect &&
+
+ test_cmp AB.expect AB.actual &&
+ test_cmp C.expect C.actual
+ )
+'
+
+test_expect_success 'MIDX compaction with bitmaps' '
+ git init midx-compact-with-bitmaps &&
+ (
+ cd midx-compact-with-bitmaps &&
+
+ git config maintenance.auto false &&
+
+ write_packs foo bar baz quux woot &&
+
+ test-tool read-midx --bitmap $objdir >bitmap.expect &&
+ git multi-pack-index compact --incremental --bitmap \
+ "$(nth_line 2 "$midx_chain")" \
+ "$(nth_line 4 "$midx_chain")" &&
+ test-tool read-midx --bitmap $objdir >bitmap.actual &&
+
+ test_cmp bitmap.expect bitmap.actual &&
+
+ true
+ )
+'
+
+test_expect_success 'MIDX compaction with bitmaps (non-trivial)' '
+ git init midx-compact-with-bitmaps-non-trivial &&
+ (
+ cd midx-compact-with-bitmaps-non-trivial &&
+
+ git config maintenance.auto false &&
+
+ git branch -m main &&
+
+ # D(4)
+ # /
+ # A(1) --- B(2) --- C(3) --- G(7)
+ # \
+ # E(5) --- F(6)
+ write_packs A B C &&
+ git checkout -b side &&
+ write_packs D &&
+ git checkout -b other B &&
+ write_packs E F &&
+ git checkout main &&
+ write_packs G &&
+
+ # Compact layers 2-4, leaving us with:
+ #
+ # [A, [B, C, D], E, F, G]
+ git multi-pack-index compact --incremental --bitmap \
+ "$(nth_line 2 "$midx_chain")" \
+ "$(nth_line 4 "$midx_chain")" &&
+
+ # Then compact the top two layers, condensing the above
+ # such that the new 4th layer contains F and G.
+ #
+ # [A, [B, C, D], E, [F, G]]
+ git multi-pack-index compact --incremental --bitmap \
+ "$(nth_line 4 "$midx_chain")" \
+ "$(nth_line 5 "$midx_chain")"
+ )
+'
+
+test_done
diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh
index 5dcb4b51a4..6fe21e2b3a 100755
--- a/t/t5510-fetch.sh
+++ b/t/t5510-fetch.sh
@@ -469,12 +469,17 @@ test_expect_success 'fetch --atomic executes a single reference transaction only
head_oid=$(git rev-parse HEAD) &&
cat >expected <<-EOF &&
+ preparing
+ $ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-1
+ $ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-2
prepared
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-1
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-2
committed
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-1
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-2
+ preparing
+ $ZERO_OID ref:refs/remotes/origin/main refs/remotes/origin/HEAD
EOF
rm -f atomic/actual &&
@@ -497,7 +502,7 @@ test_expect_success 'fetch --atomic aborts all reference updates if hook aborts'
head_oid=$(git rev-parse HEAD) &&
cat >expected <<-EOF &&
- prepared
+ preparing
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-abort-1
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-abort-2
$ZERO_OID $head_oid refs/remotes/origin/atomic-hooks-abort-3
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 29e2f17608..117cfa051f 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -1792,6 +1792,7 @@ test_expect_success 'updateInstead with push-to-checkout hook' '
'
test_expect_success 'denyCurrentBranch and worktrees' '
+ test_when_finished "rm -fr cloned && git worktree remove --force new-wt" &&
git worktree add new-wt &&
git clone . cloned &&
test_commit -C cloned first &&
@@ -1816,6 +1817,20 @@ test_expect_success 'denyCurrentBranch and bare repository worktrees' '
test_must_fail git push --delete bare.git wt
'
+test_expect_success 'updateInstead with bare repository worktree and unborn bare HEAD' '
+ test_when_finished "rm -fr bare.git cloned" &&
+ git clone --bare . bare.git &&
+ git -C bare.git worktree add wt &&
+ git -C bare.git config receive.denyCurrentBranch updateInstead &&
+ git -C bare.git symbolic-ref HEAD refs/heads/unborn &&
+ test_must_fail git -C bare.git rev-parse -q --verify HEAD^{commit} &&
+ git clone . cloned &&
+ test_commit -C cloned mozzarella &&
+ git -C cloned push ../bare.git HEAD:wt &&
+ test_path_exists bare.git/wt/mozzarella.t &&
+ test "$(git -C cloned rev-parse HEAD)" = "$(git -C bare.git/wt rev-parse HEAD)"
+'
+
test_expect_success 'refuse fetch to current branch of worktree' '
test_when_finished "git worktree remove --force wt && git branch -D wt" &&
git worktree add wt &&
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index 73cf531580..a26b6c2844 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -782,4 +782,11 @@ test_expect_success 'tag following always works over v0 http' '
test_cmp expect actual
'
+test_expect_success 'ls-remote outside repo does not segfault with fetch refspec' '
+ nongit git \
+ -c remote.origin.url="$HTTPD_URL/smart/repo.git" \
+ -c remote.origin.fetch=anything \
+ ls-remote origin
+'
+
test_done
diff --git a/t/t5584-http-429-retry.sh b/t/t5584-http-429-retry.sh
new file mode 100755
index 0000000000..a22007b2cf
--- /dev/null
+++ b/t/t5584-http-429-retry.sh
@@ -0,0 +1,266 @@
+#!/bin/sh
+
+test_description='test HTTP 429 Too Many Requests retry logic'
+
+. ./test-lib.sh
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+
+start_httpd
+
+test_expect_success 'setup test repository' '
+ test_commit initial &&
+ git clone --bare . "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
+ git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" config http.receivepack true
+'
+
+# This test suite uses a special HTTP 429 endpoint at /http_429/ that simulates
+# rate limiting. The endpoint format is:
+# /http_429/<test-context>/<retry-after-value>/<repo-path>
+# The http-429.sh script (in t/lib-httpd) returns a 429 response with the
+# specified Retry-After header on the first request for each test context,
+# then forwards subsequent requests to git-http-backend. Each test context
+# is isolated, allowing multiple tests to run independently.
+
+test_expect_success 'HTTP 429 with retries disabled (maxRetries=0) fails immediately' '
+ # Set maxRetries to 0 (disabled)
+ test_config http.maxRetries 0 &&
+ test_config http.retryAfter 1 &&
+
+ # Should fail immediately without any retry attempt
+ test_must_fail git ls-remote "$HTTPD_URL/http_429/retries-disabled/1/repo.git" 2>err &&
+
+ # Verify no retry happened (no "waiting" message in stderr)
+ test_grep ! -i "waiting.*retry" err
+'
+
+test_expect_success 'HTTP 429 permanent should fail after max retries' '
+ # Enable retries with a limit
+ test_config http.maxRetries 2 &&
+
+ # Git should retry but eventually fail when 429 persists
+ test_must_fail git ls-remote "$HTTPD_URL/http_429/permanent-fail/permanent/repo.git" 2>err
+'
+
+test_expect_success 'HTTP 429 with Retry-After is retried and succeeds' '
+ # Enable retries
+ test_config http.maxRetries 3 &&
+
+ # Git should retry after receiving 429 and eventually succeed
+ git ls-remote "$HTTPD_URL/http_429/retry-succeeds/1/repo.git" >output 2>err &&
+ test_grep "refs/heads/" output
+'
+
+test_expect_success 'HTTP 429 without Retry-After uses configured default' '
+ # Enable retries and configure default delay
+ test_config http.maxRetries 3 &&
+ test_config http.retryAfter 1 &&
+
+ # Git should retry using configured default and succeed
+ git ls-remote "$HTTPD_URL/http_429/no-retry-after-header/none/repo.git" >output 2>err &&
+ test_grep "refs/heads/" output
+'
+
+test_expect_success 'HTTP 429 retry delays are respected' '
+ # Enable retries
+ test_config http.maxRetries 3 &&
+
+ # Time the operation - it should take at least 2 seconds due to retry delay
+ start=$(test-tool date getnanos) &&
+ git ls-remote "$HTTPD_URL/http_429/retry-delays-respected/2/repo.git" >output 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+	# Verify at least 1 second elapsed (delay is 2s; allow tolerance)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -ge 1 &&
+ test_grep "refs/heads/" output
+'
+
+test_expect_success 'HTTP 429 fails immediately if Retry-After exceeds http.maxRetryTime' '
+ # Configure max retry time to 3 seconds (much less than requested 100)
+ test_config http.maxRetries 3 &&
+ test_config http.maxRetryTime 3 &&
+
+ # Should fail immediately without waiting
+ start=$(test-tool date getnanos) &&
+ test_must_fail git ls-remote "$HTTPD_URL/http_429/retry-after-exceeds-max-time/100/repo.git" 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should fail quickly (no 100 second wait)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -lt 99 &&
+ test_grep "greater than http.maxRetryTime" err
+'
+
+test_expect_success 'HTTP 429 fails if configured http.retryAfter exceeds http.maxRetryTime' '
+ # Test misconfiguration: retryAfter > maxRetryTime
+ # Configure retryAfter larger than maxRetryTime
+ test_config http.maxRetries 3 &&
+ test_config http.retryAfter 100 &&
+ test_config http.maxRetryTime 5 &&
+
+ # Should fail immediately with configuration error
+ start=$(test-tool date getnanos) &&
+ test_must_fail git ls-remote "$HTTPD_URL/http_429/config-retry-after-exceeds-max-time/none/repo.git" 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should fail quickly (no 100 second wait)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -lt 99 &&
+ test_grep "configured http.retryAfter.*exceeds.*http.maxRetryTime" err
+'
+
+test_expect_success 'HTTP 429 with Retry-After HTTP-date format' '
+ # Test HTTP-date format (RFC 2822) in Retry-After header
+ raw=$(test-tool date timestamp now) &&
+ now="${raw#* -> }" &&
+ future_time=$((now + 2)) &&
+ raw=$(test-tool date show:rfc2822 $future_time) &&
+ future_date="${raw#* -> }" &&
+ future_date_encoded=$(echo "$future_date" | sed "s/ /%20/g") &&
+
+ # Enable retries
+ test_config http.maxRetries 3 &&
+
+ # Git should parse the HTTP-date and retry after the delay
+ start=$(test-tool date getnanos) &&
+ git ls-remote "$HTTPD_URL/http_429/http-date-format/$future_date_encoded/repo.git" >output 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should take at least 1 second (allowing tolerance for processing time)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -ge 1 &&
+ test_grep "refs/heads/" output
+'
+
+test_expect_success 'HTTP 429 with HTTP-date exceeding maxRetryTime fails immediately' '
+ raw=$(test-tool date timestamp now) &&
+ now="${raw#* -> }" &&
+ future_time=$((now + 200)) &&
+ raw=$(test-tool date show:rfc2822 $future_time) &&
+ future_date="${raw#* -> }" &&
+ future_date_encoded=$(echo "$future_date" | sed "s/ /%20/g") &&
+
+ # Configure max retry time much less than the 200 second delay
+ test_config http.maxRetries 3 &&
+ test_config http.maxRetryTime 10 &&
+
+ # Should fail immediately without waiting 200 seconds
+ start=$(test-tool date getnanos) &&
+ test_must_fail git ls-remote "$HTTPD_URL/http_429/http-date-exceeds-max-time/$future_date_encoded/repo.git" 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should fail quickly (not wait 200 seconds)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -lt 199 &&
+ test_grep "http.maxRetryTime" err
+'
+
+test_expect_success 'HTTP 429 with past HTTP-date should not wait' '
+ raw=$(test-tool date timestamp now) &&
+ now="${raw#* -> }" &&
+ past_time=$((now - 10)) &&
+ raw=$(test-tool date show:rfc2822 $past_time) &&
+ past_date="${raw#* -> }" &&
+ past_date_encoded=$(echo "$past_date" | sed "s/ /%20/g") &&
+
+ # Enable retries
+ test_config http.maxRetries 3 &&
+
+ # Git should retry immediately without waiting
+ start=$(test-tool date getnanos) &&
+ git ls-remote "$HTTPD_URL/http_429/past-http-date/$past_date_encoded/repo.git" >output 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should complete quickly (no wait for a past-date Retry-After)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -lt 5 &&
+ test_grep "refs/heads/" output
+'
+
+test_expect_success 'HTTP 429 with invalid Retry-After format uses configured default' '
+ # Configure default retry-after
+ test_config http.maxRetries 3 &&
+ test_config http.retryAfter 1 &&
+
+ # Should use configured default (1 second) since header is invalid
+ start=$(test-tool date getnanos) &&
+ git ls-remote "$HTTPD_URL/http_429/invalid-retry-after-format/invalid/repo.git" >output 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should take at least 1 second (the configured default)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -ge 1 &&
+ test_grep "refs/heads/" output &&
+ test_grep "waiting.*retry" err
+'
+
+test_expect_success 'HTTP 429 will not be retried without config' '
+ # Default config means http.maxRetries=0 (retries disabled)
+ # When 429 is received, it should fail immediately without retry
+ # Do NOT configure anything - use defaults (http.maxRetries defaults to 0)
+
+ # Should fail immediately without retry
+ test_must_fail git ls-remote "$HTTPD_URL/http_429/no-retry-without-config/1/repo.git" 2>err &&
+
+ # Verify no retry happened (no "waiting" message)
+ test_grep ! -i "waiting.*retry" err &&
+
+ # Should get 429 error
+ test_grep "429" err
+'
+
+test_expect_success 'GIT_HTTP_RETRY_AFTER overrides http.retryAfter config' '
+ # Configure retryAfter to 10 seconds
+ test_config http.maxRetries 3 &&
+ test_config http.retryAfter 10 &&
+
+ # Override with environment variable to 1 second
+ start=$(test-tool date getnanos) &&
+ GIT_HTTP_RETRY_AFTER=1 git ls-remote "$HTTPD_URL/http_429/env-retry-after-override/none/repo.git" >output 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should use env var (1 second), not config (10 seconds)
+ duration_int=${duration%.*} &&
+ test "$duration_int" -ge 1 &&
+ test "$duration_int" -lt 5 &&
+ test_grep "refs/heads/" output &&
+ test_grep "waiting.*retry" err
+'
+
+test_expect_success 'GIT_HTTP_MAX_RETRIES overrides http.maxRetries config' '
+ # Configure maxRetries to 0 (disabled)
+ test_config http.maxRetries 0 &&
+ test_config http.retryAfter 1 &&
+
+ # Override with environment variable to enable retries
+ GIT_HTTP_MAX_RETRIES=3 git ls-remote "$HTTPD_URL/http_429/env-max-retries-override/1/repo.git" >output 2>err &&
+
+ # Should retry (env var enables it despite config saying disabled)
+ test_grep "refs/heads/" output &&
+ test_grep "waiting.*retry" err
+'
+
+test_expect_success 'GIT_HTTP_MAX_RETRY_TIME overrides http.maxRetryTime config' '
+ # Configure maxRetryTime to 100 seconds (would accept 50 second delay)
+ test_config http.maxRetries 3 &&
+ test_config http.maxRetryTime 100 &&
+
+ # Override with environment variable to 10 seconds (should reject 50 second delay)
+ start=$(test-tool date getnanos) &&
+ test_must_fail env GIT_HTTP_MAX_RETRY_TIME=10 \
+ git ls-remote "$HTTPD_URL/http_429/env-max-retry-time-override/50/repo.git" 2>err &&
+ duration=$(test-tool date getnanos $start) &&
+
+ # Should fail quickly (not wait 50 seconds) because env var limits to 10
+ duration_int=${duration%.*} &&
+ test "$duration_int" -lt 49 &&
+ test_grep "greater than http.maxRetryTime" err
+'
+
+test_expect_success 'verify normal repository access still works' '
+ git ls-remote "$HTTPD_URL/smart/repo.git" >output &&
+ test_grep "refs/heads/" output
+'
+
+test_done
diff --git a/t/t5620-backfill.sh b/t/t5620-backfill.sh
index 58c81556e7..f3b5e39493 100755
--- a/t/t5620-backfill.sh
+++ b/t/t5620-backfill.sh
@@ -7,6 +7,14 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
+test_expect_success 'backfill rejects unexpected arguments' '
+ test_must_fail git backfill unexpected-arg 2>err &&
+ test_grep "ambiguous argument .*unexpected-arg" err &&
+
+ test_must_fail git backfill --all --unexpected-arg --first-parent 2>err &&
+ test_grep "unrecognized argument: --unexpected-arg" err
+'
+
# We create objects in the 'src' repo.
test_expect_success 'setup repo for object creation' '
echo "{print \$1}" >print_1.awk &&
@@ -15,7 +23,7 @@ test_expect_success 'setup repo for object creation' '
git init src &&
mkdir -p src/a/b/c &&
- mkdir -p src/d/e &&
+ mkdir -p src/d/f &&
for i in 1 2
do
@@ -26,8 +34,9 @@ test_expect_success 'setup repo for object creation' '
echo "Version $i of file a/b/$n" > src/a/b/file.$n.txt &&
echo "Version $i of file a/b/c/$n" > src/a/b/c/file.$n.txt &&
echo "Version $i of file d/$n" > src/d/file.$n.txt &&
- echo "Version $i of file d/e/$n" > src/d/e/file.$n.txt &&
+ echo "Version $i of file d/f/$n" > src/d/f/file.$n.txt &&
git -C src add . &&
+ test_tick &&
git -C src commit -m "Iteration $n" || return 1
done
done
@@ -41,6 +50,53 @@ test_expect_success 'setup bare clone for server' '
git -C srv.bare config --local uploadpack.allowanysha1inwant 1
'
+# Create a version of the repo with branches for testing revision
+# arguments like --all, --first-parent, and --since.
+#
+# main: 8 commits (linear) + merge of side branch
+# 48 original blobs + 4 side blobs = 52 blobs from main HEAD
+# side: 2 commits adding s/file.{1,2}.txt (v1, v2), merged into main
+# other: 1 commit adding o/file.{1,2}.txt (not merged)
+# 54 total blobs reachable from --all
+test_expect_success 'setup branched repo for revision tests' '
+ git clone src src-revs &&
+
+ # Side branch from tip of main with unique files
+ git -C src-revs checkout -b side HEAD &&
+ mkdir -p src-revs/s &&
+ echo "Side version 1 of file 1" >src-revs/s/file.1.txt &&
+ echo "Side version 1 of file 2" >src-revs/s/file.2.txt &&
+ test_tick &&
+ git -C src-revs add . &&
+ git -C src-revs commit -m "Side commit 1" &&
+
+ echo "Side version 2 of file 1" >src-revs/s/file.1.txt &&
+ echo "Side version 2 of file 2" >src-revs/s/file.2.txt &&
+ test_tick &&
+ git -C src-revs add . &&
+ git -C src-revs commit -m "Side commit 2" &&
+
+ # Merge side into main
+ git -C src-revs checkout main &&
+ test_tick &&
+ git -C src-revs merge side --no-ff -m "Merge side branch" &&
+
+ # Other branch (not merged) for --all testing
+ git -C src-revs checkout -b other main~1 &&
+ mkdir -p src-revs/o &&
+ echo "Other content 1" >src-revs/o/file.1.txt &&
+ echo "Other content 2" >src-revs/o/file.2.txt &&
+ test_tick &&
+ git -C src-revs add . &&
+ git -C src-revs commit -m "Other commit" &&
+
+ git -C src-revs checkout main &&
+
+ git clone --bare "file://$(pwd)/src-revs" srv-revs.bare &&
+ git -C srv-revs.bare config --local uploadpack.allowfilter 1 &&
+ git -C srv-revs.bare config --local uploadpack.allowanysha1inwant 1
+'
+
# do basic partial clone from "srv.bare"
test_expect_success 'do partial clone 1, backfill gets all objects' '
git clone --no-checkout --filter=blob:none \
@@ -119,6 +175,21 @@ test_expect_success 'backfill --sparse' '
test_line_count = 0 missing
'
+test_expect_success 'backfill auto-detects sparse-checkout from config' '
+ git clone --sparse --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv.bare" backfill-auto-sparse &&
+
+ git -C backfill-auto-sparse rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 44 missing &&
+
+ GIT_TRACE2_EVENT="$(pwd)/auto-sparse-trace" git \
+ -C backfill-auto-sparse backfill &&
+
+ test_trace2_data promisor fetch_count 4 <auto-sparse-trace &&
+ test_trace2_data path-walk paths 5 <auto-sparse-trace
+'
+
test_expect_success 'backfill --sparse without cone mode (positive)' '
git clone --no-checkout --filter=blob:none \
--single-branch --branch=main \
@@ -176,6 +247,157 @@ test_expect_success 'backfill --sparse without cone mode (negative)' '
test_line_count = 12 missing
'
+test_expect_success 'backfill with revision range' '
+ test_when_finished rm -rf backfill-revs &&
+ git clone --no-checkout --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv.bare" backfill-revs &&
+
+ # No blobs yet
+ git -C backfill-revs rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 48 missing &&
+
+ git -C backfill-revs backfill HEAD~2..HEAD &&
+
+ # 30 objects downloaded.
+ git -C backfill-revs rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 18 missing
+'
+
+test_expect_success 'backfill with revisions over stdin' '
+ test_when_finished rm -rf backfill-revs &&
+ git clone --no-checkout --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv.bare" backfill-revs &&
+
+ # No blobs yet
+ git -C backfill-revs rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 48 missing &&
+
+ cat >in <<-EOF &&
+ HEAD
+ ^HEAD~2
+ EOF
+
+ git -C backfill-revs backfill --stdin <in &&
+
+ # 30 objects downloaded.
+ git -C backfill-revs rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 18 missing
+'
+
+test_expect_success 'backfill with prefix pathspec' '
+ test_when_finished rm -rf backfill-path &&
+ git clone --bare --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv.bare" backfill-path &&
+
+ # No blobs yet
+ git -C backfill-path rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 48 missing &&
+
+ git -C backfill-path backfill HEAD -- d/f 2>err &&
+ test_must_be_empty err &&
+
+ git -C backfill-path rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 40 missing
+'
+
+test_expect_success 'backfill with multiple pathspecs' '
+ test_when_finished rm -rf backfill-path &&
+ git clone --bare --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv.bare" backfill-path &&
+
+ # No blobs yet
+ git -C backfill-path rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 48 missing &&
+
+ git -C backfill-path backfill HEAD -- d/f a 2>err &&
+ test_must_be_empty err &&
+
+ git -C backfill-path rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 16 missing
+'
+
+test_expect_success 'backfill with wildcard pathspec' '
+ test_when_finished rm -rf backfill-path &&
+ git clone --bare --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv.bare" backfill-path &&
+
+ # No blobs yet
+ git -C backfill-path rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 48 missing &&
+
+ git -C backfill-path backfill HEAD -- "d/file.*.txt" 2>err &&
+ test_must_be_empty err &&
+
+ git -C backfill-path rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 40 missing
+'
+
+test_expect_success 'backfill with --all' '
+ test_when_finished rm -rf backfill-all &&
+ git clone --no-checkout --filter=blob:none \
+ "file://$(pwd)/srv-revs.bare" backfill-all &&
+
+ # All blobs from all refs are missing
+ git -C backfill-all rev-list --quiet --objects --all --missing=print >missing &&
+ test_line_count = 54 missing &&
+
+ # Backfill from HEAD gets main blobs only
+ git -C backfill-all backfill HEAD &&
+
+ # Other branch blobs still missing
+ git -C backfill-all rev-list --quiet --objects --all --missing=print >missing &&
+ test_line_count = 2 missing &&
+
+ # Backfill with --all gets everything
+ git -C backfill-all backfill --all &&
+
+ git -C backfill-all rev-list --quiet --objects --all --missing=print >missing &&
+ test_line_count = 0 missing
+'
+
+test_expect_success 'backfill with --first-parent' '
+ test_when_finished rm -rf backfill-fp &&
+ git clone --no-checkout --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv-revs.bare" backfill-fp &&
+
+ git -C backfill-fp rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 52 missing &&
+
+ # --first-parent skips the side branch commits, so
+	# s/file.{1,2}.txt v1 blobs (only in side commit 1) are never fetched.
+ git -C backfill-fp backfill --first-parent HEAD &&
+
+ git -C backfill-fp rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 2 missing
+'
+
+test_expect_success 'backfill with --since' '
+ test_when_finished rm -rf backfill-since &&
+ git clone --no-checkout --filter=blob:none \
+ --single-branch --branch=main \
+ "file://$(pwd)/srv-revs.bare" backfill-since &&
+
+ git -C backfill-since rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 52 missing &&
+
+ # Use a cutoff between commits 4 and 5 (between v1 and v2
+ # iterations). Commits 5-8 still carry v1 of files 2-4 in
+ # their trees, but v1 of file.1.txt is only in commits 1-4.
+ SINCE=$(git -C backfill-since log --first-parent --reverse \
+ --format=%ct HEAD~1 | sed -n 5p) &&
+ git -C backfill-since backfill --since="@$((SINCE - 1))" HEAD &&
+
+ # 6 missing: v1 of file.1.txt in all 6 directories
+ git -C backfill-since rev-list --quiet --objects --missing=print HEAD >missing &&
+ test_line_count = 6 missing
+'
+
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t6101-rev-parse-parents.sh b/t/t6101-rev-parse-parents.sh
index 5f55ab98d3..7281889717 100755
--- a/t/t6101-rev-parse-parents.sh
+++ b/t/t6101-rev-parse-parents.sh
@@ -39,7 +39,8 @@ test_expect_success 'setup' '
'
test_expect_success 'start is valid' '
- git rev-parse start | grep "^$OID_REGEX$"
+ git rev-parse start >actual &&
+ test_grep "^$OID_REGEX$" actual
'
test_expect_success 'start^0' '
diff --git a/t/t6403-merge-file.sh b/t/t6403-merge-file.sh
index 4d6e748320..801284cf8f 100755
--- a/t/t6403-merge-file.sh
+++ b/t/t6403-merge-file.sh
@@ -542,6 +542,15 @@ test_expect_success '--object-id fails without repository' '
grep "not a git repository" err
'
+test_expect_success 'run in a linked worktree with --object-id' '
+ empty="$(test_oid empty_blob)" &&
+ git worktree add work &&
+ git -C work merge-file --object-id $empty $empty $empty >actual &&
+ git worktree remove work &&
+ git merge-file --object-id $empty $empty $empty >expected &&
+ test_cmp actual expected
+'
+
test_expect_success 'merging C files with "myers" diff algorithm creates some spurious conflicts' '
cat >expect.c <<-\EOF &&
int g(size_t u)
diff --git a/t/t7004-tag.sh b/t/t7004-tag.sh
index ce2ff2a28a..faf7d97fc4 100755
--- a/t/t7004-tag.sh
+++ b/t/t7004-tag.sh
@@ -33,8 +33,10 @@ test_expect_success 'listing all tags in an empty tree should succeed' '
'
test_expect_success 'listing all tags in an empty tree should output nothing' '
- test $(git tag -l | wc -l) -eq 0 &&
- test $(git tag | wc -l) -eq 0
+ git tag -l >actual &&
+ test_must_be_empty actual &&
+ git tag >actual &&
+ test_must_be_empty actual
'
test_expect_success 'sort tags, ignore case' '
@@ -178,7 +180,8 @@ test_expect_success 'listing tags using a non-matching pattern should succeed' '
'
test_expect_success 'listing tags using a non-matching pattern should output nothing' '
- test $(git tag -l xxx | wc -l) -eq 0
+ git tag -l xxx >actual &&
+ test_must_be_empty actual
'
# special cases for creating tags:
@@ -188,13 +191,15 @@ test_expect_success 'trying to create a tag with the name of one existing should
'
test_expect_success 'trying to create a tag with a non-valid name should fail' '
- test $(git tag -l | wc -l) -eq 1 &&
+ git tag -l >actual &&
+ test_line_count = 1 actual &&
test_must_fail git tag "" &&
test_must_fail git tag .othertag &&
test_must_fail git tag "other tag" &&
test_must_fail git tag "othertag^" &&
test_must_fail git tag "other~tag" &&
- test $(git tag -l | wc -l) -eq 1
+ git tag -l >actual &&
+ test_line_count = 1 actual
'
test_expect_success 'creating a tag using HEAD directly should succeed' '
diff --git a/t/t7704-repack-cruft.sh b/t/t7704-repack-cruft.sh
index aa2e2e6ad8..9e03b04315 100755
--- a/t/t7704-repack-cruft.sh
+++ b/t/t7704-repack-cruft.sh
@@ -869,4 +869,26 @@ test_expect_success 'repack --write-midx includes cruft when already geometric'
)
'
+test_expect_success 'repack rescues once-cruft objects above geometric split' '
+ git config repack.midxMustContainCruft false &&
+
+ test_commit reachable &&
+ test_commit unreachable &&
+
+ unreachable="$(git rev-parse HEAD)" &&
+
+ git reset --hard HEAD^ &&
+ git tag -d unreachable &&
+ git reflog expire --all --expire=all &&
+
+ git repack --cruft -d &&
+
+ echo $unreachable | git pack-objects .git/objects/pack/pack &&
+
+ test_commit new &&
+
+ git update-ref refs/heads/other $unreachable &&
+ git repack --geometric=2 -d --write-midx --write-bitmap-index
+'
+
test_done
diff --git a/t/t8003-blame-corner-cases.sh b/t/t8003-blame-corner-cases.sh
index 731265541a..30e7960ace 100755
--- a/t/t8003-blame-corner-cases.sh
+++ b/t/t8003-blame-corner-cases.sh
@@ -49,80 +49,69 @@ test_expect_success setup '
'
test_expect_success 'straight copy without -C' '
-
- git blame uno | grep Second
-
+ git blame uno >actual &&
+ test_grep Second actual
'
test_expect_success 'straight move without -C' '
-
- git blame dos | grep Initial
-
+ git blame dos >actual &&
+ test_grep Initial actual
'
test_expect_success 'straight copy with -C' '
-
- git blame -C1 uno | grep Second
-
+ git blame -C1 uno >actual &&
+ test_grep Second actual
'
test_expect_success 'straight move with -C' '
-
- git blame -C1 dos | grep Initial
-
+ git blame -C1 dos >actual &&
+ test_grep Initial actual
'
test_expect_success 'straight copy with -C -C' '
-
- git blame -C -C1 uno | grep Initial
-
+ git blame -C -C1 uno >actual &&
+ test_grep Initial actual
'
test_expect_success 'straight move with -C -C' '
-
- git blame -C -C1 dos | grep Initial
-
+ git blame -C -C1 dos >actual &&
+ test_grep Initial actual
'
test_expect_success 'append without -C' '
-
- git blame -L2 tres | grep Second
-
+ git blame -L2 tres >actual &&
+ test_grep Second actual
'
test_expect_success 'append with -C' '
-
- git blame -L2 -C1 tres | grep Second
-
+ git blame -L2 -C1 tres >actual &&
+ test_grep Second actual
'
test_expect_success 'append with -C -C' '
-
- git blame -L2 -C -C1 tres | grep Second
-
+ git blame -L2 -C -C1 tres >actual &&
+ test_grep Second actual
'
test_expect_success 'append with -C -C -C' '
-
- git blame -L2 -C -C -C1 tres | grep Initial
-
+ git blame -L2 -C -C -C1 tres >actual &&
+ test_grep Initial actual
'
test_expect_success 'blame wholesale copy' '
-
- git blame -f -C -C1 HEAD^ -- cow | sed -e "$pick_fc" >current &&
+ git blame -f -C -C1 HEAD^ -- cow >actual &&
+ sed -e "$pick_fc" actual >current &&
cat >expected <<-\EOF &&
mouse-Initial
mouse-Second
mouse-Third
EOF
test_cmp expected current
-
'
test_expect_success 'blame wholesale copy and more' '
-
- git blame -f -C -C1 HEAD -- cow | sed -e "$pick_fc" >current &&
+ git blame -f -C -C1 HEAD -- cow >actual &&
+ sed -e "$pick_fc" actual >current &&
cat >expected <<-\EOF &&
mouse-Initial
mouse-Second
@@ -130,11 +119,9 @@ test_expect_success 'blame wholesale copy and more' '
mouse-Third
EOF
test_cmp expected current
-
'
test_expect_success 'blame wholesale copy and more in the index' '
-
cat >horse <<-\EOF &&
ABC
DEF
@@ -144,7 +131,8 @@ test_expect_success 'blame wholesale copy and more in the index' '
EOF
git add horse &&
test_when_finished "git rm -f horse" &&
- git blame -f -C -C1 -- horse | sed -e "$pick_fc" >current &&
+ git blame -f -C -C1 -- horse >actual &&
+ sed -e "$pick_fc" actual >current &&
cat >expected <<-\EOF &&
mouse-Initial
mouse-Second
@@ -153,11 +141,9 @@ test_expect_success 'blame wholesale copy and more in the index' '
mouse-Third
EOF
test_cmp expected current
-
'
test_expect_success 'blame during cherry-pick with file rename conflict' '
-
test_when_finished "git reset --hard && git checkout main" &&
git checkout HEAD~3 &&
echo MOUSE >> mouse &&
@@ -168,7 +154,8 @@ test_expect_success 'blame during cherry-pick with file rename conflict' '
(git cherry-pick HEAD@{1} || test $? -eq 1) &&
git show HEAD@{1}:rodent > rodent &&
git add rodent &&
- git blame -f -C -C1 rodent | sed -e "$pick_fc" >current &&
+ git blame -f -C -C1 rodent >actual &&
+ sed -e "$pick_fc" actual >current &&
cat >expected <<-\EOF &&
mouse-Initial
mouse-Second
@@ -246,14 +233,14 @@ test_expect_success 'setup file with CRLF newlines' '
test_expect_success 'blame file with CRLF core.autocrlf true' '
git config core.autocrlf true &&
git blame crlffile >actual &&
- grep "A U Thor" actual
+ test_grep "A U Thor" actual
'
test_expect_success 'blame file with CRLF attributes text' '
git config core.autocrlf false &&
echo "crlffile text" >.gitattributes &&
git blame crlffile >actual &&
- grep "A U Thor" actual
+ test_grep "A U Thor" actual
'
test_expect_success 'blame file with CRLF core.autocrlf=true' '
@@ -267,7 +254,7 @@ test_expect_success 'blame file with CRLF core.autocrlf=true' '
git checkout crlfinrepo &&
rm tmp &&
git blame crlfinrepo >actual &&
- grep "A U Thor" actual
+ test_grep "A U Thor" actual
'
test_expect_success 'setup coalesce tests' '
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index 24f6c76aee..e7ab645a3d 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -1649,7 +1649,9 @@ test_expect_success $PREREQ 'To headers from files reset each patch' '
'
test_expect_success $PREREQ 'setup expect' '
-cat >email-using-8bit <<\EOF
+# NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+# that contain multibyte chars.
+cat >email-using-8bit <<EOF
From fe6ecc66ece37198fe5db91fa2fc41d9f4fe5cc4 Mon Sep 17 00:00:00 2001
Message-ID: <bogus-message-id@example.com>
From: author@example.com
@@ -1735,7 +1737,9 @@ test_expect_success $PREREQ '--8bit-encoding overrides sendemail.8bitEncoding' '
'
test_expect_success $PREREQ 'setup expect' '
- cat >email-using-8bit <<-\EOF
+ # NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+ # that contain multibyte chars.
+ cat >email-using-8bit <<-EOF
From fe6ecc66ece37198fe5db91fa2fc41d9f4fe5cc4 Mon Sep 17 00:00:00 2001
Message-ID: <bogus-message-id@example.com>
From: author@example.com
@@ -1764,7 +1768,9 @@ test_expect_success $PREREQ '--8bit-encoding also treats subject' '
'
test_expect_success $PREREQ 'setup expect' '
- cat >email-using-8bit <<-\EOF
+ # NOTE: do not quote this heredoc, Dash 0.5.13 has a bug with heredocs
+ # that contain multibyte chars.
+ cat >email-using-8bit <<-EOF
From fe6ecc66ece37198fe5db91fa2fc41d9f4fe5cc4 Mon Sep 17 00:00:00 2001
Message-ID: <bogus-message-id@example.com>
From: A U Thor <author@example.com>
diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh
index 5685cce6fe..479437760b 100755
--- a/t/t9300-fast-import.sh
+++ b/t/t9300-fast-import.sh
@@ -3635,25 +3635,21 @@ background_import_then_checkpoint () {
echo "progress checkpoint"
) >&8 &
- error=1 ;# assume the worst
- while read output <&9
- do
- if test "$output" = "progress checkpoint"
- then
- error=0
- break
- elif test "$output" = "UNEXPECTED"
- then
- break
- fi
- # otherwise ignore cruft
- echo >&2 "cruft: $output"
- done
+ last=$(
+ while read output <&9
+ do
+ if test "$output" = "progress checkpoint" || test "$output" = "UNEXPECTED"
+ then
+ echo "$output"
+ break
+ else
+ # otherwise ignore cruft
+ echo >&2 "cruft: $output"
+ fi
+ done
+ )
- if test $error -eq 1
- then
- false
- fi
+ test "$last" = "progress checkpoint"
}
background_import_still_running () {
diff --git a/t/t9305-fast-import-signatures.sh b/t/t9305-fast-import-signatures.sh
index 18707b3f6c..5667693afd 100755
--- a/t/t9305-fast-import-signatures.sh
+++ b/t/t9305-fast-import-signatures.sh
@@ -103,7 +103,7 @@ test_expect_success RUST,GPG 'strip both OpenPGP signatures with --signed-commit
test_line_count = 2 out
'
-for mode in strip-if-invalid sign-if-invalid
+for mode in strip-if-invalid sign-if-invalid abort-if-invalid
do
test_expect_success GPG "import commit with no signature with --signed-commits=$mode" '
git fast-export main >output &&
@@ -135,6 +135,14 @@ do
# corresponding `data <length>` command would have to be changed too.
sed "s/OpenPGP signed commit/OpenPGP forged commit/" output >modified &&
+ if test "$mode" = abort-if-invalid
+ then
+ test_must_fail git -C new fast-import --quiet \
+ --signed-commits=$mode <modified >log 2>&1 &&
+ test_grep "aborting due to invalid signature" log &&
+ return 0
+ fi &&
+
git -C new fast-import --quiet --signed-commits=$mode <modified >log 2>&1 &&
IMPORTED=$(git -C new rev-parse --verify refs/heads/openpgp-signing) &&
diff --git a/t/t9306-fast-import-signed-tags.sh b/t/t9306-fast-import-signed-tags.sh
index 363619e7d1..ec2b241cdb 100755
--- a/t/t9306-fast-import-signed-tags.sh
+++ b/t/t9306-fast-import-signed-tags.sh
@@ -77,4 +77,122 @@ test_expect_success GPGSSH 'import SSH signed tag with --signed-tags=strip' '
test_grep ! "SSH SIGNATURE" out
'
+for mode in strip-if-invalid sign-if-invalid abort-if-invalid
+do
+ test_expect_success GPG "import tag with no signature with --signed-tags=$mode" '
+ test_when_finished rm -rf import &&
+ git init import &&
+
+ git fast-export --signed-tags=verbatim >output &&
+ git -C import fast-import --quiet --signed-tags=$mode <output >log 2>&1 &&
+ test_must_be_empty log
+ '
+
+ test_expect_success GPG "keep valid OpenPGP signature with --signed-tags=$mode" '
+ test_when_finished rm -rf import &&
+ git init import &&
+
+ git fast-export --signed-tags=verbatim openpgp-signed >output &&
+ git -C import fast-import --quiet --signed-tags=$mode <output >log 2>&1 &&
+ IMPORTED=$(git -C import rev-parse --verify refs/tags/openpgp-signed) &&
+ test $OPENPGP_SIGNED = $IMPORTED &&
+ git -C import cat-file tag "$IMPORTED" >actual &&
+ test_grep -E "^-----BEGIN PGP SIGNATURE-----" actual &&
+ test_must_be_empty log
+ '
+
+ test_expect_success GPG "handle signature invalidated by message change with --signed-tags=$mode" '
+ test_when_finished rm -rf import &&
+ git init import &&
+
+ git fast-export --signed-tags=verbatim openpgp-signed >output &&
+
+ # Change the tag message, which invalidates the signature. The tag
+ # message length should not change though, otherwise the corresponding
+ # `data <length>` command would have to be changed too.
+ sed "s/OpenPGP signed tag/OpenPGP forged tag/" output >modified &&
+
+ if test "$mode" = abort-if-invalid
+ then
+ test_must_fail git -C import fast-import --quiet \
+ --signed-tags=$mode <modified >log 2>&1 &&
+ test_grep "aborting due to invalid signature" log &&
+ return 0
+ fi &&
+
+ git -C import fast-import --quiet --signed-tags=$mode <modified >log 2>&1 &&
+
+ IMPORTED=$(git -C import rev-parse --verify refs/tags/openpgp-signed) &&
+ test $OPENPGP_SIGNED != $IMPORTED &&
+ git -C import cat-file tag "$IMPORTED" >actual &&
+
+ if test "$mode" = strip-if-invalid
+ then
+ test_grep ! -E "^-----BEGIN PGP SIGNATURE-----" actual
+ else
+ test_grep -E "^-----BEGIN PGP SIGNATURE-----" actual &&
+ git -C import verify-tag "$IMPORTED"
+ fi &&
+
+ test_must_be_empty log
+ '
+
+ test_expect_success GPGSM "keep valid X.509 signature with --signed-tags=$mode" '
+ test_when_finished rm -rf import &&
+ git init import &&
+
+ git fast-export --signed-tags=verbatim x509-signed >output &&
+ git -C import fast-import --quiet --signed-tags=$mode <output >log 2>&1 &&
+ IMPORTED=$(git -C import rev-parse --verify refs/tags/x509-signed) &&
+ test $X509_SIGNED = $IMPORTED &&
+ git -C import cat-file tag x509-signed >actual &&
+ test_grep -E "^-----BEGIN SIGNED MESSAGE-----" actual &&
+ test_must_be_empty log
+ '
+
+ test_expect_success GPGSSH "keep valid SSH signature with --signed-tags=$mode" '
+ test_when_finished rm -rf import &&
+ git init import &&
+
+ test_config -C import gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
+
+ git fast-export --signed-tags=verbatim ssh-signed >output &&
+ git -C import fast-import --quiet --signed-tags=$mode <output >log 2>&1 &&
+ IMPORTED=$(git -C import rev-parse --verify refs/tags/ssh-signed) &&
+ test $SSH_SIGNED = $IMPORTED &&
+ git -C import cat-file tag ssh-signed >actual &&
+ test_grep -E "^-----BEGIN SSH SIGNATURE-----" actual &&
+ test_must_be_empty log
+ '
+done
+
+test_expect_success GPGSSH 'sign invalid tag with explicit keyid' '
+ test_when_finished rm -rf import &&
+ git init import &&
+
+ git fast-export --signed-tags=verbatim ssh-signed >output &&
+
+ # Change the tag message, which invalidates the signature. The tag
+ # message length should not change though, otherwise the corresponding
+ # `data <length>` command would have to be changed too.
+ sed "s/SSH signed tag/SSH forged tag/" output >modified &&
+
+ # Configure the target repository with an invalid default signing key.
+ test_config -C import user.signingkey "not-a-real-key-id" &&
+ test_config -C import gpg.format ssh &&
+ test_config -C import gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
+ test_must_fail git -C import fast-import --quiet \
+ --signed-tags=sign-if-invalid <modified >/dev/null 2>&1 &&
+
+ # Import using explicitly provided signing key.
+ git -C import fast-import --quiet \
+ --signed-tags=sign-if-invalid="${GPGSSH_KEY_PRIMARY}" <modified &&
+
+ IMPORTED=$(git -C import rev-parse --verify refs/tags/ssh-signed) &&
+ test $SSH_SIGNED != $IMPORTED &&
+ git -C import cat-file tag "$IMPORTED" >actual &&
+ test_grep -E "^-----BEGIN SSH SIGNATURE-----" actual &&
+ git -C import verify-tag "$IMPORTED"
+'
+
test_done
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 14e238d24d..f3af10fb7e 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -48,6 +48,9 @@ test_decode_color () {
if (n == 2) return "FAINT";
if (n == 3) return "ITALIC";
if (n == 7) return "REVERSE";
+ if (n == 22) return "NORMAL_INTENSITY";
+ if (n == 23) return "NOITALIC";
+ if (n == 27) return "NOREVERSE";
if (n == 30) return "BLACK";
if (n == 31) return "RED";
if (n == 32) return "GREEN";
diff --git a/t/unit-tests/u-oidtree.c b/t/unit-tests/u-oidtree.c
index e6eede2740..d4d05c7dc3 100644
--- a/t/unit-tests/u-oidtree.c
+++ b/t/unit-tests/u-oidtree.c
@@ -24,7 +24,7 @@ static int fill_tree_loc(struct oidtree *ot, const char *hexes[], size_t n)
return 0;
}
-static void check_contains(struct oidtree *ot, const char *hex, int expected)
+static void check_contains(struct oidtree *ot, const char *hex, bool expected)
{
struct object_id oid;
@@ -38,7 +38,7 @@ struct expected_hex_iter {
const char *query;
};
-static enum cb_next check_each_cb(const struct object_id *oid, void *data)
+static int check_each_cb(const struct object_id *oid, void *data)
{
struct expected_hex_iter *hex_iter = data;
struct object_id expected;
@@ -49,7 +49,7 @@ static enum cb_next check_each_cb(const struct object_id *oid, void *data)
&expected);
cl_assert_equal_s(oid_to_hex(oid), oid_to_hex(&expected));
hex_iter->i += 1;
- return CB_CONTINUE;
+ return 0;
}
LAST_ARG_MUST_BE_NULL
@@ -88,12 +88,12 @@ void test_oidtree__cleanup(void)
void test_oidtree__contains(void)
{
FILL_TREE(&ot, "444", "1", "2", "3", "4", "5", "a", "b", "c", "d", "e");
- check_contains(&ot, "44", 0);
- check_contains(&ot, "441", 0);
- check_contains(&ot, "440", 0);
- check_contains(&ot, "444", 1);
- check_contains(&ot, "4440", 1);
- check_contains(&ot, "4444", 0);
+ check_contains(&ot, "44", false);
+ check_contains(&ot, "441", false);
+ check_contains(&ot, "440", false);
+ check_contains(&ot, "444", true);
+ check_contains(&ot, "4440", true);
+ check_contains(&ot, "4444", false);
}
void test_oidtree__each(void)
diff --git a/tmp-objdir.c b/tmp-objdir.c
index e436eed07e..d199d39e7c 100644
--- a/tmp-objdir.c
+++ b/tmp-objdir.c
@@ -11,6 +11,7 @@
#include "strvec.h"
#include "quote.h"
#include "odb.h"
+#include "odb/source.h"
#include "repository.h"
struct tmp_objdir {
diff --git a/tools/README.md b/tools/README.md
new file mode 100644
index 0000000000..d732997136
--- /dev/null
+++ b/tools/README.md
@@ -0,0 +1,7 @@
+Developer Tooling
+-----------------
+
+This directory is expected to contain all sorts of tooling that
+relates to our build infrastructure. This includes not only
+scripts and inputs required by our build systems, but also
+scripts that developers are expected to run manually.
diff --git a/check-builtins.sh b/tools/check-builtins.sh
index a0aaf3a347..a0aaf3a347 100755
--- a/check-builtins.sh
+++ b/tools/check-builtins.sh
diff --git a/contrib/coccinelle/.gitignore b/tools/coccinelle/.gitignore
index 1d45c0a40c..1d45c0a40c 100644
--- a/contrib/coccinelle/.gitignore
+++ b/tools/coccinelle/.gitignore
diff --git a/contrib/coccinelle/README b/tools/coccinelle/README
index 055ad0e06a..fd0a543cc2 100644
--- a/contrib/coccinelle/README
+++ b/tools/coccinelle/README
@@ -38,7 +38,7 @@ that might be useful to developers.
So to aid these large scale refactorings, semantic patches can be used.
However we do not want to store them in the same place as the checks for
bad patterns, as then automated builds would fail.
- That is why semantic patches 'contrib/coccinelle/*.pending.cocci'
+ That is why semantic patches 'tools/coccinelle/*.pending.cocci'
are ignored for checks, and can be applied using 'make coccicheck-pending'.
This allows to expose plans of pending large scale refactorings without
diff --git a/contrib/coccinelle/array.cocci b/tools/coccinelle/array.cocci
index e71baea00b..e71baea00b 100644
--- a/contrib/coccinelle/array.cocci
+++ b/tools/coccinelle/array.cocci
diff --git a/contrib/coccinelle/commit.cocci b/tools/coccinelle/commit.cocci
index 42725161e9..42725161e9 100644
--- a/contrib/coccinelle/commit.cocci
+++ b/tools/coccinelle/commit.cocci
diff --git a/contrib/coccinelle/config_fn_ctx.pending.cocci b/tools/coccinelle/config_fn_ctx.pending.cocci
index 54f09fcbcd..54f09fcbcd 100644
--- a/contrib/coccinelle/config_fn_ctx.pending.cocci
+++ b/tools/coccinelle/config_fn_ctx.pending.cocci
diff --git a/contrib/coccinelle/equals-null.cocci b/tools/coccinelle/equals-null.cocci
index 92c7054013..92c7054013 100644
--- a/contrib/coccinelle/equals-null.cocci
+++ b/tools/coccinelle/equals-null.cocci
diff --git a/contrib/coccinelle/flex_alloc.cocci b/tools/coccinelle/flex_alloc.cocci
index e9f7f6d861..e9f7f6d861 100644
--- a/contrib/coccinelle/flex_alloc.cocci
+++ b/tools/coccinelle/flex_alloc.cocci
diff --git a/contrib/coccinelle/free.cocci b/tools/coccinelle/free.cocci
index 03799e1908..03799e1908 100644
--- a/contrib/coccinelle/free.cocci
+++ b/tools/coccinelle/free.cocci
diff --git a/contrib/coccinelle/git_config_number.cocci b/tools/coccinelle/git_config_number.cocci
index 7b57dceefe..7b57dceefe 100644
--- a/contrib/coccinelle/git_config_number.cocci
+++ b/tools/coccinelle/git_config_number.cocci
diff --git a/contrib/coccinelle/hashmap.cocci b/tools/coccinelle/hashmap.cocci
index c5dbb4557b..c5dbb4557b 100644
--- a/contrib/coccinelle/hashmap.cocci
+++ b/tools/coccinelle/hashmap.cocci
diff --git a/contrib/coccinelle/index-compatibility.cocci b/tools/coccinelle/index-compatibility.cocci
index 31e36cf3c4..31e36cf3c4 100644
--- a/contrib/coccinelle/index-compatibility.cocci
+++ b/tools/coccinelle/index-compatibility.cocci
diff --git a/contrib/coccinelle/meson.build b/tools/coccinelle/meson.build
index ae7f5b5460..ae7f5b5460 100644
--- a/contrib/coccinelle/meson.build
+++ b/tools/coccinelle/meson.build
diff --git a/contrib/coccinelle/object_id.cocci b/tools/coccinelle/object_id.cocci
index 01f8d6935b..01f8d6935b 100644
--- a/contrib/coccinelle/object_id.cocci
+++ b/tools/coccinelle/object_id.cocci
diff --git a/contrib/coccinelle/preincr.cocci b/tools/coccinelle/preincr.cocci
index ae42cb0730..ae42cb0730 100644
--- a/contrib/coccinelle/preincr.cocci
+++ b/tools/coccinelle/preincr.cocci
diff --git a/contrib/coccinelle/qsort.cocci b/tools/coccinelle/qsort.cocci
index 22b93a9966..22b93a9966 100644
--- a/contrib/coccinelle/qsort.cocci
+++ b/tools/coccinelle/qsort.cocci
diff --git a/contrib/coccinelle/refs.cocci b/tools/coccinelle/refs.cocci
index 31d9cad8f3..31d9cad8f3 100644
--- a/contrib/coccinelle/refs.cocci
+++ b/tools/coccinelle/refs.cocci
diff --git a/contrib/coccinelle/spatchcache b/tools/coccinelle/spatchcache
index 29e9352d8a..efbcbc3827 100755
--- a/contrib/coccinelle/spatchcache
+++ b/tools/coccinelle/spatchcache
@@ -30,7 +30,7 @@
# out of control.
#
# This along with the general incremental "make" support for
-# "contrib/coccinelle" makes it viable to (re-)run coccicheck
+# "tools/coccinelle" makes it viable to (re-)run coccicheck
# e.g. when merging integration branches.
#
# Note that the "--very-quiet" flag is currently critical. The cache
@@ -42,7 +42,7 @@
# to change, so just supply "--very-quiet" for now.
#
# To use this, simply set SPATCH to
-# contrib/coccinelle/spatchcache. Then optionally set:
+# tools/coccinelle/spatchcache. Then optionally set:
#
# [spatchCache]
# # Optional: path to a custom spatch
@@ -65,7 +65,7 @@
#
# redis-cli FLUSHALL
# <make && make coccicheck, as above>
-# grep -hore HIT -e MISS -e SET -e NOCACHE -e CANTCACHE .build/contrib/coccinelle | sort | uniq -c
+# grep -hore HIT -e MISS -e SET -e NOCACHE -e CANTCACHE .build/tools/coccinelle | sort | uniq -c
# 600 CANTCACHE
# 7365 MISS
# 7365 SET
diff --git a/contrib/coccinelle/strbuf.cocci b/tools/coccinelle/strbuf.cocci
index 83bd93be5f..f586128329 100644
--- a/contrib/coccinelle/strbuf.cocci
+++ b/tools/coccinelle/strbuf.cocci
@@ -71,3 +71,10 @@ identifier fn, param;
{
...
}
+
+// In the modern codebase, the .buf member of an empty strbuf is never NULL.
+@@
+struct strbuf SB;
+@@
+- SB.buf ? SB.buf : ""
++ SB.buf
diff --git a/tools/coccinelle/strvec.cocci b/tools/coccinelle/strvec.cocci
new file mode 100644
index 0000000000..64edb09f1c
--- /dev/null
+++ b/tools/coccinelle/strvec.cocci
@@ -0,0 +1,46 @@
+@@
+type T;
+identifier i;
+expression dst;
+struct strvec *src_ptr;
+struct strvec src_arr;
+@@
+(
+- for (T i = 0; i < src_ptr->nr; i++) { strvec_push(dst, src_ptr->v[i]); }
++ strvec_pushv(dst, src_ptr->v);
+|
+- for (T i = 0; i < src_arr.nr; i++) { strvec_push(dst, src_arr.v[i]); }
++ strvec_pushv(dst, src_arr.v);
+)
+
+@ separate_loop_index @
+type T;
+identifier i;
+expression dst;
+struct strvec *src_ptr;
+struct strvec src_arr;
+@@
+ T i;
+ ...
+(
+- for (i = 0; i < src_ptr->nr; i++) { strvec_push(dst, src_ptr->v[i]); }
++ strvec_pushv(dst, src_ptr->v);
+|
+- for (i = 0; i < src_arr.nr; i++) { strvec_push(dst, src_arr.v[i]); }
++ strvec_pushv(dst, src_arr.v);
+)
+
+@ unused_loop_index extends separate_loop_index @
+@@
+ {
+ ...
+- T i;
+ ... when != i
+ }
+
+@ depends on unused_loop_index @
+@@
+ if (...)
+- {
+ strvec_pushv(...);
+- }
diff --git a/contrib/coccinelle/swap.cocci b/tools/coccinelle/swap.cocci
index 522177afb6..522177afb6 100644
--- a/contrib/coccinelle/swap.cocci
+++ b/tools/coccinelle/swap.cocci
diff --git a/contrib/coccinelle/tests/free.c b/tools/coccinelle/tests/free.c
index 96d4abc0c7..96d4abc0c7 100644
--- a/contrib/coccinelle/tests/free.c
+++ b/tools/coccinelle/tests/free.c
diff --git a/contrib/coccinelle/tests/free.res b/tools/coccinelle/tests/free.res
index f90fd9f48e..f90fd9f48e 100644
--- a/contrib/coccinelle/tests/free.res
+++ b/tools/coccinelle/tests/free.res
diff --git a/contrib/coccinelle/the_repository.cocci b/tools/coccinelle/the_repository.cocci
index f1129f7985..f1129f7985 100644
--- a/contrib/coccinelle/the_repository.cocci
+++ b/tools/coccinelle/the_repository.cocci
diff --git a/contrib/coccinelle/xcalloc.cocci b/tools/coccinelle/xcalloc.cocci
index c291011607..c291011607 100644
--- a/contrib/coccinelle/xcalloc.cocci
+++ b/tools/coccinelle/xcalloc.cocci
diff --git a/contrib/coccinelle/xopen.cocci b/tools/coccinelle/xopen.cocci
index b71db67019..b71db67019 100644
--- a/contrib/coccinelle/xopen.cocci
+++ b/tools/coccinelle/xopen.cocci
diff --git a/contrib/coccinelle/xstrdup_or_null.cocci b/tools/coccinelle/xstrdup_or_null.cocci
index 9c1d2939b6..9c1d2939b6 100644
--- a/contrib/coccinelle/xstrdup_or_null.cocci
+++ b/tools/coccinelle/xstrdup_or_null.cocci
diff --git a/contrib/coccinelle/xstrncmpz.cocci b/tools/coccinelle/xstrncmpz.cocci
index ccb39e2bc0..ccb39e2bc0 100644
--- a/contrib/coccinelle/xstrncmpz.cocci
+++ b/tools/coccinelle/xstrncmpz.cocci
diff --git a/contrib/coverage-diff.sh b/tools/coverage-diff.sh
index 6ce9603568..6ce9603568 100755
--- a/contrib/coverage-diff.sh
+++ b/tools/coverage-diff.sh
diff --git a/detect-compiler b/tools/detect-compiler
index 124ebdd4c9..124ebdd4c9 100755
--- a/detect-compiler
+++ b/tools/detect-compiler
diff --git a/generate-cmdlist.sh b/tools/generate-cmdlist.sh
index 0ed39c4c5d..0ed39c4c5d 100755
--- a/generate-cmdlist.sh
+++ b/tools/generate-cmdlist.sh
diff --git a/generate-configlist.sh b/tools/generate-configlist.sh
index e28054f9e0..e28054f9e0 100755
--- a/generate-configlist.sh
+++ b/tools/generate-configlist.sh
diff --git a/generate-hooklist.sh b/tools/generate-hooklist.sh
index e0cdf26944..e0cdf26944 100755
--- a/generate-hooklist.sh
+++ b/tools/generate-hooklist.sh
diff --git a/generate-perl.sh b/tools/generate-perl.sh
index 796d835932..796d835932 100755
--- a/generate-perl.sh
+++ b/tools/generate-perl.sh
diff --git a/generate-python.sh b/tools/generate-python.sh
index 31ac115689..31ac115689 100755
--- a/generate-python.sh
+++ b/tools/generate-python.sh
diff --git a/generate-script.sh b/tools/generate-script.sh
index a149e4f0ba..a149e4f0ba 100755
--- a/generate-script.sh
+++ b/tools/generate-script.sh
diff --git a/tools/meson.build b/tools/meson.build
new file mode 100644
index 0000000000..f731f74312
--- /dev/null
+++ b/tools/meson.build
@@ -0,0 +1 @@
+subdir('coccinelle')
diff --git a/tools/precompiled.h b/tools/precompiled.h
new file mode 100644
index 0000000000..b2bec0d2b4
--- /dev/null
+++ b/tools/precompiled.h
@@ -0,0 +1 @@
+#include "git-compat-util.h"
diff --git a/contrib/update-unicode/.gitignore b/tools/update-unicode/.gitignore
index b0ebc6aad2..b0ebc6aad2 100644
--- a/contrib/update-unicode/.gitignore
+++ b/tools/update-unicode/.gitignore
diff --git a/contrib/update-unicode/README b/tools/update-unicode/README
index 151a197041..151a197041 100644
--- a/contrib/update-unicode/README
+++ b/tools/update-unicode/README
diff --git a/contrib/update-unicode/update_unicode.sh b/tools/update-unicode/update_unicode.sh
index aa90865bef..aa90865bef 100755
--- a/contrib/update-unicode/update_unicode.sh
+++ b/tools/update-unicode/update_unicode.sh
diff --git a/trailer.c b/trailer.c
index ca8abd1882..470f86a4a2 100644
--- a/trailer.c
+++ b/trailer.c
@@ -1041,7 +1041,7 @@ static struct trailer_block *trailer_block_get(const struct process_trailer_opti
for (ptr = trailer_lines; *ptr; ptr++) {
if (last && isspace((*ptr)->buf[0])) {
struct strbuf sb = STRBUF_INIT;
- strbuf_attach(&sb, *last, strlen(*last), strlen(*last));
+ strbuf_attach(&sb, *last, strlen(*last), strlen(*last) + 1);
strbuf_addbuf(&sb, *ptr);
*last = strbuf_detach(&sb, NULL);
continue;
diff --git a/transport-helper.c b/transport-helper.c
index 4d95d84f9e..570d7c6439 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -154,6 +154,8 @@ static struct child_process *get_helper(struct transport *transport)
helper->trace2_child_class = helper->args.v[0]; /* "remote-<name>" */
+ helper->clean_on_exit = 1;
+ helper->wait_after_clean = 1;
code = start_command(helper);
if (code < 0 && errno == ENOENT)
die(_("unable to find remote helper for '%s'"), data->name);
diff --git a/transport.c b/transport.c
index cb1befba8c..e53936d87b 100644
--- a/transport.c
+++ b/transport.c
@@ -1360,7 +1360,8 @@ static int pre_push_hook_feed_stdin(int hook_stdin_fd, void *pp_cb UNUSED, void
static void *pre_push_hook_data_alloc(void *feed_pipe_ctx)
{
- struct feed_pre_push_hook_data *data = xmalloc(sizeof(*data));
+ struct feed_pre_push_hook_data *data;
+ CALLOC_ARRAY(data, 1);
strbuf_init(&data->buf, 0);
data->refs = (struct ref *)feed_pipe_ctx;
return data;
diff --git a/walker.c b/walker.c
index 91332539d3..e98eb6da53 100644
--- a/walker.c
+++ b/walker.c
@@ -155,7 +155,7 @@ static int process(struct walker *walker, struct object *obj)
obj->flags |= SEEN;
if (odb_has_object(the_repository->objects, &obj->oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ ODB_HAS_OBJECT_RECHECK_PACKED | ODB_HAS_OBJECT_FETCH_PROMISOR)) {
/* We already have it, so we should scan it now. */
obj->flags |= TO_SCAN;
}
diff --git a/worktree.c b/worktree.c
index 56732f8f33..d874e23b4e 100644
--- a/worktree.c
+++ b/worktree.c
@@ -58,7 +58,7 @@ static void add_head_info(struct worktree *wt)
static int is_current_worktree(struct worktree *wt)
{
- char *git_dir = absolute_pathdup(repo_get_git_dir(the_repository));
+ char *git_dir = absolute_pathdup(repo_get_git_dir(wt->repo));
char *wt_git_dir = get_worktree_git_dir(wt);
int is_current = !fspathcmp(git_dir, absolute_path(wt_git_dir));
free(wt_git_dir);
@@ -78,7 +78,7 @@ struct worktree *get_worktree_from_repository(struct repository *repo)
wt->is_bare = !repo->worktree;
if (fspathcmp(gitdir, commondir))
wt->id = xstrdup(find_last_dir_sep(gitdir) + 1);
- wt->is_current = is_current_worktree(wt);
+ wt->is_current = true;
add_head_info(wt);
free(gitdir);
@@ -227,11 +227,11 @@ struct worktree **get_worktrees_without_reading_head(void)
char *get_worktree_git_dir(const struct worktree *wt)
{
if (!wt)
- return xstrdup(repo_get_git_dir(the_repository));
+ BUG("%s() called with NULL worktree", __func__);
else if (!wt->id)
- return xstrdup(repo_get_common_dir(the_repository));
+ return xstrdup(repo_get_common_dir(wt->repo));
else
- return repo_common_path(the_repository, "worktrees/%s", wt->id);
+ return repo_common_path(wt->repo, "worktrees/%s", wt->id);
}
static struct worktree *find_worktree_by_suffix(struct worktree **list,
diff --git a/worktree.h b/worktree.h
index 026ef303e8..d19ec29dbb 100644
--- a/worktree.h
+++ b/worktree.h
@@ -16,7 +16,7 @@ struct worktree {
struct object_id head_oid;
int is_detached;
int is_bare;
- int is_current;
+ int is_current; /* does `path` match `repo->worktree` */
int lock_reason_valid; /* private */
int prune_reason_valid; /* private */
};
@@ -51,7 +51,6 @@ int submodule_uses_worktrees(const char *path);
/*
* Return git dir of the worktree. Note that the path may be relative.
- * If wt is NULL, git dir of current worktree is returned.
*/
char *get_worktree_git_dir(const struct worktree *wt);