Merge branch 'jk/proto-v2-ref-prefix-fix'
author     Junio C Hamano <gitster@pobox.com>
           Thu, 15 Sep 2022 23:09:47 +0000 (16:09 -0700)
committer  Junio C Hamano <gitster@pobox.com>
           Thu, 15 Sep 2022 23:09:47 +0000 (16:09 -0700)
"git fetch" over protocol v2 sent an incorrect ref prefix request
to the server and made "git pull" with configured fetch refspec
that does not cover the remote branch to merge with fail, which has
been corrected.

* jk/proto-v2-ref-prefix-fix:
  fetch: add branch.*.merge to default ref-prefix extension
  fetch: stop checking for NULL transport->remote in do_fetch()
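
A minimal reproduction sketch of the scenario this topic fixes, using
hypothetical URLs and branch names (it assumes the server also has a
"topic" branch):

    git clone https://example.com/repo.git work && cd work
    # Narrow the fetch refspec so it covers only "main".
    git config remote.origin.fetch '+refs/heads/main:refs/remotes/origin/main'
    # A local branch configured to merge the server's refs/heads/topic,
    # which the refspec above does not cover.
    git checkout -b topic
    git config branch.topic.remote origin
    git config branch.topic.merge refs/heads/topic
    # Before the fix, the protocol v2 ref-prefix request was derived only
    # from the fetch refspec, so refs/heads/topic was never advertised
    # and the merge step of "git pull" could not find it.
    git -c protocol.version=2 pull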

582 files changed:
.github/workflows/main.yml
.gitignore
Documentation/CodingGuidelines
Documentation/Makefile
Documentation/RelNotes/2.37.4.txt [new file with mode: 0644]
Documentation/RelNotes/2.38.0.txt [new file with mode: 0644]
Documentation/config/core.txt
Documentation/config/diff.txt
Documentation/config/difftool.txt
Documentation/config/format.txt
Documentation/config/grep.txt
Documentation/config/log.txt
Documentation/config/lsrefs.txt
Documentation/config/notes.txt
Documentation/config/pack.txt
Documentation/config/protocol.txt
Documentation/config/push.txt
Documentation/config/rebase.txt
Documentation/config/safe.txt
Documentation/config/sendemail.txt
Documentation/config/transfer.txt
Documentation/config/uploadpack.txt
Documentation/git-add.txt
Documentation/git-am.txt
Documentation/git-apply.txt
Documentation/git-archive.txt
Documentation/git-blame.txt
Documentation/git-branch.txt
Documentation/git-bugreport.txt
Documentation/git-bundle.txt
Documentation/git-cat-file.txt
Documentation/git-checkout.txt
Documentation/git-cherry-pick.txt
Documentation/git-clean.txt
Documentation/git-clone.txt
Documentation/git-column.txt
Documentation/git-commit-graph.txt
Documentation/git-commit.txt
Documentation/git-config.txt
Documentation/git-diagnose.txt [new file with mode: 0644]
Documentation/git-diff.txt
Documentation/git-difftool.txt
Documentation/git-fast-import.txt
Documentation/git-fetch.txt
Documentation/git-format-patch.txt
Documentation/git-fsck.txt
Documentation/git-gc.txt
Documentation/git-grep.txt
Documentation/git-help.txt
Documentation/git-imap-send.txt
Documentation/git-init.txt
Documentation/git-interpret-trailers.txt
Documentation/git-log.txt
Documentation/git-ls-files.txt
Documentation/git-mailinfo.txt
Documentation/git-maintenance.txt
Documentation/git-merge-tree.txt
Documentation/git-merge.txt
Documentation/git-mergetool.txt
Documentation/git-multi-pack-index.txt
Documentation/git-notes.txt
Documentation/git-push.txt
Documentation/git-range-diff.txt
Documentation/git-rebase.txt
Documentation/git-revert.txt
Documentation/git-send-email.txt
Documentation/git-show-branch.txt
Documentation/git-stash.txt
Documentation/git-switch.txt
Documentation/git-update-index.txt
Documentation/git-upload-pack.txt
Documentation/git.txt
Documentation/gitformat-bundle.txt [moved from Documentation/technical/bundle-format.txt with 79% similarity]
Documentation/gitformat-chunk.txt [moved from Documentation/technical/chunk-format.txt with 89% similarity]
Documentation/gitformat-commit-graph.txt [moved from Documentation/technical/commit-graph-format.txt with 87% similarity]
Documentation/gitformat-index.txt [moved from Documentation/technical/index-format.txt with 98% similarity]
Documentation/gitformat-pack.txt [moved from Documentation/technical/pack-format.txt with 72% similarity]
Documentation/gitformat-signature.txt [moved from Documentation/technical/signature-format.txt with 96% similarity]
Documentation/gitprotocol-capabilities.txt [moved from Documentation/technical/protocol-capabilities.txt with 96% similarity]
Documentation/gitprotocol-common.txt [moved from Documentation/technical/protocol-common.txt with 89% similarity]
Documentation/gitprotocol-http.txt [moved from Documentation/technical/http-protocol.txt with 97% similarity]
Documentation/gitprotocol-pack.txt [moved from Documentation/technical/pack-protocol.txt with 98% similarity]
Documentation/gitprotocol-v2.txt [moved from Documentation/technical/protocol-v2.txt with 97% similarity]
Documentation/gitremote-helpers.txt
Documentation/howto/recover-corrupted-object-harder.txt
Documentation/includes/cmd-config-section-all.txt [new file with mode: 0644]
Documentation/includes/cmd-config-section-rest.txt [new file with mode: 0644]
Documentation/lint-man-section-order.perl
Documentation/rerere-options.txt [new file with mode: 0644]
Documentation/rev-list-options.txt
Documentation/technical/api-parse-options.txt
Documentation/technical/api-simple-ipc.txt
Documentation/technical/api-trace2.txt
Documentation/technical/bitmap-format.txt
Documentation/technical/bundle-uri.txt [new file with mode: 0644]
Documentation/technical/cruft-packs.txt [deleted file]
Documentation/technical/hash-function-transition.txt
Documentation/technical/long-running-process-protocol.txt
Documentation/technical/packfile-uri.txt
Documentation/technical/partial-clone.txt
Documentation/technical/remembering-renames.txt
Documentation/technical/scalar.txt [new file with mode: 0644]
Documentation/user-manual.txt
GIT-VERSION-GEN
INSTALL
Makefile
RelNotes
add-interactive.c
add-patch.c
archive-tar.c
archive-zip.c
archive.c
archive.h
attr.c
bisect.c
blame.c
block-sha1/sha1.c
bloom.c
branch.c
branch.h
builtin.h
builtin/am.c
builtin/archive.c
builtin/bisect--helper.c
builtin/blame.c
builtin/branch.c
builtin/bugreport.c
builtin/bundle.c
builtin/cat-file.c
builtin/check-ref-format.c
builtin/checkout.c
builtin/clone.c
builtin/commit-graph.c
builtin/config.c
builtin/describe.c
builtin/diagnose.c [new file with mode: 0644]
builtin/difftool.c
builtin/env--helper.c
builtin/fast-export.c
builtin/fast-import.c
builtin/fetch.c
builtin/fsck.c
builtin/gc.c
builtin/grep.c
builtin/help.c
builtin/hook.c
builtin/log.c
builtin/ls-files.c
builtin/ls-tree.c
builtin/merge-file.c
builtin/merge-tree.c
builtin/merge.c
builtin/multi-pack-index.c
builtin/mv.c
builtin/name-rev.c
builtin/notes.c
builtin/pack-objects.c
builtin/pull.c
builtin/range-diff.c
builtin/rebase.c
builtin/receive-pack.c
builtin/reflog.c
builtin/remote.c
builtin/repack.c
builtin/replace.c
builtin/reset.c
builtin/rev-list.c
builtin/rev-parse.c
builtin/revert.c
builtin/rm.c
builtin/shortlog.c
builtin/show-branch.c
builtin/show-ref.c
builtin/sparse-checkout.c
builtin/stash.c
builtin/submodule--helper.c
builtin/symbolic-ref.c
builtin/unpack-objects.c
builtin/worktree.c
bulk-checkin.c
bundle-uri.c [new file with mode: 0644]
bundle-uri.h [new file with mode: 0644]
cache-tree.c
cache.h
ci/lib.sh
color.c
command-list.txt
commit-graph.c
commit-graph.h
commit.c
compat/disk.h [new file with mode: 0644]
compat/fsmonitor/fsm-settings-win32.c
compat/nonblock.c [new file with mode: 0644]
compat/nonblock.h [new file with mode: 0644]
compat/terminal.c
config.c
config.h
configure.ac
contrib/coccinelle/array.cocci
contrib/coccinelle/tests/free.c [new file with mode: 0644]
contrib/coccinelle/tests/free.res [new file with mode: 0644]
contrib/coccinelle/tests/unused.c [new file with mode: 0644]
contrib/coccinelle/tests/unused.res [new file with mode: 0644]
contrib/coccinelle/unused.cocci [new file with mode: 0644]
contrib/completion/git-prompt.sh
contrib/credential/netrc/t-git-credential-netrc.sh
contrib/scalar/README.md [deleted file]
contrib/scalar/scalar.c
contrib/scalar/scalar.txt
contrib/scalar/t/Makefile
contrib/scalar/t/t9099-scalar.sh
contrib/subtree/git-subtree.sh
contrib/subtree/t/Makefile
convert.c
delta-islands.c
diagnose.c [new file with mode: 0644]
diagnose.h [new file with mode: 0644]
diff-no-index.c
diff.c
dir.c
dir.h
environment.c
fetch-pack.c
fuzz-commit-graph.c
git-compat-util.h
git-instaweb.sh
git-merge-resolve.sh
git-p4.py
git-sh-setup.sh
git-submodule.sh
git.c
gitweb/Makefile
gitweb/gitweb.perl
gpg-interface.c
gpg-interface.h
grep.c
grep.h
hash.h
hashmap.c
help.c
help.h
http-backend.c
http.c
http.h
ident.c
list-objects-filter-options.c
ll-merge.c
log-tree.c
ls-refs.c
merge-ort-wrappers.c
merge-ort.c
merge-ort.h
merge-recursive.c
mergesort.c [deleted file]
mergesort.h
midx.c
midx.h
name-hash.c
negotiator/default.c
negotiator/skipping.c
notes.c
object-file.c
object-name.c
object-store.h
object.c
object.h
oidmap.c
pack-bitmap-write.c
pack-bitmap.c
pack-bitmap.h
pack-revindex.h
packfile.c
pager.c
parse-options.c
parse-options.h
patch-ids.c
pathspec.c
pathspec.h
pkt-line.c
pkt-line.h
ppc/sha1.c [deleted file]
ppc/sha1.h [deleted file]
ppc/sha1ppc.S [deleted file]
preload-index.c
pretty.c
promisor-remote.c
range-diff.c
rebase-interactive.c
ref-filter.c
reflog.c
refs.c
refs.h
refs/files-backend.c
refs/iterator.c
refs/packed-backend.c
refspec.h
reftable/reader.c
remote-curl.c
remote.c
remote.h
replace-object.c
repo-settings.c
repository.h
revision.c
revision.h
run-command.c
send-pack.c
send-pack.h
sequencer.c
sequencer.h
server-info.c
setup.c
sha256/nettle.h [new file with mode: 0644]
shallow.c
shared.mak
strbuf.c
streaming.c
strmap.c
sub-process.c
submodule-config.c
submodule.c
submodule.h
t/Makefile
t/README
t/annotate-tests.sh
t/helper/test-bloom.c
t/helper/test-config.c
t/helper/test-crontab.c
t/helper/test-delta.c
t/helper/test-dump-cache-tree.c
t/helper/test-fast-rebase.c
t/helper/test-hash.c
t/helper/test-json-writer.c
t/helper/test-mergesort.c
t/helper/test-parse-options.c
t/helper/test-path-utils.c
t/helper/test-ref-store.c
t/helper/test-regex.c
t/helper/test-rot13-filter.c [new file with mode: 0644]
t/helper/test-scrap-cache-tree.c
t/helper/test-serve-v2.c
t/helper/test-submodule-config.c
t/helper/test-submodule.c [new file with mode: 0644]
t/helper/test-tool-utils.h [new file with mode: 0644]
t/helper/test-tool.c
t/helper/test-tool.h
t/helper/test-urlmatch-normalization.c
t/helper/test-userdiff.c
t/lib-bitmap.sh
t/lib-perl.sh [new file with mode: 0644]
t/lib-rebase.sh
t/lib-submodule-update.sh
t/perf/lib-bitmap.sh
t/perf/p0004-lazy-init-name-hash.sh
t/perf/p0006-read-tree-checkout.sh
t/perf/p0071-sort.sh
t/perf/p2000-sparse-operations.sh
t/perf/p5310-pack-bitmaps.sh
t/perf/p5311-pack-bitmaps-fetch.sh
t/perf/p5312-pack-bitmaps-revs.sh [new file with mode: 0755]
t/perf/p5326-multi-pack-bitmaps.sh
t/perf/p7527-builtin-fsmonitor.sh
t/t0000-basic.sh
t/t0002-gitfile.sh
t/t0003-attributes.sh
t/t0004-unwritable.sh
t/t0008-ignores.sh
t/t0012-help.sh
t/t0015-hash.sh
t/t0019-json-writer.sh
t/t0021-conversion.sh
t/t0021/rot13-filter.pl [deleted file]
t/t0027-auto-crlf.sh
t/t0028-working-tree-encoding.sh
t/t0032-reftable-unittest.sh
t/t0033-safe-directory.sh
t/t0035-safe-bare-repository.sh [new file with mode: 0755]
t/t0040-parse-options.sh
t/t0050-filesystem.sh
t/t0060-path-utils.sh
t/t0071-sort.sh
t/t0090-cache-tree.sh
t/t0091-bugreport.sh
t/t0092-diagnose.sh [new file with mode: 0755]
t/t0095-bloom.sh
t/t0110-urlmatch-normalization.sh
t/t0202-gettext-perl.sh
t/t0203-gettext-setlocale-sanity.sh
t/t0212/parse_events.perl
t/t1006-cat-file.sh
t/t1011-read-tree-sparse-checkout.sh
t/t1020-subdirectory.sh
t/t1051-large-conversion.sh
t/t1060-object-corruption.sh
t/t1090-sparse-checkout-scope.sh
t/t1092-sparse-checkout-compatibility.sh
t/t1301-shared-repo.sh
t/t1401-symbolic-ref.sh
t/t1402-check-ref-format.sh
t/t1405-main-ref-store.sh
t/t1407-worktree-ref-store.sh
t/t1418-reflog-exists.sh
t/t1450-fsck.sh
t/t1500-rev-parse.sh
t/t1502-rev-parse-parseopt.sh
t/t1503-rev-parse-verify.sh
t/t1701-racy-split-index.sh
t/t2006-checkout-index-basic.sh
t/t2018-checkout-branch.sh
t/t2020-checkout-detach.sh
t/t2023-checkout-m.sh
t/t2080-parallel-checkout-basics.sh
t/t2082-parallel-checkout-attributes.sh
t/t2205-add-worktree-config.sh
t/t2400-worktree-add.sh
t/t2403-worktree-move.sh
t/t2407-worktree-heads.sh [new file with mode: 0755]
t/t3001-ls-files-others-exclude.sh
t/t3012-ls-files-dedup.sh
t/t3013-ls-files-format.sh [new file with mode: 0755]
t/t3206-range-diff.sh
t/t3301-notes.sh
t/t3304-notes-mixed.sh
t/t3305-notes-fanout.sh
t/t3307-notes-man.sh
t/t3404-rebase-interactive.sh
t/t3426-rebase-submodule.sh
t/t3507-cherry-pick-conflict.sh
t/t3701-add-interactive.sh
t/t3903-stash.sh
t/t3920-crlf-messages.sh
t/t4013-diff-various.sh
t/t4013/diff.log_--decorate=full_--all
t/t4013/diff.log_--decorate=full_--clear-decorations_--all [new file with mode: 0644]
t/t4013/diff.log_--decorate=full_--decorate-all_--all [new file with mode: 0644]
t/t4013/diff.log_--decorate_--all
t/t4013/diff.log_--decorate_--clear-decorations_--all [new file with mode: 0644]
t/t4013/diff.log_--decorate_--decorate-all_--all [new file with mode: 0644]
t/t4014-format-patch.sh
t/t4017-diff-retval.sh
t/t4020-diff-external.sh
t/t4044-diff-index-unique-abbrev.sh
t/t4051-diff-function-context.sh
t/t4057-diff-combined-paths.sh
t/t4069-remerge-diff.sh
t/t4114-apply-typechange.sh
t/t4140-apply-ita.sh
t/t4202-log.sh
t/t4203-mailmap.sh
t/t4207-log-decoration-colors.sh
t/t4301-merge-tree-write-tree.sh [new file with mode: 0755]
t/t5000-tar-tree.sh
t/t5001-archive-attr.sh
t/t5002-archive-attr-pattern.sh
t/t5003-archive-zip.sh
t/t5303-pack-corruption-resilience.sh
t/t5308-pack-detect-duplicates.sh
t/t5309-pack-delta-cycles.sh
t/t5310-pack-bitmaps.sh
t/t5311-pack-bitmaps-shallow.sh
t/t5314-pack-cycle-detection.sh
t/t5315-pack-objects-compression.sh
t/t5318-commit-graph.sh
t/t5321-pack-large-objects.sh
t/t5326-multi-pack-bitmaps.sh
t/t5327-multi-pack-bitmaps-rev.sh
t/t5329-pack-objects-cruft.sh
t/t5351-unpack-large-objects.sh [new file with mode: 0755]
t/t5402-post-merge-hook.sh
t/t5500-fetch-pack.sh
t/t5503-tagfollow.sh
t/t5505-remote.sh
t/t5516-fetch-push.sh
t/t5524-pull-msg.sh
t/t5541-http-push-smart.sh
t/t5544-pack-objects-hook.sh
t/t5550-http-fetch-dumb.sh
t/t5551-http-fetch-smart.sh
t/t5557-http-get.sh [new file with mode: 0755]
t/t5558-clone-bundle-uri.sh [new file with mode: 0755]
t/t5601-clone.sh
t/t5606-clone-options.sh
t/t5616-partial-clone.sh
t/t5703-upload-pack-ref-in-want.sh
t/t6001-rev-list-graft.sh
t/t6008-rev-list-submodule.sh
t/t6019-rev-list-ancestry-path.sh
t/t6101-rev-parse-parents.sh
t/t6102-rev-list-unexpected-objects.sh
t/t6115-rev-list-du.sh
t/t6132-pathspec-exclude.sh
t/t6134-pathspec-in-submodule.sh
t/t6400-merge-df.sh
t/t6402-merge-rename.sh
t/t6403-merge-file.sh
t/t6404-recursive-merge.sh
t/t6405-merge-symlinks.sh
t/t6406-merge-attr.sh
t/t6407-merge-binary.sh
t/t6408-merge-up-to-date.sh
t/t6411-merge-filemode.sh
t/t6413-merge-crlf.sh
t/t6416-recursive-corner-cases.sh
t/t6417-merge-ours-theirs.sh
t/t6421-merge-partial-clone.sh
t/t6422-merge-rename-corner-cases.sh
t/t6423-merge-rename-directories.sh
t/t6424-merge-unrelated-index-changes.sh
t/t6425-merge-rename-delete.sh
t/t6426-merge-skip-unneeded-updates.sh
t/t6427-diff3-conflict-markers.sh
t/t6428-merge-conflicts-sparse.sh
t/t6429-merge-sequence-rename-caching.sh
t/t6431-merge-criscross.sh
t/t6435-merge-sparse.sh
t/t6437-submodule-merge.sh
t/t6439-merge-co-error-msgs.sh
t/t7002-mv-sparse-checkout.sh
t/t7007-show.sh
t/t7060-wtstatus.sh
t/t7062-wtstatus-ignorecase.sh
t/t7063-status-untracked-cache.sh
t/t7110-reset-merge.sh
t/t7111-reset-table.sh
t/t7400-submodule-basic.sh
t/t7401-submodule-summary.sh
t/t7402-submodule-rebase.sh
t/t7406-submodule-update.sh
t/t7412-submodule-absorbgitdirs.sh
t/t7413-submodule-is-active.sh
t/t7414-submodule-mistakes.sh
t/t7418-submodule-sparse-gitmodules.sh
t/t7419-submodule-set-branch.sh
t/t7450-bad-git-dotfiles.sh
t/t7503-pre-commit-and-pre-merge-commit-hooks.sh
t/t7506-status-submodule.sh
t/t7507-commit-verbose.sh
t/t7600-merge.sh
t/t7607-merge-state.sh [new file with mode: 0755]
t/t7609-mergetool--lib.sh
t/t7810-grep.sh
t/t7812-grep-icase-non-ascii.sh
t/t7814-grep-recurse-submodules.sh
t/t7900-maintenance.sh
t/t8001-annotate.sh
t/t8002-blame.sh
t/t8007-cat-file-textconv.sh
t/t8010-cat-file-filters.sh
t/t8012-blame-colors.sh
t/t9100-git-svn-basic.sh
t/t9101-git-svn-props.sh
t/t9104-git-svn-follow-parent.sh
t/t9122-git-svn-author.sh
t/t9132-git-svn-broken-symlink.sh
t/t9162-git-svn-dcommit-interactive.sh
t/t9301-fast-import-notes.sh
t/t9700-perl-git.sh
t/t9901-git-web--browse.sh
t/t9903-bash-prompt.sh
t/test-lib-functions.sh
t/test-lib.sh
tempfile.c
tempfile.h
trace2/tr2_tgt_event.c
trace2/tr2_tgt_normal.c
trace2/tr2_tgt_perf.c
trailer.c
transport.c
unpack-trees.c
upload-pack.c
walker.c
wrapper.c
wt-status.c
xdiff/xdiff.h
xdiff/xdiffi.c
xdiff/xdiffi.h
xdiff/xhistogram.c
xdiff/xmacros.h
xdiff/xpatience.c
xdiff/xprepare.c
xdiff/xutils.c
xdiff/xutils.h

index cd1f52692a53678fd74cb12ed469964fd5b7973c..831f4df56c51dc96b17465a5120d28bf11a63351 100644 (file)
@@ -309,7 +309,7 @@ jobs:
     if: needs.ci-config.outputs.enabled == 'yes'
     env:
       jobname: StaticAnalysis
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     steps:
     - uses: actions/checkout@v2
     - run: ci/install-dependencies.sh
index a45221576418e77de53f2776912ad62cce00c85b..80b530bbed2c80814ac74956d329d277d85bba86 100644 (file)
@@ -53,6 +53,7 @@
 /git-cvsimport
 /git-cvsserver
 /git-daemon
+/git-diagnose
 /git-diff
 /git-diff-files
 /git-diff-index
 /git-worktree
 /git-write-tree
 /git-core-*/?*
+/git.res
 /gitweb/GITWEB-BUILD-OPTIONS
 /gitweb/gitweb.cgi
 /gitweb/static/gitweb.js
 *.hcc
 *.obj
 *.lib
-*.res
 *.sln
 *.sp
 *.suo
index 4c756be517acbfb0e18daf30bf65cb177dacef20..9fca21cc5f9958fdfd044c5d34a2c142c87de51a 100644 (file)
@@ -606,7 +606,7 @@ Writing Documentation:
     avoidance of gendered pronouns.
 
   - When it becomes awkward to stick to this style, prefer "you" when
-    addressing the the hypothetical user, and possibly "we" when
+    addressing the hypothetical user, and possibly "we" when
     discussing how the program might react to the user.  E.g.
 
       You can use this option instead of --xyz, but we might remove
index 4f801f4e4c9470c42ae7933a68970a4984bc54b5..849af6da307b412d8be1d78eb62235d27bcbe79c 100644 (file)
@@ -24,10 +24,21 @@ MAN1_TXT += gitweb.txt
 
 # man5 / man7 guides (note: new guides should also be added to command-list.txt)
 MAN5_TXT += gitattributes.txt
+MAN5_TXT += gitformat-bundle.txt
+MAN5_TXT += gitformat-chunk.txt
+MAN5_TXT += gitformat-commit-graph.txt
+MAN5_TXT += gitformat-index.txt
+MAN5_TXT += gitformat-pack.txt
+MAN5_TXT += gitformat-signature.txt
 MAN5_TXT += githooks.txt
 MAN5_TXT += gitignore.txt
 MAN5_TXT += gitmailmap.txt
 MAN5_TXT += gitmodules.txt
+MAN5_TXT += gitprotocol-capabilities.txt
+MAN5_TXT += gitprotocol-common.txt
+MAN5_TXT += gitprotocol-http.txt
+MAN5_TXT += gitprotocol-pack.txt
+MAN5_TXT += gitprotocol-v2.txt
 MAN5_TXT += gitrepository-layout.txt
 MAN5_TXT += gitweb.conf.txt
 
@@ -51,6 +62,7 @@ HOWTO_TXT += $(wildcard howto/*.txt)
 
 DOC_DEP_TXT += $(wildcard *.txt)
 DOC_DEP_TXT += $(wildcard config/*.txt)
+DOC_DEP_TXT += $(wildcard includes/*.txt)
 
 ifdef MAN_FILTER
 MAN_TXT = $(filter $(MAN_FILTER),$(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT))
@@ -95,26 +107,17 @@ TECH_DOCS += MyFirstObjectWalk
 TECH_DOCS += SubmittingPatches
 TECH_DOCS += ToolsForGit
 TECH_DOCS += technical/bitmap-format
-TECH_DOCS += technical/bundle-format
-TECH_DOCS += technical/cruft-packs
+TECH_DOCS += technical/bundle-uri
 TECH_DOCS += technical/hash-function-transition
-TECH_DOCS += technical/http-protocol
-TECH_DOCS += technical/index-format
 TECH_DOCS += technical/long-running-process-protocol
 TECH_DOCS += technical/multi-pack-index
-TECH_DOCS += technical/pack-format
 TECH_DOCS += technical/pack-heuristics
-TECH_DOCS += technical/pack-protocol
 TECH_DOCS += technical/parallel-checkout
 TECH_DOCS += technical/partial-clone
-TECH_DOCS += technical/protocol-capabilities
-TECH_DOCS += technical/protocol-common
-TECH_DOCS += technical/protocol-v2
 TECH_DOCS += technical/racy-git
 TECH_DOCS += technical/reftable
 TECH_DOCS += technical/send-pack-pipeline
 TECH_DOCS += technical/shallow
-TECH_DOCS += technical/signature-format
 TECH_DOCS += technical/trivial-merge
 SP_ARTICLES += $(TECH_DOCS)
 SP_ARTICLES += technical/api-index
@@ -290,6 +293,8 @@ cmds_txt = cmds-ancillaryinterrogators.txt \
        cmds-synchingrepositories.txt \
        cmds-synchelpers.txt \
        cmds-guide.txt \
+       cmds-developerinterfaces.txt \
+       cmds-userinterfaces.txt \
        cmds-purehelpers.txt \
        cmds-foreignscminterface.txt
 
diff --git a/Documentation/RelNotes/2.37.4.txt b/Documentation/RelNotes/2.37.4.txt
new file mode 100644 (file)
index 0000000..7321763
--- /dev/null
@@ -0,0 +1,31 @@
+Git 2.37.4 Release Notes
+========================
+
+This primarily is to backport various fixes accumulated on the 'master'
+front since 2.37.3.
+
+Fixes since v2.37.3
+-------------------
+
+ * An earlier optimization discarded a tree-object buffer that is
+   still in use, which has been corrected.
+
+ * Fix deadlocks between the main Git process and a subprocess spawned
+   via the pipe_command() API, which could kill "git add -p", recently
+   reimplemented in C.
+
+ * xcalloc(), imitating calloc(), takes "number of elements of the
+   array", and "size of a single element", in this order.  A call that
+   does not follow this ordering has been corrected.
+
+ * The preload-index codepath made copies of pathspec to give to
+   multiple threads, which were left leaked.
+
+ * Update the version of Ubuntu used for GitHub Actions CI from 18.04
+   to 22.04.
+
+ * The auto-stashed local changes created by "git merge --autostash"
+   were mixed into a conflicted state left in the working tree, which
+   has been corrected.
+
+Also contains other minor documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.38.0.txt b/Documentation/RelNotes/2.38.0.txt
new file mode 100644 (file)
index 0000000..fe04b31
--- /dev/null
@@ -0,0 +1,384 @@
+Git v2.38 Release Notes
+=======================
+
+UI, Workflows & Features
+
+ * "git remote show [-n] frotz" now pays attention to negative
+   pathspec.
+
+ * "git push" sometimes perform poorly when reachability bitmaps are
+   used, even in a repository where other operations are helped by
+   bitmaps.  The push.useBitmaps configuration variable is introduced
+   to allow disabling use of reachability bitmaps only for "git push".
+
+ * "git grep -m<max-hits>" is a way to limit the hits shown per file.
+
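A quick illustration of the new per-file limit, with a hypothetical
pattern and pathspec:

    # Show at most three matching lines per file.
    git grep -m 3 'TODO' -- '*.c'
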
+ * "git merge-tree" learned a new mode where it takes two commits and
+   computes a tree that would result in the merge commit, if the
+   histories leading to these two commits were to be merged.
+
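For example (hypothetical branch names), the new mode can be driven
server-side without touching a working tree or the index:

    # Print the OID of the tree a merge of the two branches would
    # produce, plus conflict information when the merge is not clean.
    git merge-tree --write-tree side-a side-b
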
+ * "git mv A B" in a sparsely populated working tree can be asked to
+   move a path between directories that are "in cone" (i.e. expected
+   to be materialized in the working tree) and "out of cone"
+   (i.e. expected to be hidden).  The handling of such cases has been
+   improved.
+
+ * Earlier, HTTP transport clients learned to tell the server side
+   what locale they are in by sending Accept-Language HTTP header, but
+   this was done only for some requests but not others.
+
+ * Introduce a discovery.barerepository configuration variable that
+   allows users to forbid discovery of bare repositories.
+
+ * Various messages that come from the pack-bitmap codepaths have been
+   tweaked.
+
+ * "git rebase -i" learns to update branches whose tip appear in the
+   rebased range with "--update-refs" option.
+
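A hedged example, assuming a stack of hypothetical branches whose tips
lie inside the range being rebased:

    # While rebasing the stack onto main, also move the tips of the
    # intermediate branches to their rewritten commits.
    git rebase -i --update-refs main
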
+ * "git ls-files" learns the "--format" option to tweak its output.
+
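For instance, using two of the documented placeholders:

    # List tracked paths together with the blob object names recorded
    # in the index.
    git ls-files --format='%(objectname) %(path)'
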
+ * "git cat-file" learned an option to use the mailmap when showing
+   commit and tag objects.
+
+ * When "git merge" finds that it cannot perform a merge, it should
+   restore the working tree to the state before the command was
+   initiated, but in some corner cases it didn't.
+
+ * Operating modes like "--batch" of "git cat-file" command learned to
+   take NUL-terminated input, instead of one-item-per-line.
+
+ * "git rm" has become more aware of the sparse-index feature.
+
+ * "git rev-list --disk-usage" learned to take an optional value
+   "human" to show the reported value in human-readable format, like
+   "3.40MiB".
+
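For example:

    # Report the on-disk size of all objects reachable from HEAD in a
    # human-readable unit instead of a raw byte count.
    git rev-list --objects --disk-usage=human HEAD
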
+ * The "diagnose" feature to create a zip archive for diagnostic
+   material has been lifted from "scalar" and made into a feature of
+   "git bugreport".
+
+ * The namespaces used by "log --decorate" from the "refs/" hierarchy
+   by default have been tightened.
+
+ * "git rev-list --ancestry-path=C A..B" is a natural extension of
+   "git rev-list A..B"; instead of choosing a subset of A..B to those
+   that have ancestry relationship with A, it lets a subset with
+   ancestry relationship with C.
+
+ * "scalar" now enables built-in fsmonitor on enlisted repositories,
+   when able.
+
+ * The bash prompt (in contrib/) learned to optionally indicate when
+   the index is unmerged.
+
+ * "git clone" command learned the "--bundle-uri" option to coordinate
+   with hosting sites the use of pre-prepared bundle files.
+
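A sketch with hypothetical URLs; the bundle must be prepared and served
by the hosting side:

    # Seed most of the history from a CDN-hosted bundle, then fetch the
    # remainder from the origin server as usual.
    git clone --bundle-uri=https://cdn.example.com/repo.bundle \
        https://example.com/repo.git
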
+ * "git range-diff" learned to honor pathspec argument if given.
+
+ * "git format-patch --from=<ident>" can be told to add an in-body
+   "From:" line even for commits that are authored by the given
+   <ident> with the "--force-in-body-from" option.
+
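For example, with a hypothetical author identity:

    # Add an in-body "From:" line even though the patch author matches
    # the identity given with --from.
    git format-patch -1 --from='A U Thor <author@example.com>' \
        --force-in-body-from
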
+ * The built-in fsmonitor refuses to work on network-mounted
+   repositories; a configuration knob for users to override this has
+   been introduced.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * Collection of what is referenced by objects in promisor packs has
+   been optimized to inspect these objects in the in-pack order.
+
+ * Introduce a helper to see if a branch is already being worked on
+   (hence should not be newly checked out in a working tree), which
+   performs much better than the existing find_shared_symref() to
+   replace many uses of the latter.
+
+ * Teach "git archive" to (optionally and then by default) avoid
+   spawning an external "gzip" process when creating ".tar.gz" (and
+   ".tgz") archives.
+
+ * Allow large objects read from a packstream to be streamed into a
+   loose object file straight, without having to keep them in-core in
+   their entirety.
+
+ * Further preparation to turn git-submodule.sh into a builtin
+   continues.
+
+ * Apply Coccinelle rule to turn raw memmove() into MOVE_ARRAY() cpp
+   macro, which would improve maintainability and readability.
+
+ * Teach "make all" to build gitweb as well.
+
+ * Tweak tests so that they still work when the "git init" template
+   does not create the .git/info directory.
+
+ * Add Coccinelle rules to detect the pattern of initializing and then
+   finalizing a structure without using it in between at all, which
+   happens after code restructuring and which the compilers fail to
+   recognize as an unused variable.
+
+ * The code to convert between GPG trust level strings and internal
+   constants we use to represent them has been cleaned up.
+
+ * Support for libnettle as SHA256 implementation has been added.
+
+ * The way "git multi-pack" uses parse-options API has been improved.
+
+ * A coccinelle rule (in contrib/) to encourage use of COPY_ARRAY
+   macro has been improved.
+
+ * API tweak to make it easier to run fuzz testing on commit-graph parser.
+
+ * Omit fsync-related trace2 entries when their values are all zero.
+
+ * The codepath to write multi-pack index has been taught to release a
+   large chunk of memory that holds an array of objects in the packs,
+   as soon as it is done with the array, to reduce memory consumption.
+
+ * Add a level of redirection to array allocation API in xdiff part,
+   to make it easier to share with the libgit2 project.
+
+ * "git fetch" client logs the partial clone filter used in the trace2
+   output.
+
+ * The "bundle URI" design gets documented.
+
+ * The common ancestor negotiation exchange during a "git fetch"
+   session now leaves a trace log.
+
+ * Test portability improvements.
+   (merge 4d1d843be7 mt/rot13-in-c later to maint).
+
+ * The "subcommand" mode is introduced to parse-options API and update
+   the command line parser of Git commands with subcommands.
+
+ * The pack bitmap file gained a bitmap-lookup table to speed up
+   locating the necessary bitmap for a given commit.
+
+ * The assembly version of SHA-1 implementation for PPC has been
+   removed.
+
+ * The server side that responds to "git fetch" and "git clone"
+   requests has been optimized by allowing it to send objects in its
+   object store without recomputing and validating the object names.
+
+ * Annotate function parameters that are not used (but cannot be
+   removed for structural reasons), to prepare us to later compile
+   with -Wunused warning turned on.
+
+ * Share the text used to explain configuration variables used by "git
+   <subcmd>" in "git help <subcmd>" with the text from "git help config".
+
+
+Fixes since v2.37
+-----------------
+
+ * Rewrite of "git add -i" in C that appeared in Git 2.25 didn't
+   correctly record a removed file to the index, which was fixed.
+
+ * Certain diff options are currently ignored when combined-diff is
+   shown; mark them as incompatible with the feature.
+
+ * Adjust technical/bitmap-format to be formatted by AsciiDoc, and
+   add some missing information to the documentation.
+
+ * Fixes for tests when the source directory has unusual characters in
+   its path, e.g. whitespaces, double-quotes, etc.
+
+ * "git mktree --missing" lazily fetched objects that are missing from
+   the local object store, which was totally unnecessary for the purpose
+   of creating the tree object(s) from its input.
+
+ * Give _() markings to fatal/warning/usage: labels that are shown in
+   front of these messages.
+
+ * References to commands-to-be-typed-literally in "git rebase"
+   documentation mark-up have been corrected.
+
+ * In a non-bare repository, the behavior of Git when the
+   core.worktree configuration variable points at a directory that has
+   a repository as its subdirectory regressed in Git 2.27 days.
+
+ * Recent update to vimdiff layout code has been made more robust
+   against different end-user vim settings.
+
+ * Plug various memory leaks, both in the main code and in test-tool
+   commands.
+
+ * Fixes a long-standing corner case bug around directory renames in
+   the merge-ort strategy.
+
+ * The resolve-undo information in the index was not protected against
+   GC, which has been corrected.
+
+ * A corner case bug where lazily fetching objects from a promisor
+   remote resulted in infinite recursion has been corrected.
+
+ * "git clone" from a repository with some ref whose HEAD is unborn
+   did not set the HEAD in the resulting repository correctly, which
+   has been corrected.
+
+ * An earlier attempt to plug leaks placed a clean-up label to jump to
+   at a bogus place, which has been corrected.
+
+ * Variable quoting fix in the vimdiff driver of "git mergetool"
+
+ * "git shortlog -n" relied on the underlying qsort() to be stable,
+   which it shouldn't have.  Fixed.
+
+ * A fix for a regression in test framework.
+
+ * mkstemp() emulation on Windows has been improved.
+
+ * Add missing documentation for "include" and "includeIf" features in
+   "git config" file format, which incidentally teaches the command
+   line completion to include them in its offerings.
+
+ * Avoid "white/black-list" in documentation and code comments.
+
+ * Workaround for a compiler warning against use of die() in
+   osx-keychain (in contrib/).
+
+ * Workaround for a false positive compiler warning.
+
+ * "git p4" working on UTF-16 files on Windows did not implement
+   CRLF-to-LF conversion correctly, which has been corrected.
+
+ * "git p4" did not handle non-ASCII client name well, which has been
+   corrected.
+
+ * "rerere-train" script (in contrib/) used to honor commit.gpgSign
+   while recreating the throw-away merges.
+
+ * "git checkout" miscounted the paths it updated, which has been
+   corrected.
+
+ * Fix for a bug that made write-tree fail to write out a
+   non-existent index as a tree, introduced in 2.37.
+
+ * There was a bug in the codepath to upgrade generation information
+   in commit-graph from v1 to v2 format, which has been corrected.
+
+ * Gitweb had a legacy URL shortener that was specific to the way
+   projects hosted on kernel.org used to (but no longer) work, which
+   has been removed.
+
+ * Fix build procedure for Windows that uses CMake so that it can pick
+   up the shell interpreter from local installation location.
+
+ * Conditionally allow building Python interpreter on Windows
+
+ * Fix to lstat() emulation on Windows.
+
+ * Older gcc with -Wall complains about the universal zero initializer
+   "struct s = { 0 };" idiom, which makes developers' lives
+   inconvenient (as -Werror is enabled by DEVELOPER=YesPlease).  The
+   build procedure has been tweaked to help these compilers.
+
+ * Plug memory leaks in the failure code path in the "merge-ort" merge
+   strategy backend.
+
+ * "git symbolic-ref symref non..sen..se" is now diagnosed as an error.
+
+ * A follow-up fix to a fix for a regression in 2.36 around hooks.
+
+ * Avoid repeatedly running getconf to ask the libc version in the
+   test suite, and instead just ask it once per script.
+
+ * Platform-specific code that determines if a directory is OK to use
+   as a repository has been taught to report more details, especially
+   on Windows.
+
+ * "vimdiff3" regression fix.
+
+ * "git fsck" reads mode from tree objects but canonicalizes the mode
+   before passing it to the logic to check object sanity, which has
+   hidden broken tree objects from the checking logic.  This has been
+   corrected, but to help existing projects with broken tree objects
+   that they cannot fix retroactively, the severity of anomalies this
+   code detects has been demoted to "info" for now.
+
+ * Fixes to sparse index compatibility work for "reset" and "checkout"
+   commands.
+
+ * An earlier optimization discarded a tree-object buffer that is
+   still in use, which has been corrected.
+   (merge 1490d7d82d jk/is-promisor-object-keep-tree-in-use later to maint).
+
+ * Fix deadlocks between the main Git process and a subprocess spawned
+   via the pipe_command() API, which could kill "git add -p", recently
+   reimplemented in C.
+   (merge 716c1f649e jk/pipe-command-nonblock later to maint).
+
+ * The sequencer machinery translated messages left in the reflog by
+   mistake, which has been corrected.
+
+ * xcalloc(), imitating calloc(), takes "number of elements of the
+   array", and "size of a single element", in this order.  A call that
+   does not follow this ordering has been corrected.
+   (merge c4bbd9bb8f sg/xcalloc-cocci-fix later to maint).
+
+ * The preload-index codepath made copies of pathspec to give to
+   multiple threads, which were left leaked.
+   (merge 23578904da ad/preload-plug-memleak later to maint).
+
+ * Update the version of Ubuntu used for GitHub Actions CI from 18.04
+   to 22.04.
+   (merge ef46584831 ds/github-actions-use-newer-ubuntu later to maint).
+
+ * The auto-stashed local changes created by "git merge --autostash"
+   were mixed into a conflicted state left in the working tree, which
+   has been corrected.
+   (merge d3a9295ada en/merge-unstash-only-on-clean-merge later to maint).
+
+ * Multi-pack index got corrupted when preferred pack changed from one
+   pack to another in a certain way, which has been corrected.
+   (merge 99e4d084ff tb/midx-with-changing-preferred-pack-fix later to maint).
+
+ * The clean-up of temporary files created via mks_tempfile_dt() was
+   racy and attempted to unlink() the leading directory when signals
+   are involved, which has been corrected.
+   (merge babe2e0559 rs/tempfile-cleanup-race-fix later to maint).
+
+ * FreeBSD portability fix for "git maintenance" that spawns "crontab"
+   to schedule tasks.
+   (merge ee69e7884e bc/gc-crontab-fix later to maint).
+
+ * Those who use diff-so-fancy as the diff-filter noticed a regression
+   or two in the code that parses the diff output in the built-in
+   version of "add -p", which has been corrected.
+   (merge 0a101676e5 js/add-p-diff-parsing-fix later to maint).
+
+ * Segfault fix-up to an earlier fix to the topic to teach "git reset"
+   and "git checkout" work better in a sparse checkout.
+   (merge 037f8ea6d9 vd/sparse-reset-checkout-fixes later to maint).
+
+ * "git diff --no-index A B" managed its the pathnames of its two
+   input files rather haphazardly, sometimes leaking them.  The
+   command line argument processing has been straightened out to clean
+   it up.
+   (merge 2b43dd0eb5 rs/diff-no-index-cleanup later to maint).
+
+ * "git rev-list --verify-objects" ought to inspect the contents of
+   objects and notice corrupted ones, but it didn't when the commit
+   graph is in use, which has been corrected.
+   (merge b27ccae34b jk/rev-list-verify-objects-fix later to maint).
+
+ * More fixes to "add -p"
+   (merge 64ec8efb83 js/builtin-add-p-portability-fix later to maint).
+
+ * The parser in the script interface to parse-options in "git
+   rev-parse" has been updated to diagnose a bogus input correctly.
+   (merge f20b9c36d0 ow/rev-parse-parseopt-fix later to maint).
+
+ * The code that manages list-object-filter structure, used in partial
+   clones, leaked the instances, which has been plugged.
+   (merge 66eede4a37 jk/plug-list-object-filter-leaks later to maint).
+
+ * Other code cleanup, docfix, build fix, etc.
+   (merge 77b9e85c0f vd/fix-perf-tests later to maint).
+   (merge 0682bc43f5 jk/test-crontab-fixes later to maint).
+   (merge b46dd1726c cc/doc-trailer-whitespace-rules later to maint).
index 88bc799cf36e679088152bce690d3a125c0e0c69..37afbaf5a419d17f22e6598607cc13d121a8020e 100644 (file)
@@ -444,17 +444,32 @@ You probably do not need to adjust this value.
 Common unit suffixes of 'k', 'm', or 'g' are supported.
 
 core.bigFileThreshold::
-       Files larger than this size are stored deflated, without
-       attempting delta compression.  Storing large files without
-       delta compression avoids excessive memory usage, at the
-       slight expense of increased disk usage. Additionally files
-       larger than this size are always treated as binary.
+       The size of files considered "big", which as discussed below
+       changes the behavior of numerous git commands, as well as how
+       such files are stored within the repository. The default is
+       512 MiB. Common unit suffixes of 'k', 'm', or 'g' are
+       supported.
 +
-Default is 512 MiB on all platforms.  This should be reasonable
-for most projects as source code and other text files can still
-be delta compressed, but larger binary media files won't be.
+Files above the configured limit will be:
 +
-Common unit suffixes of 'k', 'm', or 'g' are supported.
+* Stored deflated in packfiles, without attempting delta compression.
++
+The default limit is primarily set with this use-case in mind. With it,
+most projects will have their source code and other text files delta
+compressed, but not larger binary media files.
++
+Storing large files without delta compression avoids excessive memory
+usage, at the slight expense of increased disk usage.
++
+* Will be treated as if they were labeled "binary" (see
+  linkgit:gitattributes[5]). e.g. linkgit:git-log[1] and
+  linkgit:git-diff[1] will not compute diffs for files above this limit.
++
+* Will generally be streamed when written, which avoids excessive
+memory usage, at the cost of some fixed overhead. Commands that make
+use of this include linkgit:git-archive[1],
+linkgit:git-fast-import[1], linkgit:git-index-pack[1],
+linkgit:git-unpack-objects[1] and linkgit:git-fsck[1].
 
 core.excludesFile::
        Specifies the pathname to the file that contains patterns to
index 32f84838ac1fa7cd54eb0d4a12f280ac4ac6fb87..35a7bf86d7774c67dbd129cc29b009aae46c81a9 100644 (file)
@@ -178,21 +178,6 @@ diff.<driver>.cachetextconv::
        Set this option to true to make the diff driver cache the text
        conversion outputs.  See linkgit:gitattributes[5] for details.
 
-diff.tool::
-       Controls which diff tool is used by linkgit:git-difftool[1].
-       This variable overrides the value configured in `merge.tool`.
-       The list below shows the valid built-in values.
-       Any other value is treated as a custom diff tool and requires
-       that a corresponding difftool.<tool>.cmd variable is defined.
-
-diff.guitool::
-       Controls which diff tool is used by linkgit:git-difftool[1] when
-       the -g/--gui flag is specified. This variable overrides the value
-       configured in `merge.guitool`. The list below shows the valid
-       built-in values. Any other value is treated as a custom diff tool
-       and requires that a corresponding difftool.<guitool>.cmd variable
-       is defined.
-
 include::../mergetools-diff.txt[]
 
 diff.indentHeuristic::
index 67625944804f6f93f6a5354056a83d039bac2e8c..a3f821121020c0b6a4c66fd74ad52a974ac07207 100644 (file)
@@ -1,6 +1,17 @@
-difftool.<tool>.path::
-       Override the path for the given tool.  This is useful in case
-       your tool is not in the PATH.
+diff.tool::
+       Controls which diff tool is used by linkgit:git-difftool[1].
+       This variable overrides the value configured in `merge.tool`.
+       The list below shows the valid built-in values.
+       Any other value is treated as a custom diff tool and requires
+       that a corresponding difftool.<tool>.cmd variable is defined.
+
+diff.guitool::
+       Controls which diff tool is used by linkgit:git-difftool[1] when
+       the -g/--gui flag is specified. This variable overrides the value
+       configured in `merge.guitool`. The list below shows the valid
+       built-in values. Any other value is treated as a custom diff tool
+       and requires that a corresponding difftool.<guitool>.cmd variable
+       is defined.
 
 difftool.<tool>.cmd::
        Specify the command to invoke the specified diff tool.
@@ -9,6 +20,17 @@ difftool.<tool>.cmd::
        file containing the contents of the diff pre-image and 'REMOTE'
        is set to the name of the temporary file containing the contents
        of the diff post-image.
++
+See the `--tool=<tool>` option in linkgit:git-difftool[1] for more details.
+
+difftool.<tool>.path::
+       Override the path for the given tool.  This is useful in case
+       your tool is not in the PATH.
+
+difftool.trustExitCode::
+       Exit difftool if the invoked diff tool returns a non-zero exit status.
++
+See the `--trust-exit-code` option in linkgit:git-difftool[1] for more details.
 
 difftool.prompt::
        Prompt before each invocation of the diff tool.
index fdbc06a4d2a837f768a213fd5ffdf28ad5a48315..c7303d8d9f004b5e413c4e5fe0dbb2f0df0c9171 100644 (file)
@@ -15,6 +15,10 @@ format.from::
        different.  If set to a non-boolean value, format-patch uses that
        value instead of your committer identity.  Defaults to false.
 
+format.forceInBodyFrom::
+       Provides the default value for the `--[no-]force-in-body-from`
+       option to format-patch.  Defaults to false.
+
 format.numbered::
        A boolean which can enable or disable sequence numbers in patch
        subjects.  It defaults to "auto" which enables it only if there
index 182edd813a5d3f970922b23e85f1000c32f4a3df..e521f20390ceaeab8701dd424f13962c3a439ffa 100644 (file)
@@ -17,8 +17,11 @@ grep.extendedRegexp::
        other than 'default'.
 
 grep.threads::
-       Number of grep worker threads to use.
-       See `grep.threads` in linkgit:git-grep[1] for more information.
+       Number of grep worker threads to use. If unset (or set to 0), Git will
+       use as many threads as the number of logical cores available.
+
+grep.fullName::
+       If set to true, enable `--full-name` option by default.
 
 grep.fallbackToNoIndex::
        If set to true, fall back to git grep --no-index if git grep
index 456eb07800cb1eef63e9d26f96a1ed35b6e65116..bc63bc3939c27d40969564657321866fe8236f21 100644 (file)
@@ -7,6 +7,10 @@ log.date::
        Set the default date-time mode for the 'log' command.
        Setting a value for log.date is similar to using 'git log''s
        `--date` option.  See linkgit:git-log[1] for details.
++
+If the format is set to "auto:foo" and the pager is in use, format
+"foo" will be the used for the date format. Otherwise "default" will
+be used.
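
A hedged example of the "auto:" form described above:

    # Use the "human" date format when the output goes to the pager,
    # and the default format otherwise.
    git config log.date auto:human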
 
 log.decorate::
        Print out the ref names of any commits that are shown by the log
@@ -18,6 +22,11 @@ log.decorate::
        names are shown. This is the same as the `--decorate` option
        of the `git log`.
 
+log.initialDecorationSet::
+       By default, `git log` only shows decorations for certain known ref
+       namespaces. If 'all' is specified, then show all refs as
+       decorations.
+
 log.excludeDecoration::
        Exclude the specified patterns from the log decorations. This is
        similar to the `--decorate-refs-exclude` command-line option, but
index adeda0f24d350aa084cecec0c57570420893223c..3d88fb0badba456ddabac242ee6657b44cbe5768 100644 (file)
@@ -1,7 +1,7 @@
 lsrefs.unborn::
        May be "advertise" (the default), "allow", or "ignore". If "advertise",
        the server will respond to the client sending "unborn" (as described in
-       protocol-v2.txt) and will advertise support for this feature during the
+       linkgit:gitprotocol-v2[5]) and will advertise support for this feature during the
        protocol v2 capability advertisement. "allow" is the same as
        "advertise" except that the server will not advertise support for this
        feature; this is useful for load-balanced servers that cannot be
index aeef56d49ae97dc0df99b1e63f0a842f36f3142e..c7c4811734b5c935c89b52eb3b3ddef58f49689d 100644 (file)
@@ -3,6 +3,9 @@ notes.mergeStrategy::
        conflicts.  Must be one of `manual`, `ours`, `theirs`, `union`, or
        `cat_sort_uniq`.  Defaults to `manual`.  See "NOTES MERGE STRATEGIES"
        section of linkgit:git-notes[1] for more information on each strategy.
++
+This setting can be overridden by passing the `--strategy` option to
+linkgit:git-notes[1].
 
 notes.<name>.mergeStrategy::
        Which merge strategy to choose when doing a notes merge into
@@ -11,28 +14,35 @@ notes.<name>.mergeStrategy::
        linkgit:git-notes[1] for more information on the available strategies.
 
 notes.displayRef::
-       The (fully qualified) refname from which to show notes when
-       showing commit messages.  The value of this variable can be set
-       to a glob, in which case notes from all matching refs will be
-       shown.  You may also specify this configuration variable
-       several times.  A warning will be issued for refs that do not
-       exist, but a glob that does not match any refs is silently
-       ignored.
+       Which ref (or refs, if a glob or specified more than once), in
+       addition to the default set by `core.notesRef` or
+       `GIT_NOTES_REF`, to read notes from when showing commit
+       messages with the 'git log' family of commands.
 +
 This setting can be overridden with the `GIT_NOTES_DISPLAY_REF`
 environment variable, which must be a colon separated list of refs or
 globs.
 +
+A warning will be issued for refs that do not exist,
+but a glob that does not match any refs is silently ignored.
++
+This setting can be disabled by the `--no-notes` option to the 'git
+log' family of commands, or by the `--notes=<ref>` option accepted by
+those commands.
++
 The effective value of "core.notesRef" (possibly overridden by
 GIT_NOTES_REF) is also implicitly added to the list of refs to be
 displayed.
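
For example, to read notes from every ref under refs/notes/ in addition
to the default notes ref:

    git config --add notes.displayRef 'refs/notes/*'
    # Or, overriding the configuration for a single invocation only:
    GIT_NOTES_DISPLAY_REF='refs/notes/*' git log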
 
 notes.rewrite.<command>::
        When rewriting commits with <command> (currently `amend` or
-       `rebase`) and this variable is set to `true`, Git
-       automatically copies your notes from the original to the
-       rewritten commit.  Defaults to `true`, but see
-       "notes.rewriteRef" below.
+       `rebase`), if this variable is `false`, git will not copy
+       notes from the original to the rewritten commit.  Defaults to
+       `true`.  See also "`notes.rewriteRef`" below.
++
+This setting can be overridden with the `GIT_NOTES_REWRITE_REF`
+environment variable, which must be a colon separated list of refs or
+globs.
 
 notes.rewriteMode::
        When copying notes during a rewrite (see the
@@ -46,14 +56,13 @@ environment variable.
 
 notes.rewriteRef::
        When copying notes during a rewrite, specifies the (fully
-       qualified) ref whose notes should be copied.  The ref may be a
-       glob, in which case notes in all matching refs will be copied.
-       You may also specify this configuration several times.
+       qualified) ref whose notes should be copied.  May be a glob,
+       in which case notes in all matching refs will be copied.  You
+       may also specify this configuration several times.
 +
 Does not have a default value; you must configure this variable to
 enable note rewriting.  Set it to `refs/notes/commits` to enable
 rewriting for the default commit notes.
 +
-This setting can be overridden with the `GIT_NOTES_REWRITE_REF`
-environment variable, which must be a colon separated list of refs or
-globs.
+Can be overridden with the `GIT_NOTES_REWRITE_REF` environment variable.
+See `notes.rewrite.<command>` above for a further description of its format.
index ad7f73a1eade701492ae7b773cc5bb5dc37e5053..53093d99969cc090e4727a570eb667a6fba445d4 100644 (file)
@@ -164,9 +164,16 @@ When writing a multi-pack reachability bitmap, no new namehashes are
 computed; instead, any namehashes stored in an existing bitmap are
 permuted into their appropriate location when writing a new bitmap.
 
+pack.writeBitmapLookupTable::
+       When true, Git will include a "lookup table" section in the
+       bitmap index (if one is written). This table is used to defer
+       loading individual bitmaps as late as possible. This can be
+       beneficial in repositories that have relatively large bitmap
+       indexes. Defaults to false.
+
 pack.writeReverseIndex::
        When true, git will write a corresponding .rev file (see:
-       link:../technical/pack-format.html[Documentation/technical/pack-format.txt])
+       linkgit:gitformat-pack[5])
        for each new packfile that it writes in all places except for
        linkgit:git-fast-import[1] and in the bulk checkin mechanism.
        Defaults to false.
index 756591d77b080ccfe5be5a26c899b65273cf4866..576038185148d8cce30170b3fe4888b6cf3aa2b6 100644 (file)
@@ -58,6 +58,6 @@ protocol.version::
 * `1` - the original wire protocol with the addition of a version string
   in the initial response from the server.
 
-* `2` - link:technical/protocol-v2.html[wire protocol version 2].
+* `2` - Wire protocol version 2, see linkgit:gitprotocol-v2[5].
 
 --
index e32801e6c91d8145106514021ae200642b7c8366..7386fea225ae4d3215839eb42df28e414ccdf38b 100644 (file)
@@ -137,3 +137,8 @@ push.negotiate::
        server attempt to find commits in common. If "false", Git will
        rely solely on the server's ref advertisement to find commits
        in common.
+
+push.useBitmaps::
+       If set to "false", disable use of bitmaps for "git push" even if
+       `pack.useBitmaps` is "true", without preventing other git operations
+       from using bitmaps. Default is true.
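
For example, in a repository where bitmap-assisted pushes perform
poorly:

    # Keep bitmaps for fetches and other operations, but stop
    # "git push" from using them.
    git config push.useBitmaps false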
index 8c979cb20f2a57d624efbbcc32d56339531d49de..f19bd0e04079051d6be21ee461d1a98c9a632629 100644 (file)
@@ -21,6 +21,9 @@ rebase.autoStash::
        `--autostash` options of linkgit:git-rebase[1].
        Defaults to false.
 
+rebase.updateRefs::
+       If set to true, enable the `--update-refs` option by default.
+
 rebase.missingCommitsCheck::
        If set to "warn", git rebase -i will print a warning if some
        commits are removed (e.g. a line was deleted), however the
index fa02f3ccc5490d2c4b72685b8fbb15e4fd3d0668..bde7f31459b98136a31a7eb1d457c5c43fddea54 100644 (file)
@@ -1,3 +1,22 @@
+safe.bareRepository::
+       Specifies which bare repositories Git will work with. The currently
+       supported values are:
++
+* `all`: Git works with all bare repositories. This is the default.
+* `explicit`: Git only works with bare repositories specified via
+  the top-level `--git-dir` command-line option, or the `GIT_DIR`
+  environment variable (see linkgit:git[1]).
++
+If you do not use bare repositories in your workflow, then it may be
+beneficial to set `safe.bareRepository` to `explicit` in your global
+config. This will protect you from attacks that involve cloning a
+repository that contains a bare repository and running a Git command
+within that directory.
++
+This config setting is only respected in protected configuration (see
+<<SCOPES>>). This prevents the untrusted repository from tampering with
+this value.
+
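A minimal example of the global setting the passage above recommends
(global config is part of the protected configuration, so it is
honored here):

    git config --global safe.bareRepository explicit
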
 safe.directory::
        These config entries specify Git-tracked directories that are
        considered safe even if they are owned by someone other than the
@@ -12,9 +31,9 @@ via `git config --add`. To reset the list of safe directories (e.g. to
 override any such directories specified in the system config), add a
 `safe.directory` entry with an empty value.
 +
-This config setting is only respected when specified in a system or global
-config, not when it is specified in a repository config, via the command
-line option `-c safe.directory=<path>`, or in environment variables.
+This config setting is only respected in protected configuration (see
+<<SCOPES>>). This prevents the untrusted repository from tampering with this
+value.
 +
 The value of this setting is interpolated, i.e. `~/<path>` expands to a
 path relative to the home directory and `%(prefix)/<path>` expands to a
index 50baa5d6bfbe8ad8f60e9f538b0643a13e1c8576..51da7088a844d3f148d78317e792efd485a2c0af 100644 (file)
@@ -18,17 +18,49 @@ sendemail.<identity>.*::
        identity is selected, through either the command-line or
        `sendemail.identity`.
 
+sendemail.multiEdit::
+       If true (default), a single editor instance will be spawned to edit
+       files you have to edit (patches when `--annotate` is used, and the
+       summary when `--compose` is used). If false, files will be edited one
+       after the other, spawning a new editor each time.
+
+sendemail.confirm::
+       Sets the default for whether to confirm before sending. Must be
+       one of 'always', 'never', 'cc', 'compose', or 'auto'. See `--confirm`
+       in the linkgit:git-send-email[1] documentation for the meaning of these
+       values.
+
 sendemail.aliasesFile::
+       To avoid typing long email addresses, point this to one or more
+       email aliases files.  You must also supply `sendemail.aliasFileType`.
+
 sendemail.aliasFileType::
+       Format of the file(s) specified in sendemail.aliasesFile. Must be
+       one of 'mutt', 'mailrc', 'pine', 'elm', 'gnus', or 'sendmail'.
++
+What an alias file in each format looks like can be found in
+the documentation of the email program of the same name. The
+differences and limitations from the standard formats are
+described below:
++
+--
+sendmail;;
+*      Quoted aliases and quoted addresses are not supported: lines that
+       contain a `"` symbol are ignored.
+*      Redirection to a file (`/path/name`) or pipe (`|command`) is not
+       supported.
+*      File inclusion (`:include: /path/name`) is not supported.
+*      Warnings are printed on the standard error output for any
+       explicitly unsupported constructs, and any other lines that are not
+       recognized by the parser.
+--
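
A short sketch with a hypothetical alias-file path in mailrc format:

    git config sendemail.aliasesFile ~/.mailrc
    git config sendemail.aliasFileType mailrc
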
 sendemail.annotate::
 sendemail.bcc::
 sendemail.cc::
 sendemail.ccCmd::
 sendemail.chainReplyTo::
-sendemail.confirm::
 sendemail.envelopeSender::
 sendemail.from::
-sendemail.multiEdit::
 sendemail.signedoffbycc::
 sendemail.smtpPass::
 sendemail.suppresscc::
@@ -44,7 +76,9 @@ sendemail.thread::
 sendemail.transferEncoding::
 sendemail.validate::
 sendemail.xmailer::
-       See linkgit:git-send-email[1] for description.
+       These configuration variables all provide a default for
+       linkgit:git-send-email[1] command-line options. See its
+       documentation for details.
 
 sendemail.signedoffcc (deprecated)::
        Deprecated alias for `sendemail.signedoffbycc`.
index 7ed917f5fc5f1b2eb67663f7176825bffabf0ef4..264812cca4db9a0a7b70be3eed7703d3439c9f46 100644 (file)
@@ -13,7 +13,7 @@ Note that this is currently limited to detecting credentials in
 You might want to enable this to prevent inadvertent credentials
 exposure, e.g. because:
 +
-* The OS or system where you're running git may not provide way way or
+* The OS or system where you're running git may not provide a way or
   otherwise allow you to configure the permissions of the
   configuration file where the username and/or password are stored.
 * Even if it does, having such data stored "at rest" might expose you
index 32fad5bbe817f2caa6201f0e496a06f4b055144a..16264d82a722ec01d2af662613657715e99b7bf9 100644 (file)
@@ -49,9 +49,9 @@ uploadpack.packObjectsHook::
        `pack-objects` to the hook, and expects a completed packfile on
        stdout.
 +
-Note that this configuration variable is ignored if it is seen in the
-repository-level config (this is a safety measure against fetching from
-untrusted repositories).
+Note that this configuration variable is only respected when it is specified
+in protected configuration (see <<SCOPES>>). This is a safety measure
+against fetching from untrusted repositories.
 
 uploadpack.allowFilter::
        If this option is set, `upload-pack` will support partial
index 9b37f356542d1d8787f0344abf3243996fb89476..a030d33c6e704adaf0b12168cb7ef0d062a1ef19 100644 (file)
@@ -433,6 +433,13 @@ they will make the patch impossible to apply:
 * deleting context or removal lines
 * modifying the contents of context or removal lines
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/add.txt[]
+
 SEE ALSO
 --------
 linkgit:git-status[1]
index 09107fb106703d14c9e695685e93f59e15362fc3..326276e51ce5734675f842771d41290057c53f90 100644 (file)
@@ -112,10 +112,7 @@ default.   You can use `--no-utf8` to override this.
        am.threeWay configuration variable. For more information,
        see am.threeWay in linkgit:git-config[1].
 
---rerere-autoupdate::
---no-rerere-autoupdate::
-       Allow the rerere mechanism to update the index with the
-       result of auto-conflict resolution if possible.
+include::rerere-options.txt[]
 
 --ignore-space-change::
 --ignore-whitespace::
@@ -261,6 +258,13 @@ This command can run `applypatch-msg`, `pre-applypatch`,
 and `post-applypatch` hooks.  See linkgit:githooks[5] for more
 information.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/am.txt[]
+
 SEE ALSO
 --------
 linkgit:git-apply[1].
index b6d77f420682618be63cb27a34e803cfcac8f802..1d478cbe9b5b873566fd9fc34e5d837bd0a4595c 100644 (file)
@@ -263,13 +263,9 @@ has no effect when `--index` or `--cached` is in use.
 CONFIGURATION
 -------------
 
-apply.ignoreWhitespace::
-       Set to 'change' if you want changes in whitespace to be ignored by default.
-       Set to one of: no, none, never, false if you want changes in
-       whitespace to be significant.
-apply.whitespace::
-       When no `--whitespace` flag is given from the command
-       line, this configuration item is used as the default.
+include::includes/cmd-config-section-all.txt[]
+
+include::config/apply.txt[]
 
 SUBMODULES
 ----------
index 56989a2f3493aa80530cb9efc72e74ba8d42eb56..60c040988bb803cbca608b9e58647b75a75ac173 100644 (file)
@@ -34,10 +34,12 @@ OPTIONS
 -------
 
 --format=<fmt>::
-       Format of the resulting archive: 'tar' or 'zip'. If this option
+       Format of the resulting archive. Possible values are `tar`,
+       `zip`, `tar.gz`, `tgz`, and any format defined using the
+       configuration option `tar.<format>.command`. If `--format`
        is not given, and the output file is specified, the format is
-       inferred from the filename if possible (e.g. writing to "foo.zip"
-       makes the output to be in the zip format). Otherwise the output
+       inferred from the filename if possible (e.g. writing to `foo.zip`
+       makes the output to be in the `zip` format). Otherwise the output
        format is `tar`.
 
 -l::
@@ -143,17 +145,16 @@ tar.<format>.command::
        is executed using the shell with the generated tar file on its
        standard input, and should produce the final output on its
        standard output. Any compression-level options will be passed
-       to the command (e.g., "-9"). An output file with the same
-       extension as `<format>` will be use this format if no other
-       format is given.
+       to the command (e.g., `-9`).
 +
-The "tar.gz" and "tgz" formats are defined automatically and default to
-`gzip -cn`. You may override them with custom commands.
+The `tar.gz` and `tgz` formats are defined automatically and use the
+magic command `git archive gzip` by default, which invokes an internal
+implementation of gzip.
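++
+For example, a user-defined `tar.xz` format could be set up as follows
+(the compression command shown is only an illustration):
++
+------------
+$ git config tar.tar.xz.command "xz -c"
+$ git archive --format=tar.xz -o snapshot.tar.xz HEAD
+------------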
 
 tar.<format>.remote::
-       If true, enable `<format>` for use by remote clients via
+       If true, enable the format for use by remote clients via
        linkgit:git-upload-archive[1]. Defaults to false for
-       user-defined formats, but true for the "tar.gz" and "tgz"
+       user-defined formats, but true for the `tar.gz` and `tgz`
        formats.
 
 [[ATTRIBUTES]]
index d7a46cc67441939a9fc80ac7d6ec39eb6b44f752..4400a17330b4204227050b76d380bc84b017628a 100644 (file)
@@ -241,6 +241,12 @@ MAPPING AUTHORS
 
 See linkgit:gitmailmap[5].
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/blame.txt[]
 
 SEE ALSO
 --------
index ae82378349df74745efe26be51903a191fe76c9f..12c5f84e3bef5c83c80edec9850315dd334beffa 100644 (file)
@@ -336,6 +336,10 @@ CONFIGURATION
 `--list` is used or implied. The default is to use a pager.
 See linkgit:git-config[1].
 
+include::includes/cmd-config-section-rest.txt[]
+
+include::config/branch.txt[]
+
 EXAMPLES
 --------
 
index d8817bf3cec3995cc7f3fa4a87c8f8ff3f9db865..eca726e57911af2cc1f0643cafdcf887c20bfa32 100644 (file)
@@ -9,6 +9,7 @@ SYNOPSIS
 --------
 [verse]
 'git bugreport' [(-o | --output-directory) <path>] [(-s | --suffix) <format>]
+               [--diagnose[=<mode>]]
 
 DESCRIPTION
 -----------
@@ -31,6 +32,10 @@ The following information is captured automatically:
  - A list of enabled hooks
  - $SHELL
 
+Additional information may be gathered into a separate zip archive using the
+`--diagnose` option, and can be attached alongside the bugreport document to
+provide additional context to readers.
+
 This tool is invoked via the typical Git setup process, which means that in some
 cases, it might not be able to launch - for example, if a relevant config file
 is unreadable. In this kind of scenario, it may be helpful to manually gather
@@ -49,6 +54,19 @@ OPTIONS
        named 'git-bugreport-<formatted suffix>'. This should take the form of a
        strftime(3) format string; the current local time will be used.
 
+--no-diagnose::
+--diagnose[=<mode>]::
+       Create a zip archive of supplemental information about the user's
+       machine, Git client, and repository state. The archive is written to the
+       same output directory as the bug report and is named
+       'git-diagnostics-<formatted suffix>'.
++
+Without `mode` specified, the diagnostic archive will contain the default set of
+statistics reported by `git diagnose`. An optional `mode` value may be specified
+to change which information is included in the archive. See
+linkgit:git-diagnose[1] for the list of valid values for `mode` and details
+about their usage.
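++
+For example, assuming the `all` mode described in linkgit:git-diagnose[1],
+a report plus a full diagnostic archive could be written to a separate
+directory (the path is illustrative):
++
+------------
+$ git bugreport --diagnose=all -o ~/bugreports
+------------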
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index 7685b570455cadc243929deecc197776326dfeba..18a022b4b40c345040c23e83bed1fd1983bd725e 100644 (file)
@@ -42,7 +42,7 @@ BUNDLE FORMAT
 Bundles are `.pack` files (see linkgit:git-pack-objects[1]) with a
 header indicating what references are contained within the bundle.
 
-Like the the packed archive format itself bundles can either be
+Like the packed archive format itself bundles can either be
 self-contained, or be created using exclusions.
 See the "OBJECT PREREQUISITES" section below.
 
@@ -56,10 +56,8 @@ using "thin packs", bundles created using exclusions are smaller in
 size. That they're "thin" under the hood is merely noted here as a
 curiosity, and as a reference to other documentation.
 
-See link:technical/bundle-format.html[the `bundle-format`
-documentation] for more details and the discussion of "thin pack" in
-link:technical/pack-format.html[the pack format documentation] for
-further details.
+See linkgit:gitformat-bundle[5] for more details and the discussion of
+"thin pack" in linkgit:gitformat-pack[5] for further details.
 
 OPTIONS
 -------
@@ -77,7 +75,7 @@ verify <file>::
        commits exist and are fully linked in the current repository.
        Then, 'git bundle' prints a list of missing commits, if any.
        Finally, information about additional capabilities, such as "object
-       filter", is printed. See "Capabilities" in link:technical/bundle-format.html
+       filter", is printed. See "Capabilities" in linkgit:gitformat-bundle[5]
        for more information. The exit code is zero for success, but will
        be nonzero if the bundle file is invalid.
 
@@ -337,6 +335,11 @@ You can also see what references it offers:
 $ git ls-remote mybundle
 ----------------
 
+FILE FORMAT
+-----------
+
+See linkgit:gitformat-bundle[5].
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index 24a811f0ef64b0f411c23a3598e7bc73e6a2e7ee..ec30b5c5743fd64ea9d42f4f451dc6ce0627d80a 100644 (file)
@@ -14,7 +14,7 @@ SYNOPSIS
 'git cat-file' (-t | -s) [--allow-unknown-type] <object>
 'git cat-file' (--batch | --batch-check | --batch-command) [--batch-all-objects]
             [--buffer] [--follow-symlinks] [--unordered]
-            [--textconv | --filters]
+            [--textconv | --filters] [-z]
 'git cat-file' (--textconv | --filters)
             [<rev>:<path|tree-ish> | --path=<path|tree-ish> <rev>]
 
@@ -63,6 +63,12 @@ OPTIONS
        or to ask for a "blob" with `<object>` being a tag object that
        points at it.
 
+--[no-]mailmap::
+--[no-]use-mailmap::
+       Use mailmap file to map author, committer and tagger names
+       and email addresses to canonical real names and email addresses.
+       See linkgit:git-shortlog[1].
+
 --textconv::
        Show the content as transformed by a textconv filter. In this case,
        `<object>` has to be of the form `<tree-ish>:<path>`, or `:<path>` in
@@ -207,6 +213,11 @@ respectively print:
        /etc/passwd
 --
 
+-z::
+       Only meaningful with `--batch`, `--batch-check`, or
+       `--batch-command`; input is NUL-delimited instead of
+       newline-delimited.
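++
+For example, paths that may contain newlines can be queried safely by
+feeding NUL-terminated input (the paths are illustrative):
++
+------------
+$ printf 'HEAD:a.txt\0HEAD:b.txt\0' | git cat-file --batch-check -z
+------------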
+
 
 OUTPUT
 ------
index 9f37e22e13088187c60c49f70878e883e0e092cd..4cb9d555b4b436ae6aef8e14c1729599da0d74e8 100644 (file)
@@ -600,6 +600,13 @@ $ edit frotz
 $ git add frotz
 ------------
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/checkout.txt[]
+
 SEE ALSO
 --------
 linkgit:git-switch[1],
index 78dcc9171fb04da07a5ec0308e3be83d417fefeb..1e8ac9df60274067dcf57d3f9a82ba6f270ea7d9 100644 (file)
@@ -156,10 +156,7 @@ effect to your index in a row.
        Pass the merge strategy-specific option through to the
        merge strategy.  See linkgit:git-merge[1] for details.
 
---rerere-autoupdate::
---no-rerere-autoupdate::
-       Allow the rerere mechanism to update the index with the
-       result of auto-conflict resolution if possible.
+include::rerere-options.txt[]
 
 SEQUENCER SUBCOMMANDS
 ---------------------
index a7f309dff5a327ba14b38b63b931060582c53c28..91742633fa878922d23bc78e1aeec9a638994d38 100644 (file)
@@ -133,6 +133,13 @@ help::
 
   Show brief usage of interactive git-clean.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/clean.txt[]
+
 SEE ALSO
 --------
 linkgit:gitignore[5]
index 632bd1348ea14deb067144f187e7f57c0cf2ae60..d6434d262d6e2945ffe98f397ab8500a53990750 100644 (file)
@@ -323,6 +323,13 @@ or `--mirror` is given)
        for `host.xz:foo/.git`).  Cloning into an existing directory
        is only allowed if the directory is empty.
 
+--bundle-uri=<uri>::
+       Before fetching from the remote, fetch a bundle from the given
+       `<uri>` and unbundle the data into the local repository. The refs
+       in the bundle will be stored under the hidden `refs/bundle/*`
+       namespace. This option is incompatible with `--depth`,
+       `--shallow-since`, and `--shallow-exclude`.
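++
+For example (both URIs are illustrative):
++
+------------
+$ git clone --bundle-uri=https://example.com/repo.bundle \
+       https://git.example.com/repo.git
+------------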
+
 :git-clone: 1
 include::urls.txt[]
 
@@ -363,6 +370,15 @@ $ cd my-linux
 $ git clone --bare -l /home/proj/.git /pub/scm/proj.git
 ------------
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/init.txt[]
+
+include::config/clone.txt[]
+
 
 GIT
 ---
index 6cea9ab4638be5d1cada5e3350ddf41696280112..18431647a2da9fb825330bbb0eebbc25f11c14c6 100644 (file)
@@ -74,6 +74,13 @@ v2.4.3  v2.4.4      v2.4.5      v2.4.6      v2.4.7
 v2.4.8  v2.4.9
 ------------
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/column.txt[]
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index e1f48c95b3ca37e67af88e634a44db88dd1ca6a9..36fe56c2c7192906add8bc04d43c1c5cca4aa372 100644 (file)
@@ -142,6 +142,18 @@ $ git show-ref -s | git commit-graph write --stdin-commits
 $ git rev-parse HEAD | git commit-graph write --stdin-commits --append
 ------------------------------------------------
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/commitgraph.txt[]
+
+
+FILE FORMAT
+-----------
+
+See linkgit:gitformat-commit-graph[5].
 
 GIT
 ---
index 6c60bf98f9f73ad9a892337c23c5a7f9b31b3eec..225c6c9f2e5f8f513e1e56ae77bc45d8c2097da4 100644 (file)
@@ -557,6 +557,10 @@ The editor used to edit the commit log message will be chosen from the
 `VISUAL` environment variable, or the `EDITOR` environment variable (in that
 order).  See linkgit:git-var[1] for details.
 
+include::includes/cmd-config-section-rest.txt[]
+
+include::config/commit.txt[]
+
 HOOKS
 -----
 This command can run `commit-msg`, `prepare-commit-msg`, `pre-commit`,
index 9376e39aef206ae8d687dcf2066727de746e7c71..7a2bcb2f6cb6f94b677d47de49ae8c6d0b98f2b8 100644 (file)
@@ -297,23 +297,20 @@ The default is to use a pager.
 FILES
 -----
 
-If not set explicitly with `--file`, there are four files where
-'git config' will search for configuration options:
+By default, 'git config' will read configuration options from multiple
+files:
 
 $(prefix)/etc/gitconfig::
        System-wide configuration file.
 
 $XDG_CONFIG_HOME/git/config::
-       Second user-specific configuration file. If $XDG_CONFIG_HOME is not set
-       or empty, `$HOME/.config/git/config` will be used. Any single-valued
-       variable set in this file will be overwritten by whatever is in
-       `~/.gitconfig`.  It is a good idea not to create this file if
-       you sometimes use older versions of Git, as support for this
-       file was added fairly recently.
-
 ~/.gitconfig::
-       User-specific configuration file. Also called "global"
-       configuration file.
+       User-specific configuration files. When the XDG_CONFIG_HOME environment
+       variable is not set or empty, $HOME/.config/ is used as
+       $XDG_CONFIG_HOME.
++
+These are also called "global" configuration files. If both files exist, both
+files are read in the order given above.
 
 $GIT_DIR/config::
        Repository specific configuration file.
@@ -322,28 +319,80 @@ $GIT_DIR/config.worktree::
        This is optional and is only searched when
        `extensions.worktreeConfig` is present in $GIT_DIR/config.
 
-If no further options are given, all reading options will read all of these
-files that are available. If the global or the system-wide configuration
-file are not available they will be ignored. If the repository configuration
-file is not available or readable, 'git config' will exit with a non-zero
-error code. However, in neither case will an error message be issued.
+You may also provide additional configuration parameters when running any
+git command by using the `-c` option. See linkgit:git[1] for details.
+
+Options will be read from all of these files that are available. If the
+global or the system-wide configuration files are missing or unreadable they
+will be ignored. If the repository configuration file is missing or unreadable,
+'git config' will exit with a non-zero error code. An error message is produced
+if the file is unreadable, but not if it is missing.
 
 The files are read in the order given above, with last value found taking
 precedence over values read earlier.  When multiple values are taken then all
 values of a key from all files will be used.
 
-You may override individual configuration parameters when running any git
-command by using the `-c` option. See linkgit:git[1] for details.
-
-All writing options will per default write to the repository specific
+By default, options are only written to the repository specific
 configuration file. Note that this also affects options like `--replace-all`
 and `--unset`. *'git config' will only ever change one file at a time*.
 
-You can override these rules using the `--global`, `--system`,
-`--local`, `--worktree`, and `--file` command-line options; see
-<<OPTIONS>> above.
+You can limit which configuration sources are read from or written to by
+specifying the path of a file with the `--file` option, or by specifying a
+configuration scope with `--system`, `--global`, `--local`, or `--worktree`.
+For more, see <<OPTIONS>> above.
+
+[[SCOPES]]
+SCOPES
+------
+
+Each configuration source falls within a configuration scope. The scopes
+are:
+
+system::
+       $(prefix)/etc/gitconfig
+
+global::
+       $XDG_CONFIG_HOME/git/config
++
+~/.gitconfig
+
+local::
+       $GIT_DIR/config
+
+worktree::
+       $GIT_DIR/config.worktree
+
+command::
+       GIT_CONFIG_{COUNT,KEY,VALUE} environment variables (see <<ENVIRONMENT>>
+       below)
++
+the `-c` option
+
+With the exception of 'command', each scope corresponds to a command line
+option: `--system`, `--global`, `--local`, `--worktree`.
+
+When reading options, specifying a scope will only read options from the
+files within that scope. When writing options, specifying a scope will write
+to the files within that scope (instead of the repository specific
+configuration file). See <<OPTIONS>> above for a complete description.
+
+Most configuration options are respected regardless of the scope they are
+defined in, but some options are only respected in certain scopes. See the
+respective option's documentation for the full details.
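+
+For example, reads and writes can be limited to a single scope (the
+variables and values shown are illustrative):
+
+------------
+$ git config --global --get user.email
+$ git config --local core.autocrlf false
+------------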
+
+Protected configuration
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Protected configuration refers to the 'system', 'global', and 'command' scopes.
+For security reasons, certain options are only respected when they are
+specified in protected configuration, and ignored otherwise.
 
+Git treats these scopes as if they are controlled by the user or a trusted
+administrator. This is because an attacker who controls these scopes can do
+substantial harm without using Git, so it is assumed that the user's environment
+protects these scopes against attackers.
 
+[[ENVIRONMENT]]
 ENVIRONMENT
 -----------
 
diff --git a/Documentation/git-diagnose.txt b/Documentation/git-diagnose.txt
new file mode 100644 (file)
index 0000000..3ec8cc7
--- /dev/null
@@ -0,0 +1,65 @@
+git-diagnose(1)
+================
+
+NAME
+----
+git-diagnose - Generate a zip archive of diagnostic information
+
+SYNOPSIS
+--------
+[verse]
+'git diagnose' [(-o | --output-directory) <path>] [(-s | --suffix) <format>]
+              [--mode=<mode>]
+
+DESCRIPTION
+-----------
+Collects detailed information about the user's machine, Git client, and
+repository state and packages that information into a zip archive. The
+generated archive can then, for example, be shared with the Git mailing list to
+help debug an issue or serve as a reference for independent debugging.
+
+By default, the following information is captured in the archive:
+
+  * 'git version --build-options'
+  * The path to the repository root
+  * The available disk space on the filesystem
+  * The name and size of each packfile, including those in alternate object
+    stores
+  * The total count of loose objects, as well as counts broken down by
+    `.git/objects` subdirectory
+
+Additional information can be collected by selecting a different diagnostic mode
+using the `--mode` option.
+
+This tool differs from linkgit:git-bugreport[1] in that it collects much more
+detailed information with a greater focus on reporting the size and data shape
+of repository contents.
+
+OPTIONS
+-------
+-o <path>::
+--output-directory <path>::
+       Place the resulting diagnostics archive in `<path>` instead of the
+       current directory.
+
+-s <format>::
+--suffix <format>::
+       Specify an alternate suffix for the diagnostics archive name, to create
+       a file named 'git-diagnostics-<formatted suffix>'. This should take the
+       form of a strftime(3) format string; the current local time will be
+       used.
+
+--mode=(stats|all)::
+       Specify the type of diagnostics that should be collected. The default behavior
+       of 'git diagnose' is equivalent to `--mode=stats`.
++
+The `--mode=all` option collects everything included in `--mode=stats`, as well
+as copies of `.git`, `.git/hooks`, `.git/info`, `.git/logs`, and
+`.git/objects/info` directories. This additional information may be sensitive,
+as it can be used to reconstruct the full contents of the diagnosed repository.
+Users should exercise caution when sharing an archive generated with
+`--mode=all`.
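++
+For example, the fuller archive can be written to a separate directory
+(the path is illustrative):
++
+------------
+$ git diagnose --mode=all -o ~/diagnostics -s %Y-%m-%d
+------------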
+
+GIT
+---
+Part of the linkgit:git[1] suite
index 6236c75c9b826a315e553ead169b7fc3fb183c72..85ae6d6d08a2623a95830041ab076f1526dc6417 100644 (file)
@@ -213,6 +213,13 @@ $ git diff -R                          <2>
     rewrites (very expensive).
 <2> Output diff in reverse.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/diff.txt[]
+
 SEE ALSO
 --------
 diff(1),
index 143b0c49d739aef285aab7b10a0c536257d5f6fc..9d14c3c9f099aab8d225767ef6d308ecee2e6713 100644 (file)
@@ -113,33 +113,14 @@ instead.  `--no-symlinks` is the default on Windows.
 
 See linkgit:git-diff[1] for the full list of supported options.
 
-CONFIG VARIABLES
-----------------
+CONFIGURATION
+-------------
 'git difftool' falls back to 'git mergetool' config variables when the
 difftool equivalents have not been defined.
 
-diff.tool::
-       The default diff tool to use.
+include::includes/cmd-config-section-rest.txt[]
 
-diff.guitool::
-       The default diff tool to use when `--gui` is specified.
-
-difftool.<tool>.path::
-       Override the path for the given tool.  This is useful in case
-       your tool is not in the PATH.
-
-difftool.<tool>.cmd::
-       Specify the command to invoke the specified diff tool.
-+
-See the `--tool=<tool>` option above for more details.
-
-difftool.prompt::
-       Prompt before each invocation of the diff tool.
-
-difftool.trustExitCode::
-       Exit difftool if the invoked diff tool returns a non-zero exit status.
-+
-See the `--trust-exit-code` option above for more details.
+include::config/difftool.txt[]
 
 SEE ALSO
 --------
index 39cfa05b28b0126544b5ed8b89e0fd1b697c3e09..8b5dd6add006d111688fc1193d77ebe6b45422a5 100644 (file)
@@ -1564,6 +1564,13 @@ operator can use this facility to peek at the objects and refs from an
 import in progress, at the cost of some added running time and worse
 compression.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/fastimport.txt[]
+
 SEE ALSO
 --------
 linkgit:git-fast-export[1]
index e9d364669af7b9fa2aaf59605dc9e7fa386ca021..63d9569e16444237a7583b8c9dafe232eb1d2e42 100644 (file)
@@ -285,6 +285,13 @@ linkgit:git-gc[1]).
 
 include::transfer-data-leaks.txt[]
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/fetch.txt[]
+
 BUGS
 ----
 Using --recurse-submodules can only fetch new commits in submodules that are
index be797d7a28f62f38e2dc38c82141e71b97ea4aae..dfcc7da4c211706570cd1ce7e1fd8552fb8947d9 100644 (file)
@@ -275,6 +275,17 @@ header). Note also that `git send-email` already handles this
 transformation for you, and this option should not be used if you are
 feeding the result to `git send-email`.
 
+--[no-]force-in-body-from::
+       With the e-mail sender specified via the `--from` option, by
+       default, an in-body "From:" to identify the real author of
+       the commit is added at the top of the commit log message if
+       the sender is different from the author.  With this option,
+       the in-body "From:" is added even when the sender and the
+       author have the same name and address, which may help if the
+       mailing list software mangles the sender's identity.
+       Defaults to the value of the `format.forceInBodyFrom`
+       configuration variable.
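++
+For example (the sender address is illustrative):
++
+------------
+$ git format-patch --from="A U Thor <author@example.org>" \
+       --force-in-body-from -1
+------------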
+
 --add-header=<header>::
        Add an arbitrary header to the email headers.  This is in addition
        to any configured headers, and may be used multiple times.
index 5088783dccb923bac475d4d0a8419d57b27ecfb0..29318ea957ef8fda0aee500d795c6f04fce0cae2 100644 (file)
@@ -107,6 +107,8 @@ care about this output and want to speed it up further.
 CONFIGURATION
 -------------
 
+include::includes/cmd-config-section-all.txt[]
+
 include::config/fsck.txt[]
 
 DISCUSSION
index 0af7540a0c8b1b7b0df703b7b089dcc74e3c3267..a65c9aa62d64112dfd03335af82a680e54d00c58 100644 (file)
@@ -110,8 +110,7 @@ users and their repositories.
 CONFIGURATION
 -------------
 
-The below documentation is the same as what's found in
-linkgit:git-config[1]:
+include::includes/cmd-config-section-all.txt[]
 
 include::config/gc.txt[]
 
index 3d393fbac1bb3323c41ea2b74f6bba8f8ca39c00..dabdbe8471de5d36bf55589aa74a270901ff882e 100644 (file)
@@ -23,6 +23,7 @@ SYNOPSIS
           [--break] [--heading] [-p | --show-function]
           [-A <post-context>] [-B <pre-context>] [-C <context>]
           [-W | --function-context]
+          [(-m | --max-count) <num>]
           [--threads <num>]
           [-f <file>] [-e] <pattern>
           [--and|--or|--not|(|)|-e <pattern>...]
@@ -238,6 +239,14 @@ providing this option will cause it to die.
        `git diff` works out patch hunk headers (see 'Defining a
        custom hunk-header' in linkgit:gitattributes[5]).
 
+-m <num>::
+--max-count <num>::
+       Limit the number of matches per file. When using the `-v` or
+       `--invert-match` option, the search stops after the specified
+       number of non-matches. A value of -1 will return unlimited
+       results (the default). A value of 0 will exit immediately with
+       a non-zero status.
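++
+For example, to report at most one match per file (the pattern is
+illustrative):
++
+------------
+$ git grep -m 1 TODO
+------------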
+
 --threads <num>::
        Number of grep worker threads to use.
        See `grep.threads` in 'CONFIGURATION' for more information.
@@ -334,34 +343,9 @@ performance in this case, it might be desirable to use `--threads=1`.
 CONFIGURATION
 -------------
 
-grep.lineNumber::
-       If set to true, enable `-n` option by default.
-
-grep.column::
-       If set to true, enable the `--column` option by default.
-
-grep.patternType::
-       Set the default matching behavior. Using a value of 'basic', 'extended',
-       'fixed', or 'perl' will enable the `--basic-regexp`, `--extended-regexp`,
-       `--fixed-strings`, or `--perl-regexp` option accordingly, while the
-       value 'default' will return to the default matching behavior.
-
-grep.extendedRegexp::
-       If set to true, enable `--extended-regexp` option by default. This
-       option is ignored when the `grep.patternType` option is set to a value
-       other than 'default'.
-
-grep.threads::
-       Number of grep worker threads to use. If unset (or set to 0), Git will
-       use as many threads as the number of logical cores available.
-
-grep.fullName::
-       If set to true, enable `--full-name` option by default.
-
-grep.fallbackToNoIndex::
-       If set to true, fall back to git grep --no-index if git grep
-       is executed outside of a git repository.  Defaults to false.
+include::includes/cmd-config-section-all.txt[]
 
+include::config/grep.txt[]
 
 GIT
 ---
index 239c68db457098cae526e4691061c7373b61266a..2b0b5e390dcb94a651ca92ded8c1542a175f7f09 100644 (file)
@@ -9,14 +9,16 @@ SYNOPSIS
 --------
 [verse]
 'git help' [-a|--all] [--[no-]verbose] [--[no-]external-commands] [--[no-]aliases]
-'git help' [[-i|--info] [-m|--man] [-w|--web]] [<command>|<guide>]
+'git help' [[-i|--info] [-m|--man] [-w|--web]] [<command>|<doc>]
 'git help' [-g|--guides]
 'git help' [-c|--config]
+'git help' [--user-interfaces]
+'git help' [--developer-interfaces]
 
 DESCRIPTION
 -----------
 
-With no options and no '<command>' or '<guide>' given, the synopsis of the 'git'
+With no options and no '<command>' or '<doc>' given, the synopsis of the 'git'
 command and a list of the most commonly used Git commands are printed
 on the standard output.
 
@@ -26,8 +28,8 @@ printed on the standard output.
 If the option `--guides` or `-g` is given, a list of the
 Git concept guides is also printed on the standard output.
 
-If a command, or a guide, is given, a manual page for that command or
-guide is brought up. The 'man' program is used by default for this
+If a command or other documentation is given, the relevant manual page
+will be brought up. The 'man' program is used by default for this
 purpose, but this can be overridden by other options or configuration
 variables.
 
@@ -69,6 +71,23 @@ OPTIONS
 --guides::
        Prints a list of the Git concept guides on the standard output.
 
+--user-interfaces::
+       Prints a list of the repository, command and file interfaces
+       documentation on the standard output.
++
+In-repository file interfaces such as `.git/info/exclude` are
+documented here (see linkgit:gitrepository-layout[5]), as well as
+in-tree configuration such as `.mailmap` (see linkgit:gitmailmap[5]).
++
+This section of the documentation also covers general or widespread
+user-interface conventions (e.g. linkgit:gitcli[7]), and
+pseudo-configuration such as the file-based `.git/hooks/*` interface
+described in linkgit:githooks[5].
+
+--developer-interfaces::
+       Print a list of the file formats, protocols and other developer
+       interfaces documentation on the standard output.
+
 -i::
 --info::
        Display manual page for the command in the 'info' format. The
index 63cf498ce9f276878ea8a7bd79362a423210d030..f7b185151415b54b08fb21f54062842baf64e636 100644 (file)
@@ -54,6 +54,8 @@ CONFIGURATION
 To use the tool, `imap.folder` and either `imap.tunnel` or `imap.host` must be set
 to appropriate values.
 
+include::includes/cmd-config-section-rest.txt[]
+
 include::config/imap.txt[]
 
 EXAMPLES
index ad921fe782eae542ba5adfaa41e74d674af2dda9..160dea1372cd9ae87f1bad9744e2ae045ed1b5b6 100644 (file)
@@ -169,6 +169,13 @@ $ git commit    <3>
 <2> Add all existing files to the index.
 <3> Record the pristine state as the first commit in the history.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/init.txt[]
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index 956a01d184f78832ea8ac8d1874ae1cc568dccaa..6d6197cd0a41ec08954e3b19ab1eb355c01dc5ea 100644 (file)
@@ -60,10 +60,12 @@ non-whitespace lines before a line that starts with '---' (followed by a
 space or the end of the line). Such three minus signs start the patch
 part of the message. See also `--no-divider` below.
 
-When reading trailers, there can be whitespaces after the
-token, the separator and the value. There can also be whitespaces
-inside the token and the value. The value may be split over multiple lines with
-each subsequent line starting with whitespace, like the "folding" in RFC 822.
+When reading trailers, there can be no whitespace before or inside the
+token, but any number of regular space and tab characters are allowed
+between the token and the separator. There can be whitespaces before,
+inside or after the value. The value may be split over multiple lines
+with each subsequent line starting with at least one whitespace, like
+the "folding" in RFC 822.
 
 Note that 'trailers' do not follow and are not intended to follow many
 rules for RFC 822 headers. For example they do not follow
index 20e87cecf4917fdbb18c2ffe675a1b2d9e4c3177..2a66cf888074656ce775ad93551603c15fed4ddc 100644 (file)
@@ -45,13 +45,23 @@ OPTIONS
 
 --decorate-refs=<pattern>::
 --decorate-refs-exclude=<pattern>::
-       If no `--decorate-refs` is given, pretend as if all refs were
-       included.  For each candidate, do not use it for decoration if it
+       For each candidate reference, do not use it for decoration if it
        matches any patterns given to `--decorate-refs-exclude` or if it
        doesn't match any of the patterns given to `--decorate-refs`. The
        `log.excludeDecoration` config option allows excluding refs from
        the decorations, but an explicit `--decorate-refs` pattern will
        override a match in `log.excludeDecoration`.
++
+If none of these options or config settings are given, then references are
+used as decoration if they match `HEAD`, `refs/heads/`, `refs/remotes/`,
+`refs/stash/`, or `refs/tags/`.
+
+--clear-decorations::
+       When specified, this option clears all previous `--decorate-refs`
+       or `--decorate-refs-exclude` options and relaxes the default
+       decoration filter to include all references. This option is
+       assumed if the config value `log.initialDecorationSet` is set to
+       `all`.
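++
+For example, the default decorations can be narrowed to tags only, or
+widened to all references (the pattern is illustrative):
++
+------------
+$ git log --oneline --decorate-refs='refs/tags/*'
+$ git log --oneline --clear-decorations
+------------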
 
 --source::
        Print out the ref name given on the command line by which each
@@ -199,47 +209,11 @@ i18n.logOutputEncoding::
        Defaults to the value of `i18n.commitEncoding` if set, and UTF-8
        otherwise.
 
-log.date::
-       Default format for human-readable dates.  (Compare the
-       `--date` option.)  Defaults to "default", which means to write
-       dates like `Sat May 8 19:35:34 2010 -0500`.
-+
-If the format is set to "auto:foo" and the pager is in use, format
-"foo" will be the used for the date format. Otherwise "default" will
-be used.
-
-log.follow::
-       If `true`, `git log` will act as if the `--follow` option was used when
-       a single <path> is given.  This has the same limitations as `--follow`,
-       i.e. it cannot be used to follow multiple files and does not work well
-       on non-linear history.
-
-log.showRoot::
-       If `false`, `git log` and related commands will not treat the
-       initial commit as a big creation event.  Any root commits in
-       `git log -p` output would be shown without a diff attached.
-       The default is `true`.
-
-log.showSignature::
-       If `true`, `git log` and related commands will act as if the
-       `--show-signature` option was passed to them.
-
-mailmap.*::
-       See linkgit:git-shortlog[1].
-
-notes.displayRef::
-       Which refs, in addition to the default set by `core.notesRef`
-       or `GIT_NOTES_REF`, to read notes from when showing commit
-       messages with the `log` family of commands.  See
-       linkgit:git-notes[1].
-+
-May be an unabbreviated ref name or a glob and may be specified
-multiple times.  A warning will be issued for refs that do not exist,
-but a glob that does not match any refs is silently ignored.
-+
-This setting can be disabled by the `--no-notes` option,
-overridden by the `GIT_NOTES_DISPLAY_REF` environment variable,
-and overridden by the `--notes=<ref>` option.
+include::includes/cmd-config-section-rest.txt[]
+
+include::config/log.txt[]
+
+include::config/notes.txt[]
 
 GIT
 ---
index 0dabf3f0ddc8d893818b3ce09436c1b5b1c72a33..d7986419c2507a7032961ba5b37ce6c73e5822f9 100644 (file)
@@ -20,7 +20,7 @@ SYNOPSIS
                [--exclude-standard]
                [--error-unmatch] [--with-tree=<tree-ish>]
                [--full-name] [--recurse-submodules]
-               [--abbrev[=<n>]] [--] [<file>...]
+               [--abbrev[=<n>]] [--format=<format>] [--] [<file>...]
 
 DESCRIPTION
 -----------
@@ -192,6 +192,13 @@ followed by the  ("attr/<eolattr>").
        to the contained files. Sparse directories will be shown with a
        trailing slash, such as "x/" for a sparse directory "x".
 
+--format=<format>::
+       A string that interpolates `%(fieldname)` from the result being shown.
+       It also interpolates `%%` to `%`, and `%xx`, where `xx` are hex
+       digits, to the character with hex code `xx`; for example `%00`
+       interpolates to `\0` (NUL), `%09` to `\t` (TAB) and `%0a` to `\n` (LF).
+       `--format` cannot be combined with `-s`, `-o`, `-k`, `-t`,
+       `--resolve-undo` and `--eol`.
 \--::
        Do not interpret any more arguments as options.
 
@@ -223,6 +230,36 @@ quoted as explained for the configuration variable `core.quotePath`
 (see linkgit:git-config[1]).  Using `-z` the filename is output
 verbatim and the line is terminated by a NUL byte.
 
+It is possible to print in a custom format by using the `--format`
+option, which is able to interpolate different fields using
+a `%(fieldname)` notation. For example, if you only care about the
+"objectname" and "path" fields, you can execute with a specific
+"--format" like
+
+       git ls-files --format='%(objectname) %(path)'
+
+FIELD NAMES
+-----------
+The way each path is shown can be customized by using the
+`--format=<format>` option, where `%(fieldname)` placeholders in the
+`<format>` string are interpolated with various aspects of the index
+entry.  The following "fieldname" values are understood:
+
+objectmode::
+       The mode of the file which is recorded in the index.
+objectname::
+       The name of the file which is recorded in the index.
+stage::
+       The stage of the file which is recorded in the index.
+eolinfo:index::
+eolinfo:worktree::
+       The <eolinfo> (see the description of the `--eol` option) of
+       the contents in the index or in the worktree for the path.
+eolattr::
+       The <eolattr> (see the description of the `--eol` option)
+       that applies to the path.
+path::
+       The pathname of the file which is recorded in the index.
 
 EXCLUDE PATTERNS
 ----------------
index 3fcfd965fdec62613567278dbd7b08b0080778c7..28060283c703a991d4d2aa36b99b867b8d6c8d43 100644 (file)
@@ -115,6 +115,13 @@ If no such configuration option has been set, `warn` will be used.
 <patch>::
        The patch extracted from e-mail.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/mailinfo.txt[]
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index e56bad28c6551f0f0a1990f245da4f551df74178..9c630efe19c68ec4b25fee1cf716c5a2073e2f51 100644 (file)
@@ -397,6 +397,13 @@ If you want to customize the background tasks, please rename the tasks
 so future calls to `git maintenance (start|stop)` do not overwrite your
 custom tasks.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/maintenance.txt[]
+
 
 GIT
 ---
index 58731c194229e7e14b4eca27465fd42894d9a710..d6c356740efcaa2ddb7c668f123b4024e3c0f270 100644 (file)
@@ -3,26 +3,237 @@ git-merge-tree(1)
 
 NAME
 ----
-git-merge-tree - Show three-way merge without touching index
+git-merge-tree - Perform merge without touching index or working tree
 
 
 SYNOPSIS
 --------
 [verse]
-'git merge-tree' <base-tree> <branch1> <branch2>
+'git merge-tree' [--write-tree] [<options>] <branch1> <branch2>
+'git merge-tree' [--trivial-merge] <base-tree> <branch1> <branch2> (deprecated)
 
+[[NEWMERGE]]
 DESCRIPTION
 -----------
-Reads three tree-ish, and output trivial merge results and
-conflicting stages to the standard output.  This is similar to
-what three-way 'git read-tree -m' does, but instead of storing the
-results in the index, the command outputs the entries to the
-standard output.
-
-This is meant to be used by higher level scripts to compute
-merge results outside of the index, and stuff the results back into the
-index.  For this reason, the output from the command omits
-entries that match the <branch1> tree.
+
+This command has a modern `--write-tree` mode and a deprecated
+`--trivial-merge` mode.  With the exception of the
+<<DEPMERGE,DEPRECATED DESCRIPTION>> section at the end, the rest of
+this documentation describes modern `--write-tree` mode.
+
+Performs a merge, but does not make any new commits and does not read
+from or write to either the working tree or index.
+
+The performed merge will use the same features as the "real"
+linkgit:git-merge[1], including:
+
+  * three way content merges of individual files
+  * rename detection
+  * proper directory/file conflict handling
+  * recursive ancestor consolidation (i.e. when there is more than one
+    merge base, creating a virtual merge base by merging the merge bases)
+  * etc.
+
+After the merge completes, a new toplevel tree object is created.  See
+`OUTPUT` below for details.
+
+OPTIONS
+-------
+
+-z::
+       Do not quote filenames in the <Conflicted file info> section,
+       and end each filename with a NUL character rather than
+       newline.  Also begin the messages section with a NUL character
+       instead of a newline.  See <<OUTPUT>> below for more information.
+
+--name-only::
+       In the Conflicted file info section, instead of writing a list
+       of (mode, oid, stage, path) tuples to output for conflicted
+       files, just provide a list of filenames with conflicts (and
+       do not list filenames multiple times if they have multiple
+       conflicting stages).
+
+--[no-]messages::
+       Write any informational messages such as "Auto-merging <path>"
+       or CONFLICT notices to the end of stdout.  If unspecified, the
+       default is to include these messages if there are merge
+       conflicts, and to omit them otherwise.
+
+--allow-unrelated-histories::
+       merge-tree will by default error out if the two branches specified
+       share no common history.  This flag can be given to override that
+       check and make the merge proceed anyway.
+
+[[OUTPUT]]
+OUTPUT
+------
+
+For a successful merge, the output from git-merge-tree is simply one
+line:
+
+       <OID of toplevel tree>
+
+Whereas for a conflicted merge, the output is by default of the form:
+
+       <OID of toplevel tree>
+       <Conflicted file info>
+       <Informational messages>
+
+These are discussed individually below.
+
+[[OIDTLT]]
+OID of toplevel tree
+~~~~~~~~~~~~~~~~~~~~
+
+This is a tree object that represents what would be checked out in the
+working tree at the end of `git merge`.  If there were conflicts, then
+files within this tree may have embedded conflict markers.  This section
+is always followed by a newline (or NUL if `-z` is passed).
+
+[[CFI]]
+Conflicted file info
+~~~~~~~~~~~~~~~~~~~~
+
+This is a sequence of lines with the format
+
+       <mode> <object> <stage> <filename>
+
+The filename will be quoted as explained for the configuration
+variable `core.quotePath` (see linkgit:git-config[1]).  However, if
+the `--name-only` option is passed, the mode, object, and stage will
+be omitted.  If `-z` is passed, the "lines" are terminated by a NUL
+character instead of a newline character.
+
+[[IM]]
+Informational messages
+~~~~~~~~~~~~~~~~~~~~~~
+
+This always starts with a blank line (or NUL if `-z` is passed) to
+separate it from the previous sections, and then has free-form
+messages about the merge, such as:
+
+  * "Auto-merging <file>"
+  * "CONFLICT (rename/delete): <oldfile> renamed...but deleted in..."
+  * "Failed to merge submodule <submodule> (<reason>)"
+  * "Warning: cannot merge binary files: <filename>"
+
+Note that these free-form messages will never have a NUL character
+in or between them, even if -z is passed.  It is simply a large block
+of text taking up the remainder of the output.
+
+EXIT STATUS
+-----------
+
+For a successful, non-conflicted merge, the exit status is 0.  When the
+merge has conflicts, the exit status is 1.  If the merge is not able to
+complete (or start) due to some kind of error, the exit status is
+something other than 0 or 1 (and the output is unspecified).
+
+USAGE NOTES
+-----------
+
+This command is intended as low-level plumbing, similar to
+linkgit:git-hash-object[1], linkgit:git-mktree[1],
+linkgit:git-commit-tree[1], linkgit:git-write-tree[1],
+linkgit:git-update-ref[1], and linkgit:git-mktag[1].  Thus, it can be
+used as a part of a series of steps such as:
+
+       NEWTREE=$(git merge-tree --write-tree $BRANCH1 $BRANCH2)
+       test $? -eq 0 || die "There were conflicts..."
+       NEWCOMMIT=$(git commit-tree $NEWTREE -p $BRANCH1 -p $BRANCH2)
+       git update-ref $BRANCH1 $NEWCOMMIT
+
+Note that when the exit status is non-zero, `NEWTREE` in this sequence
+will contain a lot more output than just a tree.
+
+For conflicts, the output includes the same information that you'd get
+with linkgit:git-merge[1]:
+
+  * what would be written to the working tree (the
+    <<OIDTLT,OID of toplevel tree>>)
+  * the higher order stages that would be written to the index (the
+    <<CFI,Conflicted file info>>)
+  * any messages that would have been printed to stdout (the
+    <<IM,Informational messages>>)
+
+MISTAKES TO AVOID
+-----------------
+
+Do NOT look through the resulting toplevel tree to try to find which
+files conflict; parse the <<CFI,Conflicted file info>> section instead.
+Not only would parsing an entire tree be horrendously slow in large
+repositories, there are numerous types of conflicts not representable by
+conflict markers (modify/delete, mode conflict, binary file changed on
+both sides, file/directory conflicts, various rename conflict
+permutations, etc.)
+
+Do NOT interpret an empty <<CFI,Conflicted file info>> list as a clean
+merge; check the exit status.  A merge can have conflicts without having
+individual files conflict (there are a few types of directory rename
+conflicts that fall into this category, and others might also be added
+in the future).
+
+Do NOT attempt to guess or make the user guess the conflict types from
+the <<CFI,Conflicted file info>> list.  The information there is
+insufficient to do so.  For example: Rename/rename(1to2) conflicts (both
+sides renamed the same file differently) will result in three different
+files having higher order stages (but each only has one higher order
+stage), with no way (short of the <<IM,Informational messages>> section)
+to determine which three files are related.  File/directory conflicts
+also result in a file with exactly one higher order stage.
+Possibly-involved-in-directory-rename conflicts (when
+"merge.directoryRenames" is unset or set to "conflicts") also result in
+a file with exactly one higher order stage.  In all cases, the
+<<IM,Informational messages>> section has the necessary info, though it
+is not designed to be machine parseable.
+
+Do NOT assume that the paths from <<CFI,Conflicted file info>> and
+the logical conflicts in the <<IM,Informational messages>> have a
+one-to-one mapping, nor that there is a one-to-many mapping, nor a
+many-to-one mapping.  Many-to-many mappings exist, meaning that each
+path can have many logical conflict types in a single merge, and each
+logical conflict type can affect many paths.
+
+Do NOT assume all filenames listed in the <<IM,Informational messages>>
+section had conflicts.  Messages can be included for files that have no
+conflicts, such as "Auto-merging <file>".
+
+AVOID taking the OIDS from the <<CFI,Conflicted file info>> and
+re-merging them to present the conflicts to the user.  This will lose
+information.  Instead, look up the version of the file found within the
+<<OIDTLT,OID of toplevel tree>> and show that instead.  In particular,
+the latter will have conflict markers annotated with the original
+branch/commit being merged and, if renames were involved, the original
+filename.  While you could include the original branch/commit in the
+conflict marker annotations when re-merging, the original filename is
+not available from the <<CFI,Conflicted file info>> and thus you would
+be losing information that might help the user resolve the conflict.
+
+[[DEPMERGE]]
+DEPRECATED DESCRIPTION
+----------------------
+
+Per the <<NEWMERGE,DESCRIPTION>> and unlike the rest of this
+documentation, this section describes the deprecated `--trivial-merge`
+mode.
+
+Other than the optional `--trivial-merge`, this mode accepts no
+options.
+
+This mode reads three tree-ish, and outputs trivial merge results and
+conflicting stages to the standard output in a semi-diff format.
+Since this was designed for higher level scripts to consume and merge
+the results back into the index, it omits entries that match
+<branch1>.  The result of this second form is similar to what
+three-way 'git read-tree -m' does, but instead of storing the results
+in the index, the command outputs the entries to the standard output.
+
+This form not only has limited applicability (a trivial merge cannot
+handle content merges of individual files, rename detection, proper
+directory/file conflict handling, etc.), but its output format is also
+difficult to work with, and it will generally be less performant than
+the first form even on successful merges (especially if working in
+large repositories).
 
 GIT
 ---
index 3125473cc1d19140cf54f5286106b86823a9b91f..2d6a1391c89412e66aa1227201bdd89ba2f0f27a 100644 (file)
@@ -90,10 +90,7 @@ invocations. The automated message can include the branch description.
 If `--log` is specified, a shortlog of the commits being merged
 will be appended to the specified message.
 
---rerere-autoupdate::
---no-rerere-autoupdate::
-       Allow the rerere mechanism to update the index with the
-       result of auto-conflict resolution if possible.
+include::rerere-options.txt[]
 
 --overwrite-ignore::
 --no-overwrite-ignore::
@@ -386,13 +383,16 @@ include::merge-strategies.txt[]
 
 CONFIGURATION
 -------------
-include::config/merge.txt[]
 
 branch.<name>.mergeOptions::
        Sets default options for merging into branch <name>. The syntax and
        supported options are the same as those of 'git merge', but option
        values containing whitespace characters are currently not supported.
 
+include::includes/cmd-config-section-rest.txt[]
+
+include::config/merge.txt[]
+
 SEE ALSO
 --------
 linkgit:git-fmt-merge-msg[1], linkgit:git-pull[1],
index f784027bc13724712f902b115abd98bc1c04b43f..c44e205629bf521fbb97c2b49493fcc9b9450932 100644 (file)
@@ -102,6 +102,9 @@ success of the resolution after the custom tool has exited.
 CONFIGURATION
 -------------
 :git-mergetool: 1
+
+include::includes/cmd-config-section-all.txt[]
+
 include::config/mergetool.txt[]
 
 TEMPORARY FILES
index c588fb91af1995c844108cee346512815e39cb3d..a48c3d5ea6301abcdda5df021ac8442ed149f23c 100644 (file)
@@ -128,8 +128,8 @@ $ git multi-pack-index verify
 SEE ALSO
 --------
 See link:technical/multi-pack-index.html[The Multi-Pack-Index Design
-Document] and link:technical/pack-format.html[The Multi-Pack-Index
-Format] for more information on the multi-pack-index feature.
+Document] and linkgit:gitformat-pack[5] for more information on the
+multi-pack-index feature and its file format.
 
 
 GIT
index 0a4200674c40af473d135ccd5df129f28a684864..efbc10f0f598eccd247f13c3e5d1ecf9473a6f78 100644 (file)
@@ -44,7 +44,7 @@ using the `--notes` option. Such notes are added as a patch commentary
 after a three dash separator line.
 
 To change which notes are shown by 'git log', see the
-"notes.displayRef" configuration in linkgit:git-log[1].
+"notes.displayRef" discussion in <<CONFIGURATION>>.
 
 See the "notes.rewrite.<command>" configuration for a way to carry
 notes across commands that rewrite commits.
@@ -307,6 +307,7 @@ with 'git log', so if you use such notes, you'll probably need to write
 some special-purpose tools to do something useful with them.
 
 
+[[CONFIGURATION]]
 CONFIGURATION
 -------------
 
@@ -316,57 +317,9 @@ core.notesRef::
        This setting can be overridden through the environment and
        command line.
 
-notes.mergeStrategy::
-       Which merge strategy to choose by default when resolving notes
-       conflicts.  Must be one of `manual`, `ours`, `theirs`, `union`, or
-       `cat_sort_uniq`.  Defaults to `manual`.  See "NOTES MERGE STRATEGIES"
-       section above for more information on each strategy.
-+
-This setting can be overridden by passing the `--strategy` option.
-
-notes.<name>.mergeStrategy::
-       Which merge strategy to choose when doing a notes merge into
-       refs/notes/<name>.  This overrides the more general
-       "notes.mergeStrategy".  See the "NOTES MERGE STRATEGIES" section above
-       for more information on each available strategy.
-
-notes.displayRef::
-       Which ref (or refs, if a glob or specified more than once), in
-       addition to the default set by `core.notesRef` or
-       `GIT_NOTES_REF`, to read notes from when showing commit
-       messages with the 'git log' family of commands.
-       This setting can be overridden on the command line or by the
-       `GIT_NOTES_DISPLAY_REF` environment variable.
-       See linkgit:git-log[1].
-
-notes.rewrite.<command>::
-       When rewriting commits with <command> (currently `amend` or
-       `rebase`), if this variable is `false`, git will not copy
-       notes from the original to the rewritten commit.  Defaults to
-       `true`.  See also "`notes.rewriteRef`" below.
-+
-This setting can be overridden by the `GIT_NOTES_REWRITE_REF`
-environment variable.
+include::includes/cmd-config-section-rest.txt[]
 
-notes.rewriteMode::
-       When copying notes during a rewrite, what to do if the target
-       commit already has a note.  Must be one of `overwrite`,
-       `concatenate`, `cat_sort_uniq`, or `ignore`.  Defaults to
-       `concatenate`.
-+
-This setting can be overridden with the `GIT_NOTES_REWRITE_MODE`
-environment variable.
-
-notes.rewriteRef::
-       When copying notes during a rewrite, specifies the (fully
-       qualified) ref whose notes should be copied.  May be a glob,
-       in which case notes in all matching refs will be copied.  You
-       may also specify this configuration several times.
-+
-Does not have a default value; you must configure this variable to
-enable note rewriting.
-+
-Can be overridden with the `GIT_NOTES_REWRITE_REF` environment variable.
+include::config/notes.txt[]
 
 
 ENVIRONMENT
index 2f25aa3a291b529be87823765e295934f62484e6..def7657ef9cb71b5da3eb36b2b8f439f172651a3 100644 (file)
@@ -692,6 +692,13 @@ a `git gc` command on the origin repository.
 
 include::transfer-data-leaks.txt[]
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/push.txt[]
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index fe350d7f4056e280c9740ae9c601f0c7eb2a1119..0b393715d707015a245eb1afe26dfbe044e8fcd6 100644 (file)
@@ -12,6 +12,7 @@ SYNOPSIS
        [--no-dual-color] [--creation-factor=<factor>]
        [--left-only | --right-only]
        ( <range1> <range2> | <rev1>...<rev2> | <base> <rev1> <rev2> )
+       [[--] <path>...]
 
 DESCRIPTION
 -----------
@@ -19,6 +20,9 @@ DESCRIPTION
 This command shows the differences between two versions of a patch
 series, or more generally, two commit ranges (ignoring merge commits).
 
+In the presence of `<path>` arguments, these commit ranges are limited
+accordingly.
+
 To that end, it first finds pairs of commits from both commit ranges
 that correspond with each other. Two commits are said to correspond when
 the diff between their patches (i.e. the author information, the commit
index a872ab0fbd1a9a615dd8d370bb5cdd74f7bab918..9cb8931c7ac8e89e1db036446044633cac00bb39 100644 (file)
@@ -376,10 +376,7 @@ See also INCOMPATIBLE OPTIONS below.
 +
 See also INCOMPATIBLE OPTIONS below.
 
---rerere-autoupdate::
---no-rerere-autoupdate::
-       Allow the rerere mechanism to update the index with the
-       result of auto-conflict resolution if possible.
+include::rerere-options.txt[]
 
 -S[<keyid>]::
 --gpg-sign[=<keyid>]::
@@ -612,6 +609,15 @@ provided. Otherwise an explicit `--no-reschedule-failed-exec` at the
 start would be overridden by the presence of
 `rebase.rescheduleFailedExec=true` configuration.
 
+--update-refs::
+--no-update-refs::
+       Automatically force-update any branches that point to commits that
+       are being rebased. Any branches that are checked out in a worktree
+       are not updated in this way.
++
+If the configuration variable `rebase.updateRefs` is set, then this option
+can be used to override and disable this setting.
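++
+For example, to rebase a stack of branches onto `main` and move the
+intermediate branch tips along with it (the branch name is
+illustrative):
++
+------------
+$ git rebase --update-refs main
+------------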
+
 INCOMPATIBLE OPTIONS
 --------------------
 
@@ -635,6 +641,7 @@ are incompatible with the following options:
  * --empty=
  * --reapply-cherry-picks
  * --edit-todo
+ * --update-refs
  * --root when used in combination with --onto
 
 In addition, the following pairs of options are incompatible:
@@ -1252,6 +1259,8 @@ merge cmake
 CONFIGURATION
 -------------
 
+include::includes/cmd-config-section-all.txt[]
+
 include::config/rebase.txt[]
 include::config/sequencer.txt[]
 
index 8463fe9cf75a3e9aa9cd2dced9907c6c00a87693..5016755efb61ded8351884a978b3febfc35040e6 100644 (file)
@@ -112,10 +112,7 @@ effect to your index in a row.
        Pass the merge strategy-specific option through to the
        merge strategy.  See linkgit:git-merge[1] for details.
 
---rerere-autoupdate::
---no-rerere-autoupdate::
-       Allow the rerere mechanism to update the index with the
-       result of auto-conflict resolution if possible.
+include::rerere-options.txt[]
 
 --reference::
        Instead of starting the body of the log message with "This
@@ -145,6 +142,13 @@ EXAMPLES
        changes. The revert only modifies the working tree and the
        index.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/revert.txt[]
+
 SEE ALSO
 --------
 linkgit:git-cherry-pick[1]
index 41cd8cb42472a71d93f4f2f6379445f453b7be06..3290043053aa6d16f6a0d63f1c9a04e54e349e36 100644 (file)
@@ -456,41 +456,9 @@ Information
 CONFIGURATION
 -------------
 
-sendemail.aliasesFile::
-       To avoid typing long email addresses, point this to one or more
-       email aliases files.  You must also supply `sendemail.aliasFileType`.
+include::includes/cmd-config-section-all.txt[]
 
-sendemail.aliasFileType::
-       Format of the file(s) specified in sendemail.aliasesFile. Must be
-       one of 'mutt', 'mailrc', 'pine', 'elm', or 'gnus', or 'sendmail'.
-+
-What an alias file in each format looks like can be found in
-the documentation of the email program of the same name. The
-differences and limitations from the standard formats are
-described below:
-+
---
-sendmail;;
-*      Quoted aliases and quoted addresses are not supported: lines that
-       contain a `"` symbol are ignored.
-*      Redirection to a file (`/path/name`) or pipe (`|command`) is not
-       supported.
-*      File inclusion (`:include: /path/name`) is not supported.
-*      Warnings are printed on the standard error output for any
-       explicitly unsupported constructs, and any other lines that are not
-       recognized by the parser.
---
-
-sendemail.multiEdit::
-       If true (default), a single editor instance will be spawned to edit
-       files you have to edit (patches when `--annotate` is used, and the
-       summary when `--compose` is used). If false, files will be edited one
-       after the other, spawning a new editor each time.
-
-sendemail.confirm::
-       Sets the default for whether to confirm before sending. Must be
-       one of 'always', 'never', 'cc', 'compose', or 'auto'. See `--confirm`
-       in the previous section for the meaning of these values.
+include::config/sendemail.txt[]
 
 EXAMPLES
 --------
index 5cc2fcefbab1bcc80b33192e7ac06a3a5a92efd2..e5ec6b467f9f3f765d31949725ddc0c7c634391f 100644 (file)
@@ -199,6 +199,13 @@ shows 10 reflog entries going back from the tip as of 1 hour ago.
 Without `--list`, the output also shows how these tips are
 topologically related with each other.
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/showbranch.txt[]
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index 6e15f47525765ce3da66b793ab519abd1cd90784..c5d70918283836d9a57c1af4710ce69fba78e3f1 100644 (file)
@@ -382,6 +382,13 @@ grep commit | cut -d\  -f3 |
 xargs git log --merges --no-walk --grep=WIP
 ----------------------------------------------------------------
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/stash.txt[]
+
 
 SEE ALSO
 --------
index bbcbdceb459c2828e0eb46110d038137be1f6f92..c60fc9c138b5981ce5727efa4482350e699dbac5 100644 (file)
@@ -265,6 +265,13 @@ always create a new name for it (without switching away):
 $ git switch -c good-surprises
 ------------
 
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/checkout.txt[]
+
 SEE ALSO
 --------
 linkgit:git-checkout[1],
index 5ea2f2c60e45a3c91422fcaa2457c100f450fb71..f4bb9c5daf95c6669210429d7a560565dadc91d5 100644 (file)
@@ -420,7 +420,7 @@ as `switch`, `pull`, `merge`) will avoid writing these files.
 However, these commands will sometimes write these files anyway in
 important cases such as conflicts during a merge or rebase.  Git
 commands will also avoid treating the lack of such files as an
-intentional deletion; for example `git add -u` will not not stage a
+intentional deletion; for example `git add -u` will not stage a
 deletion for these files and `git commit -a` will not make a commit
 deleting them either.
 
index 8f87b23ea86a3dfecb2a736d943a6728b6c9f767..b656b4756752f6d91098b85d2766a80e86ca092a 100644 (file)
@@ -39,10 +39,9 @@ OPTIONS
 --http-backend-info-refs::
        Used by linkgit:git-http-backend[1] to serve up
        `$GIT_URL/info/refs?service=git-upload-pack` requests. See
-       "Smart Clients" in link:technical/http-protocol.html[the HTTP
-       transfer protocols] documentation and "HTTP Transport" in
-       link:technical/protocol-v2.html[the Git Wire Protocol, Version
-       2] documentation. Also understood by
+       "Smart Clients" in linkgit:gitprotocol-http[5] and "HTTP
+       Transport" in the linkgit:gitprotocol-v2[5]
+       documentation. Also understood by
        linkgit:git-receive-pack[1].
 
 <directory>::
index 47a6095ff40f2c3f2f1d8f5684c92dd19b616b83..0ef7f5e4ecebf5480e7878d10f73b6d20d98c7ed 100644 (file)
@@ -339,6 +339,23 @@ The following documentation pages are guides about Git concepts.
 
 include::cmds-guide.txt[]
 
+Repository, command and file interfaces
+---------------------------------------
+
+This documentation discusses repository and command interfaces which
+users are expected to interact with directly. See `--user-formats` in
+linkgit:git-help[1] for more details on the criteria.
+
+include::cmds-userinterfaces.txt[]
+
+File formats, protocols and other developer interfaces
+------------------------------------------------------
+
+This documentation discusses file formats, over-the-wire protocols and
+other git developer interfaces. See `--developer-interfaces` in
+linkgit:git-help[1].
+
+include::cmds-developerinterfaces.txt[]
 
 Configuration Mechanism
 -----------------------
similarity index 79%
rename from Documentation/technical/bundle-format.txt
rename to Documentation/gitformat-bundle.txt
index b9be8644cf5d53161b7190f580be935b28d402a3..00e0a20e6571969ebeabca8da5d9815be65d682b 100644 (file)
@@ -1,11 +1,33 @@
-= Git bundle v2 format
+gitformat-bundle(5)
+===================
 
-The Git bundle format is a format that represents both refs and Git objects.
+NAME
+----
+gitformat-bundle - The bundle file format
+
+
+SYNOPSIS
+--------
+[verse]
+*.bundle
+*.bdl
+
+DESCRIPTION
+-----------
+
+The Git bundle format is a format that represents both refs and Git
+objects. A bundle is a header in a format similar to
+linkgit:git-show-ref[1] followed by a pack in *.pack format.
 
-== Format
+The format is created and read by the linkgit:git-bundle[1] command,
+and supported by e.g. linkgit:git-fetch[1] and linkgit:git-clone[1].
+
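As a brief illustration of producing and consuming a file in this format with the commands named above (file and directory names are hypothetical):

----
$ git bundle create repo.bundle --all    # write refs plus a single pack into one bundle file
$ git clone repo.bundle my-clone         # clone (or fetch) from the bundle as if it were a remote
----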
+
+FORMAT
+------
 
 We will use ABNF notation to define the Git bundle format. See
-protocol-common.txt for the details.
+linkgit:gitprotocol-common[5] for the details.
 
 A v2 bundle looks like this:
 
@@ -36,7 +58,9 @@ value        = *(%01-09 / %0b-FF)
 pack         = ... ; packfile
 ----
 
-== Semantics
+
+SEMANTICS
+---------
 
 A Git bundle consists of several parts.
 
@@ -62,13 +86,15 @@ In the bundle format, there can be a comment following a prerequisite obj-id.
 This is a comment and it has no specific meaning. The writer of the bundle MAY
 put any string here. The reader of the bundle MUST ignore the comment.
 
-=== Note on the shallow clone and a Git bundle
+Note on the shallow clone and a Git bundle
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Note that the prerequisites does not represent a shallow-clone boundary. The
 semantics of the prerequisites and the shallow-clone boundaries are different,
 and the Git bundle v2 format cannot represent a shallow clone repository.
 
-== Capabilities
+CAPABILITIES
+------------
 
 Because there is no opportunity for negotiation, unknown capabilities cause 'git
 bundle' to abort.
@@ -79,3 +105,7 @@ bundle' to abort.
 * `filter` specifies an object filter as in the `--filter` option in
   linkgit:git-rev-list[1]. The resulting pack-file must be marked as a
   `.promisor` pack-file after it is unbundled.
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 89%
rename from Documentation/technical/chunk-format.txt
rename to Documentation/gitformat-chunk.txt
index 593614fcedab47336926c81b3c1708c7358edd01..57202ede273ad266be910045294fd74853dd193c 100644 (file)
@@ -1,12 +1,25 @@
-Chunk-based file formats
-========================
+gitformat-chunk(5)
+==================
+
+NAME
+----
+gitformat-chunk - Chunk-based file formats
+
+SYNOPSIS
+--------
+
+Used by linkgit:gitformat-commit-graph[5] and the "MIDX" format (see
+the pack format documentation in linkgit:gitformat-pack[5]).
+
+DESCRIPTION
+-----------
 
 Some file formats in Git use a common concept of "chunks" to describe
 sections of the file. This allows structured access to a large file by
 scanning a small "table of contents" for the remaining data. This common
 format is used by the `commit-graph` and `multi-pack-index` files. See
-link:technical/pack-format.html[the `multi-pack-index` format] and
-link:technical/commit-graph-format.html[the `commit-graph` format] for
+the `multi-pack-index` format in linkgit:gitformat-pack[5] and
+the `commit-graph` format in linkgit:gitformat-commit-graph[5] for
 how they use the chunks to describe structured data.
 
 A chunk-based file format begins with some header information custom to
@@ -108,9 +121,13 @@ for future formats:
 * *commit-graph:* see `write_commit_graph_file()` and `parse_commit_graph()`
   in `commit-graph.c` for how the chunk-format API is used to write and
   parse the commit-graph file format documented in
-  link:technical/commit-graph-format.html[the commit-graph file format].
+  the commit-graph file format in linkgit:gitformat-commit-graph[5].
 
 * *multi-pack-index:* see `write_midx_internal()` and `load_multi_pack_index()`
   in `midx.c` for how the chunk-format API is used to write and
   parse the multi-pack-index file format documented in
-  link:technical/pack-format.html[the multi-pack-index file format].
+  the multi-pack-index file format section of linkgit:gitformat-pack[5].
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 87%
rename from Documentation/technical/commit-graph-format.txt
rename to Documentation/gitformat-commit-graph.txt
index 484b185ba98bdc00efc147851abdcbe4958ee21f..7324665716d7db7f352354f80c61406629264e95 100644 (file)
@@ -1,5 +1,18 @@
-Git commit graph format
-=======================
+gitformat-commit-graph(5)
+=========================
+
+NAME
+----
+gitformat-commit-graph - Git commit graph format
+
+SYNOPSIS
+--------
+[verse]
+$GIT_DIR/objects/info/commit-graph
+$GIT_DIR/objects/info/commit-graphs/*
+
+DESCRIPTION
+-----------
 
 The Git commit graph stores a list of commit OIDs and some associated
 metadata, including:
@@ -30,7 +43,7 @@ and hash type.
 
 All multi-byte numbers are in network byte order.
 
-HEADER:
+=== HEADER:
 
   4-byte signature:
       The signature is: {'C', 'G', 'P', 'H'}
@@ -52,7 +65,7 @@ HEADER:
       We infer the length (H*B) of the Base Graphs chunk
       from this value.
 
-CHUNK LOOKUP:
+=== CHUNK LOOKUP:
 
   (C + 1) * 12 bytes listing the table of contents for the chunks:
       First 4 bytes describe the chunk id. Value 0 is a terminating label.
@@ -62,23 +75,23 @@ CHUNK LOOKUP:
       ID appears at most once.
 
   The CHUNK LOOKUP matches the table of contents from
-  link:technical/chunk-format.html[the chunk-based file format].
+  the chunk-based file format, see linkgit:gitformat-chunk[5]
 
   The remaining data in the body is described one chunk at a time, and
   these chunks may be given in any order. Chunks are required unless
   otherwise specified.
 
-CHUNK DATA:
+=== CHUNK DATA:
 
-  OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+==== OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
       The ith entry, F[i], stores the number of OIDs with first
       byte at most i. Thus F[255] stores the total
       number of commits (N).
 
-  OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+====  OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
       The OIDs for all commits in the graph, sorted in ascending order.
 
-  Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
+====  Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
     * The first H bytes are for the OID of the root tree.
     * The next 8 bytes are for the positions of the first two parents
       of the ith commit. Stores value 0x70000000 if no parent in that
@@ -93,7 +106,7 @@ CHUNK DATA:
       2 bits of the lowest byte, storing the 33rd and 34th bit of the
       commit time.
 
-  Generation Data (ID: {'G', 'D', 'A', '2' }) (N * 4 bytes) [Optional]
+==== Generation Data (ID: {'G', 'D', 'A', '2' }) (N * 4 bytes) [Optional]
     * This list of 4-byte values store corrected commit date offsets for the
       commits, arranged in the same order as commit data chunk.
     * If the corrected commit date offset cannot be stored within 31 bits,
@@ -104,7 +117,7 @@ CHUNK DATA:
       by compatible versions of Git and in case of split commit-graph chains,
       the topmost layer also has Generation Data chunk.
 
-  Generation Data Overflow (ID: {'G', 'D', 'O', '2' }) [Optional]
+==== Generation Data Overflow (ID: {'G', 'D', 'O', '2' }) [Optional]
     * This list of 8-byte values stores the corrected commit date offsets
       for commits with corrected commit date offsets that cannot be
       stored within 31 bits.
@@ -112,7 +125,7 @@ CHUNK DATA:
       chunk is present and atleast one corrected commit date offset cannot
       be stored within 31 bits.
 
-  Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+==== Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
       This list of 4-byte values store the second through nth parents for
       all octopus merges. The second parent value in the commit data stores
       an array position within this list along with the most-significant bit
@@ -120,14 +133,14 @@ CHUNK DATA:
       positions for the parents until reaching a value with the most-significant
       bit on. The other bits correspond to the position of the last parent.
 
-  Bloom Filter Index (ID: {'B', 'I', 'D', 'X'}) (N * 4 bytes) [Optional]
+==== Bloom Filter Index (ID: {'B', 'I', 'D', 'X'}) (N * 4 bytes) [Optional]
     * The ith entry, BIDX[i], stores the number of bytes in all Bloom filters
       from commit 0 to commit i (inclusive) in lexicographic order. The Bloom
       filter for the i-th commit spans from BIDX[i-1] to BIDX[i] (plus header
       length), where BIDX[-1] is 0.
     * The BIDX chunk is ignored if the BDAT chunk is not present.
 
-  Bloom Filter Data (ID: {'B', 'D', 'A', 'T'}) [Optional]
+==== Bloom Filter Data (ID: {'B', 'D', 'A', 'T'}) [Optional]
     * It starts with header consisting of three unsigned 32-bit integers:
       - Version of the hash algorithm being used. We currently only support
        value 1 which corresponds to the 32-bit version of the murmur3 hash
@@ -147,13 +160,13 @@ CHUNK DATA:
       of length one, with either all bits set to zero or one respectively.
     * The BDAT chunk is present if and only if BIDX is present.
 
-  Base Graphs List (ID: {'B', 'A', 'S', 'E'}) [Optional]
+==== Base Graphs List (ID: {'B', 'A', 'S', 'E'}) [Optional]
       This list of H-byte hashes describe a set of B commit-graph files that
       form a commit-graph chain. The graph position for the ith commit in this
       file's OID Lookup chunk is equal to i plus the number of commits in all
       base graphs.  If B is non-zero, this chunk must exist.
 
-TRAILER:
+=== TRAILER:
 
        H-byte HASH-checksum of all of the above.
 
@@ -164,3 +177,7 @@ the number '2' in their chunk IDs because a previous version of Git wrote
 possibly erroneous data in these chunks with the IDs "GDAT" and "GDOV". By
 changing the IDs, newer versions of Git will silently ignore those older
 chunks and write the new information without trusting the incorrect data.
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 98%
rename from Documentation/technical/index-format.txt
rename to Documentation/gitformat-index.txt
index f691c20ab0a14dc8ecf18405d54c5572905d1606..015cb21bdc089a30e5403877ca1c61bea2b04e24 100644 (file)
@@ -1,5 +1,19 @@
+gitformat-index(5)
+==================
+
+NAME
+----
+gitformat-index - Git index format
+
+SYNOPSIS
+--------
+[verse]
+$GIT_DIR/index
+
+DESCRIPTION
+-----------
+
 Git index format
-================
 
 == The Git index file has the following format
 
@@ -125,7 +139,7 @@ Git index format
     entry is encoded as if the path name for the previous entry is an
     empty string).  At the beginning of an entry, an integer N in the
     variable width encoding (the same encoding as the offset is encoded
-    for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
+    for OFS_DELTA pack entries; see linkgit:gitformat-pack[5]) is stored, followed
     by a NUL-terminated string S.  Removing N bytes from the end of the
     path name for the previous entry, and replacing it with the string S
     yields the path name for this entry.
@@ -402,3 +416,7 @@ The remaining data of each directory block is grouped by type:
   with signature { 's', 'd', 'i', 'r' }. Like the split-index extension,
   tools should avoid interacting with a sparse index unless they understand
   this extension.
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 72%
rename from Documentation/technical/pack-format.txt
rename to Documentation/gitformat-pack.txt
index b520aa9c45bf6cf9b12d6ef798658c298b1888a2..e06af02f211e7a7a8488d1470d7f76dd58e5989c 100644 (file)
@@ -1,5 +1,30 @@
-Git pack format
-===============
+gitformat-pack(5)
+=================
+
+NAME
+----
+gitformat-pack - Git pack format
+
+
+SYNOPSIS
+--------
+[verse]
+$GIT_DIR/objects/pack/pack-*.{pack,idx}
+$GIT_DIR/objects/pack/pack-*.rev
+$GIT_DIR/objects/pack/pack-*.mtimes
+$GIT_DIR/objects/pack/multi-pack-index
+
+DESCRIPTION
+-----------
+
+The Git pack format is how Git stores most of its primary repository
+data. Over the lifetime of a repository, loose objects (if any) and
+smaller packs are consolidated into larger pack(s). See
+linkgit:git-gc[1] and linkgit:git-pack-objects[1].
+
+The pack format is also used over-the-wire, see
+e.g. linkgit:gitprotocol-v2[5], as well as being a part of
+other container formats in the case of linkgit:gitformat-bundle[5].
 
 == Checksums and object IDs
 
@@ -356,7 +381,7 @@ CHUNK LOOKUP:
            using the next chunk position if necessary.)
 
        The CHUNK LOOKUP matches the table of contents from
-       link:technical/chunk-format.html[the chunk-based file format].
+       the chunk-based file format, see linkgit:gitformat-chunk[5].
 
        The remaining data in the body is described one chunk at a time, and
        these chunks may be given in any order. Chunks are required unless
@@ -482,3 +507,132 @@ packs arranged in MIDX order (with the preferred pack coming first).
 
 The MIDX's reverse index is stored in the optional 'RIDX' chunk within
 the MIDX itself.
+
+== cruft packs
+
+The cruft packs feature offers an alternative to Git's traditional mechanism of
+removing unreachable objects. This document provides an overview of Git's
+pruning mechanism, and how a cruft pack can be used instead to accomplish the
+same.
+
+=== Background
+
+To remove unreachable objects from your repository, Git offers `git repack -Ad`
+(see linkgit:git-repack[1]). Quoting from the documentation:
+
+----
+[...] unreachable objects in a previous pack become loose, unpacked objects,
+instead of being left in the old pack. [...] loose unreachable objects will be
+pruned according to normal expiry rules with the next 'git gc' invocation.
+----
+
+Unreachable objects aren't removed immediately, since doing so could race with
+an incoming push which may reference an object which is about to be deleted.
+Instead, those unreachable objects are stored as loose objects and stay that way
+until they are older than the expiration window, at which point they are removed
+by linkgit:git-prune[1].
+
+Git must store these unreachable objects loose in order to keep track of their
+per-object mtimes. If these unreachable objects were written into one big pack,
+then either freshening that pack (because an object contained within it was
+re-written) or creating a new pack of unreachable objects would cause the pack's
+mtime to get updated, and the objects within it would never leave the expiration
+window. Instead, objects are stored loose in order to keep track of the
+individual object mtimes and avoid a situation where all cruft objects are
+freshened at once.
+
+This can lead to undesirable situations when a repository contains many
+unreachable objects which have not yet left the grace period. Having large
+directories in the shards of `.git/objects` can lead to decreased performance in
+the repository. But given enough unreachable objects, this can lead to inode
+starvation and degrade the performance of the whole system. Since we
+can never pack those objects, these repositories often take up a large amount of
+disk space, since we can only zlib compress them, but not store them in delta
+chains.
+
+=== Cruft packs
+
+A cruft pack eliminates the need for storing unreachable objects in a loose
+state by including the per-object mtimes in a separate file alongside a single
+pack containing all loose objects.
+
+A cruft pack is written by `git repack --cruft` when generating a new pack,
+using linkgit:git-pack-objects[1]'s `--cruft` option. Note that `git repack --cruft`
+is a classic all-into-one repack, meaning that everything in the resulting pack is
+reachable, and everything else is unreachable. Once written, the `--cruft`
+option instructs `git repack` to generate another pack containing only objects
+not packed in the previous step (which equates to packing all unreachable
+objects together). This progresses as follows:
+
+  1. Enumerate every object, marking any object which is (a) not contained in a
+     kept-pack, and (b) whose mtime is within the grace period as a traversal
+     tip.
+
+  2. Perform a reachability traversal based on the tips gathered in the previous
+     step, adding every object along the way to the pack.
+
+  3. Write the pack out, along with a `.mtimes` file that records the per-object
+     timestamps.
+
+This mode is invoked internally by linkgit:git-repack[1] when instructed to
+write a cruft pack. Crucially, the set of in-core kept packs is exactly the set
+of packs which will not be deleted by the repack; in other words, they contain
+all of the repository's reachable objects.
+
+When a repository already has a cruft pack, `git repack --cruft` typically only
+adds objects to it. An exception to this is when `git repack` is given the
+`--cruft-expiration` option, which allows the generated cruft pack to omit
+expired objects instead of waiting for linkgit:git-gc[1] to expire those objects
+later on.
+
+It is linkgit:git-gc[1] that is typically responsible for removing expired
+unreachable objects.
+
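A hedged sketch of how these steps are typically driven from the command line (the expiry window shown is only an example):

----
$ git repack --cruft -d                                  # collect unreachable objects into a cruft pack with a .mtimes file
$ git repack --cruft --cruft-expiration=2.weeks.ago -d   # or drop already-expired unreachable objects while repacking
$ git gc --cruft --prune=2.weeks.ago                     # gc can likewise write cruft packs and expire old objects
----
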
+=== Caution for mixed-version environments
+
+Repositories that have cruft packs in them will continue to work with any older
+version of Git. Note, however, that previous versions of Git which do not
+understand the `.mtimes` file will use the cruft pack's mtime as the mtime for
+all of the objects in it. In other words, do not expect older (pre-cruft pack)
+versions of Git to interpret or even read the contents of the `.mtimes` file.
+
+Note that having mixed versions of Git GC-ing the same repository can lead to
+unreachable objects never being completely pruned. This can happen under the
+following circumstances:
+
+  - An older version of Git running GC explodes the contents of an existing
+    cruft pack loose, using the cruft pack's mtime.
+  - A newer version running GC collects those loose objects into a cruft pack,
+    where the `.mtimes` file reflects the loose objects' actual mtimes, but the
+    cruft pack mtime is "now".
+
+Repeating this process will lead to unreachable objects not getting pruned as a
+result of repeatedly resetting the objects' mtimes to the present time.
+
+If you are GC-ing repositories in a mixed version environment, consider omitting
+the `--cruft` option when using linkgit:git-repack[1] and linkgit:git-gc[1], and
+leaving the `gc.cruftPacks` configuration unset until all writers understand
+cruft packs.
+
+=== Alternatives
+
+Notable alternatives to this design include:
+
+  - The location of the per-object mtime data, and
+  - Storing unreachable objects in multiple cruft packs.
+
+On the location of mtime data, a new auxiliary file tied to the pack was chosen
+to avoid complicating the `.idx` format. If the `.idx` format were ever to gain
+support for optional chunks of data, it may make sense to consolidate the
+`.mtimes` format into the `.idx` itself.
+
+Storing unreachable objects among multiple cruft packs (e.g., creating a new
+cruft pack during each repacking operation including only unreachable objects
+which aren't already stored in an earlier cruft pack) is significantly more
+complicated to construct, and so isn't pursued here. The obvious drawback to
+the current implementation is that the entire cruft pack must be re-written from
+scratch.
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 96%
rename from Documentation/technical/signature-format.txt
rename to Documentation/gitformat-signature.txt
index 166721be6f648cdac8d0fc960e121bafc02cd226..a249869fafaa6b10e52c55c7187efb8d2e212e8e 100644 (file)
@@ -1,7 +1,18 @@
-Git signature format
-====================
+gitformat-signature(5)
+======================
 
-== Overview
+NAME
+----
+gitformat-signature - Git cryptographic signature formats
+
+SYNOPSIS
+--------
+[verse]
+<[tag|commit] object header(s)>
+<over-the-wire protocol>
+
+DESCRIPTION
+-----------
 
 Git uses cryptographic signatures in various places, currently objects (tags,
 commits, mergetags) and transactions (pushes). In every case, the command which
@@ -200,3 +211,7 @@ Date:   Wed Jun 15 09:13:29 2016 +0000
     # gpg:          There is no indication that the signature belongs to the owner.
     # Primary key fingerprint: D4BE 2231 1AD3 131E 5EDA  29A4 6109 2E85 B722 7189
 ----
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 96%
rename from Documentation/technical/protocol-capabilities.txt
rename to Documentation/gitprotocol-capabilities.txt
index 9dfade930dafc471aa3f684b0b5c1dc6feba6e2c..c6dcc7d565d7f0687cbf0bb2226b0288db968996 100644 (file)
@@ -1,8 +1,20 @@
-Git Protocol Capabilities
-=========================
+gitprotocol-capabilities(5)
+===========================
+
+NAME
+----
+gitprotocol-capabilities - Protocol v0 and v1 capabilities
+
+SYNOPSIS
+--------
+[verse]
+<over-the-wire-protocol>
+
+DESCRIPTION
+-----------
 
 NOTE: this document describes capabilities for versions 0 and 1 of the pack
-protocol. For version 2, please refer to the link:protocol-v2.html[protocol-v2]
+protocol. For version 2, please refer to the linkgit:gitprotocol-v2[5]
 doc.
 
 Servers SHOULD support all capabilities defined in this document.
@@ -77,7 +89,7 @@ interleaved with S-R-Q.
 multi_ack_detailed
 ------------------
 This is an extension of multi_ack that permits client to better
-understand the server's in-memory state. See pack-protocol.txt,
+understand the server's in-memory state. See linkgit:gitprotocol-pack[5],
 section "Packfile Negotiation" for more information.
 
 no-done
@@ -281,7 +293,7 @@ a packfile upload and reference update.  If the pushing client requests
 this capability, after unpacking and updating references the server
 will respond with whether the packfile unpacked successfully and if
 each reference was updated successfully.  If any of those were not
-successful, it will send back an error message.  See pack-protocol.txt
+successful, it will send back an error message.  See linkgit:gitprotocol-pack[5]
 for example messages.
 
 report-status-v2
@@ -292,7 +304,7 @@ adding new "option" directives in order to support reference rewritten by
 the "proc-receive" hook.  The "proc-receive" hook may handle a command
 for a pseudo-reference which may create or update a reference with
 different name, new-oid, and old-oid.  While the capability
-'report-status' cannot report for such case.  See pack-protocol.txt
+'report-status' cannot report for such case.  See linkgit:gitprotocol-pack[5]
 for details.
 
 delete-refs
@@ -378,3 +390,7 @@ packet-line, and must not contain non-printable or whitespace characters. The
 current implementation uses trace2 session IDs (see
 link:api-trace2.html[api-trace2] for details), but this may change and users of
 the session ID should not rely on this fact.
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 89%
rename from Documentation/technical/protocol-common.txt
rename to Documentation/gitprotocol-common.txt
index ecedb34bba54ecf8105336d5a98ca349ff35c63c..1486651bd1002f3c121c703caae154caa822fedc 100644 (file)
@@ -1,5 +1,20 @@
-Documentation Common to Pack and Http Protocols
-===============================================
+gitprotocol-common(5)
+=====================
+
+NAME
+----
+gitprotocol-common - Things common to various protocols
+
+SYNOPSIS
+--------
+[verse]
+<over-the-wire-protocol>
+
+DESCRIPTION
+-----------
+
+This document defines things common to various over-the-wire
+protocols and file formats used in Git.
 
 ABNF Notation
 -------------
@@ -97,3 +112,7 @@ Examples (as C-style strings):
   "000bfoobar\n"    "foobar\n"
   "0004"            ""
 ----
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 97%
rename from Documentation/technical/http-protocol.txt
rename to Documentation/gitprotocol-http.txt
index cc5126cfedaac2e14e3de48d9cafeb3c58bbb03e..ccc13f0a40758ac5f8268472354998d63331bf92 100644 (file)
@@ -1,5 +1,19 @@
-HTTP transfer protocols
-=======================
+gitprotocol-http(5)
+===================
+
+NAME
+----
+gitprotocol-http - Git HTTP-based protocols
+
+
+SYNOPSIS
+--------
+[verse]
+<over-the-wire-protocol>
+
+
+DESCRIPTION
+-----------
 
 Git supports two HTTP based transfer protocols.  A "dumb" protocol
 which requires only a standard HTTP server on the server end of the
@@ -222,7 +236,7 @@ smart server reply:
    S: 0000
 
 The client may send Extra Parameters (see
-Documentation/technical/pack-protocol.txt) as a colon-separated string
+linkgit:gitprotocol-pack[5]) as a colon-separated string
 in the Git-Protocol HTTP header.
 
 Uses the `--http-backend-info-refs` option to
@@ -512,11 +526,18 @@ the id obtained through ref discovery as old_id.
 
 TODO: Document this further.
 
-
-References
+REFERENCES
 ----------
 
 http://www.ietf.org/rfc/rfc1738.txt[RFC 1738: Uniform Resource Locators (URL)]
 http://www.ietf.org/rfc/rfc2616.txt[RFC 2616: Hypertext Transfer Protocol -- HTTP/1.1]
-link:technical/pack-protocol.html
-link:technical/protocol-capabilities.html
+
+SEE ALSO
+--------
+
+linkgit:gitprotocol-pack[5]
+linkgit:gitprotocol-capabilities[5]
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 98%
rename from Documentation/technical/pack-protocol.txt
rename to Documentation/gitprotocol-pack.txt
index e13a2c064d1244fc8926ee4831bc83186e0e514d..dd4108b7a3b95456e0887fc41aed4e381261bd41 100644 (file)
@@ -1,11 +1,23 @@
-Packfile transfer protocols
-===========================
+gitprotocol-pack(5)
+===================
+
+NAME
+----
+gitprotocol-pack - How packs are transferred over-the-wire
+
+SYNOPSIS
+--------
+[verse]
+<over-the-wire-protocol>
+
+DESCRIPTION
+-----------
 
 Git supports transferring data in packfiles over the ssh://, git://, http:// and
 file:// transports.  There exist two sets of protocols, one for pushing
 data from a client to a server and another for fetching data from a
 server to a client.  The three transports (ssh, git, file) use the same
-protocol to transfer data. http is documented in http-protocol.txt.
+protocol to transfer data. http is documented in linkgit:gitprotocol-http[5].
 
 The processes invoked in the canonical Git implementation are 'upload-pack'
 on the server side and 'fetch-pack' on the client side for fetching data;
@@ -18,7 +30,7 @@ pkt-line Format
 ---------------
 
 The descriptions below build on the pkt-line format described in
-protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless
+linkgit:gitprotocol-common[5]. When the grammar indicate `PKT-LINE(...)`, unless
 otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
 include a LF, but the receiver MUST NOT complain if it is not present.
 
@@ -60,7 +72,7 @@ Each Extra Parameter takes the form of `<key>=<value>` or `<key>`.
 
 Servers that receive any such Extra Parameters MUST ignore all
 unrecognized keys. Currently, the only Extra Parameter recognized is
-"version" with a value of '1' or '2'.  See protocol-v2.txt for more
+"version" with a value of '1' or '2'.  See linkgit:gitprotocol-v2[5] for more
 information on protocol version 2.
 
 Git Transport
@@ -455,7 +467,7 @@ Now that the client and server have finished negotiation about what
 the minimal amount of data that needs to be sent to the client is, the server
 will construct and send the required data in packfile format.
 
-See pack-format.txt for what the packfile itself actually looks like.
+See linkgit:gitformat-pack[5] for what the packfile itself actually looks like.
 
 If 'side-band' or 'side-band-64k' capabilities have been specified by
 the client, the server will send the packfile data multiplexed.
@@ -707,3 +719,7 @@ An example client/server communication might look like this:
    S: 0018ok refs/heads/debug\n
    S: 002ang refs/heads/master non-fast-forward\n
 ----
+
+GIT
+---
+Part of the linkgit:git[1] suite
similarity index 97%
rename from Documentation/technical/protocol-v2.txt
rename to Documentation/gitprotocol-v2.txt
index 8a877d27e23803686632e223cbc4ba7f4ac0ab79..c9c0f9160b22e4324e2414cebabe951ae175ad1c 100644 (file)
@@ -1,5 +1,17 @@
-Git Wire Protocol, Version 2
-============================
+gitprotocol-v2(5)
+=================
+
+NAME
+----
+gitprotocol-v2 - Git Wire Protocol, Version 2
+
+SYNOPSIS
+--------
+[verse]
+<over-the-wire-protocol>
+
+DESCRIPTION
+-----------
 
 This document presents a specification for a version 2 of Git's wire
 protocol.  Protocol v2 will improve upon v1 in the following ways:
@@ -26,8 +38,7 @@ Packet-Line Framing
 -------------------
 
 All communication is done using packet-line framing, just as in v1.  See
-`Documentation/technical/pack-protocol.txt` and
-`Documentation/technical/protocol-common.txt` for more information.
+linkgit:gitprotocol-pack[5] and linkgit:gitprotocol-common[5] for more information.
 
 In protocol v2 these special packets will have the following semantics:
 
@@ -42,7 +53,7 @@ Initial Client Request
 In general a client can request to speak protocol v2 by sending
 `version=2` through the respective side-channel for the transport being
 used which inevitably sets `GIT_PROTOCOL`.  More information can be
-found in `pack-protocol.txt` and `http-protocol.txt`, as well as the
+found in linkgit:gitprotocol-pack[5] and linkgit:gitprotocol-http[5], as well as the
 `GIT_PROTOCOL` definition in `git.txt`. In all cases the
 response from the server is the capability advertisement.
 
@@ -66,7 +77,7 @@ HTTP Transport
 ~~~~~~~~~~~~~~
 
 When using the http:// or https:// transport a client makes a "smart"
-info/refs request as described in `http-protocol.txt` and requests that
+info/refs request as described in linkgit:gitprotocol-http[5] and requests that
 v2 be used by supplying "version=2" in the `Git-Protocol` header.
 
    C: GET $GIT_URL/info/refs?service=git-upload-pack HTTP/1.0
@@ -566,3 +577,7 @@ and associated requested information, each separated by a single space.
        attr = "size"
 
        obj-info = obj-id SP obj-size
+
+GIT
+---
+Part of the linkgit:git[1] suite
index 6f1e269ae43e00d631feb8798a2a95aef48e8287..ed8da428c98bc96cafdbbcc8543ed0c8479a13a3 100644 (file)
@@ -168,6 +168,9 @@ Supported commands: 'list', 'import'.
        Can guarantee that when a clone is requested, the received
        pack is self contained and is connected.
 
+'get'::
+       Can use the 'get' command to download a file from a given URI.
+
 If a helper advertises 'connect', Git will use it if possible and
 fall back to another capability if the helper requests so when
 connecting (see the 'connect' command under COMMANDS).
@@ -418,6 +421,12 @@ Supported if the helper has the "connect" capability.
 +
 Supported if the helper has the "stateless-connect" capability.
 
+'get' <uri> <path>::
+       Downloads the file from the given `<uri>` to the given `<path>`. If
+       `<path>.temp` exists, then Git assumes that the `.temp` file is a
+       partial download from a previous attempt and will resume the
+       download from that position.
+
 If a fatal error occurs, the program writes the error message to
 stderr and exits. The caller should expect that a suitable error
 message has been printed if the child closes the connection without
index 8994e2559eac0c5746ca898b0bad1946a7b83298..5efb4fe81ff120e50027b836e85c8ee205923821 100644 (file)
@@ -68,7 +68,7 @@ Note that the "object" file isn't fit for feeding straight to zlib; it
 has the git packed object header, which is variable-length. We want to
 strip that off so we can start playing with the zlib data directly. You
 can either work your way through it manually (the format is described in
-link:../technical/pack-format.html[Documentation/technical/pack-format.txt]),
+linkgit:gitformat-pack[5]),
 or you can walk through it in a debugger. I did the latter, creating a
 valid pack like:
 
diff --git a/Documentation/includes/cmd-config-section-all.txt b/Documentation/includes/cmd-config-section-all.txt
new file mode 100644 (file)
index 0000000..296a239
--- /dev/null
@@ -0,0 +1,3 @@
+Everything below this line in this section is selectively included
+from the linkgit:git-config[1] documentation. The content is the same
+as what's found there:
diff --git a/Documentation/includes/cmd-config-section-rest.txt b/Documentation/includes/cmd-config-section-rest.txt
new file mode 100644 (file)
index 0000000..b1e7682
--- /dev/null
@@ -0,0 +1,3 @@
+Everything above this line in this section isn't included from the
+linkgit:git-config[1] documentation. The content that follows is the
+same as what's found there:
index 425377dfeb7fc0856773fd5eaaac7dd3b7b1c999..02408a0062f0c8c380847b32592aae5671ca85de 100755 (executable)
@@ -32,6 +32,9 @@ my %SECTIONS;
                'SEE ALSO' => {
                        order => $order++,
                },
+               'FILE FORMAT' => {
+                       order => $order++,
+               },
                'GIT' => {
                        required => 1,
                        order => $order++,
diff --git a/Documentation/rerere-options.txt b/Documentation/rerere-options.txt
new file mode 100644 (file)
index 0000000..c3321dd
--- /dev/null
@@ -0,0 +1,9 @@
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+       After the rerere mechanism reuses a recorded resolution on
+       the current conflict to update the files in the working
+       tree, allow it to also update the index with the result of
+       resolution.  `--no-rerere-autoupdate` is a good way to
+       double-check what `rerere` did and catch potential
+       mismerges, before committing the result to the index with a
+       separate `git add`.
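
A small usage sketch of the double-checking workflow described above (the branch name is hypothetical):

----
$ git merge --no-rerere-autoupdate topic   # rerere reuses the recorded resolution in the working tree only
$ git diff                                 # inspect what rerere resolved before trusting it
$ git add <paths>                          # stage the result yourself once satisfied
----
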
index 195e74eec633ea913c0934d2b690b674360376d7..1837509566a79a36a5f607837bc13af082d81b86 100644 (file)
@@ -242,6 +242,7 @@ ifdef::git-rev-list[]
        to `/dev/null` as the output does not have to be formatted.
 
 --disk-usage::
+--disk-usage=human::
        Suppress normal output; instead, print the sum of the bytes used
        for on-disk storage by the selected commits or objects. This is
        equivalent to piping the output into `git cat-file
@@ -249,6 +250,8 @@ ifdef::git-rev-list[]
        faster (especially with `--use-bitmap-index`). See the `CAVEATS`
        section in linkgit:git-cat-file[1] for the limitations of what
        "on-disk storage" means.
+       With the optional value `human`, on-disk storage size is shown
+       in a human-readable string (e.g. 12.24 KiB, 3.50 MiB).
 endif::git-rev-list[]
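
For example (the reported size is purely illustrative):

----
$ git rev-list --disk-usage=human --objects HEAD
124.21 MiB
----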
 
 --cherry-mark::
@@ -389,12 +392,14 @@ Default mode::
        merges from the resulting history, as there are no selected
        commits contributing to this merge.
 
---ancestry-path::
+--ancestry-path[=<commit>]::
        When given a range of commits to display (e.g. 'commit1..commit2'
-       or 'commit2 {caret}commit1'), only display commits that exist
-       directly on the ancestry chain between the 'commit1' and
-       'commit2', i.e. commits that are both descendants of 'commit1',
-       and ancestors of 'commit2'.
+       or 'commit2 {caret}commit1'), only display commits in that range
+       that are ancestors of <commit>, descendants of <commit>, or
+       <commit> itself.  If no commit is specified, use 'commit1' (the
+       excluded part of the range) as <commit>.  Can be passed multiple
+       times; if so, a commit is included if it is any of the commits
+       given or if it is an ancestor or descendant of one of them.
 
 A more detailed explanation follows.
 
@@ -568,11 +573,10 @@ Note the major differences in `N`, `P`, and `Q` over `--full-history`:
 
 There is another simplification mode available:
 
---ancestry-path::
-       Limit the displayed commits to those directly on the ancestry
-       chain between the ``from'' and ``to'' commits in the given commit
-       range. I.e. only display commits that are ancestor of the ``to''
-       commit and descendants of the ``from'' commit.
+--ancestry-path[=<commit>]::
+       Limit the displayed commits to those which are an ancestor of
+       <commit>, or which are a descendant of <commit>, or are <commit>
+       itself.
 +
 As an example use case, consider the following commit history:
 +
@@ -604,6 +608,29 @@ option does. Applied to the 'D..M' range, it results in:
                               \
                                L--M
 -----------------------------------------------------------------------
++
+We can also use `--ancestry-path=D` instead of `--ancestry-path` which
+means the same thing when applied to the 'D..M' range but is just more
+explicit.
++
+If we instead are interested in a given topic within this range, and all
+commits affected by that topic, we may only want to view the subset of
+`D..M` which contain that topic in their ancestry path.  So, using
+`--ancestry-path=H D..M` for example would result in:
++
+-----------------------------------------------------------------------
+               E
+                \
+                 G---H---I---J
+                              \
+                               L--M
+-----------------------------------------------------------------------
++
+Whereas `--ancestry-path=K D..M` would result in
++
+-----------------------------------------------------------------------
+               K---------------L--M
+-----------------------------------------------------------------------
 
 Before discussing another option, `--show-pulls`, we need to
 create a new example history.
@@ -659,7 +686,7 @@ Here, the merge commits `O` and `P` contribute extra noise, as they did
 not actually contribute a change to `file.txt`. They only merged a topic
 that was based on an older version of `file.txt`. This is a common
 issue in repositories using a workflow where many contributors work in
-parallel and merge their topic branches along a single trunk: manu
+parallel and merge their topic branches along a single trunk: many
 unrelated merges appear in the `--full-history` results.
 
 When using the `--simplify-merges` option, the commits `O` and `P`
index acfd5dc1d8b2f6103ddb556956b63f71b83a4d1b..c2a5e4291494173ad35f742f73b6c710eaaf3251 100644 (file)
@@ -8,7 +8,8 @@ Basics
 ------
 
 The argument vector `argv[]` may usually contain mandatory or optional
-'non-option arguments', e.g. a filename or a branch, and 'options'.
+'non-option arguments', e.g. a filename or a branch, 'options', and
+'subcommands'.
 Options are optional arguments that start with a dash and
 that allow to change the behavior of a command.
 
@@ -48,6 +49,33 @@ The parse-options API allows:
   option, e.g. `-a -b --option -- --this-is-a-file` indicates that
   `--this-is-a-file` must not be processed as an option.
 
+Subcommands are special in a couple of ways:
+
+* Subcommands only have long form, and they have no double dash prefix, no
+  negated form, and no description, and they don't take any arguments, and
+  can't be abbreviated.
+
+* There must be exactly one subcommand among the arguments, or zero if the
+  command has a default operation mode.
+
+* All arguments following the subcommand are considered to be arguments of
+  the subcommand, and, conversely, arguments meant for the subcommand may
+  not precede the subcommand.
+
+Therefore, if the options array contains at least one subcommand and
+`parse_options()` encounters the first dashless argument, it will either:
+
+* stop and return, if that dashless argument is a known subcommand, setting
+  `value` to the function pointer associated with that subcommand, storing
+  the name of the subcommand in argv[0], and leaving the rest of the
+  arguments unprocessed, or
+
+* stop and return, if it was invoked with the `PARSE_OPT_SUBCOMMAND_OPTIONAL`
+  flag and that dashless argument doesn't match any subcommands, leaving
+  `value` unchanged and the rest of the arguments unprocessed, or
+
+* show error and usage, and abort.
+
 Steps to parse options
 ----------------------
 
@@ -90,8 +118,8 @@ Flags are the bitwise-or of:
        Keep the first argument, which contains the program name.  It's
        removed from argv[] by default.
 
-`PARSE_OPT_KEEP_UNKNOWN`::
-       Keep unknown arguments instead of erroring out.  This doesn't
+`PARSE_OPT_KEEP_UNKNOWN_OPT`::
+       Keep unknown options instead of erroring out.  This doesn't
        work for all combinations of arguments as users might expect
        it to do.  E.g. if the first argument in `--unknown --known`
        takes a value (which we can't know), the second one is
@@ -101,6 +129,8 @@ Flags are the bitwise-or of:
        non-option, not as a value belonging to the unknown option,
        the parser early.  That's why parse_options() errors out if
        both options are set.
+       Note that non-option arguments are always kept, even without
+       this flag.
 
 `PARSE_OPT_NO_INTERNAL_HELP`::
        By default, parse_options() handles `-h`, `--help` and
@@ -108,6 +138,13 @@ Flags are the bitwise-or of:
        turns it off and allows one to add custom handlers for these
        options, or to just leave them unknown.
 
+`PARSE_OPT_SUBCOMMAND_OPTIONAL`::
+       Don't error out when no subcommand is specified.
+
+Note that `PARSE_OPT_STOP_AT_NON_OPTION` is incompatible with subcommands;
+while `PARSE_OPT_KEEP_DASHDASH` and `PARSE_OPT_KEEP_UNKNOWN_OPT` can only be
+used with subcommands when combined with `PARSE_OPT_SUBCOMMAND_OPTIONAL`.
+
 Data Structure
 --------------
 
@@ -236,10 +273,14 @@ There are some macros to easily define options:
 `OPT_CMDMODE(short, long, &int_var, description, enum_val)`::
        Define an "operation mode" option, only one of which in the same
        group of "operating mode" options that share the same `int_var`
-       can be given by the user. `enum_val` is set to `int_var` when the
+       can be given by the user. `int_var` is set to `enum_val` when the
        option is used, but an error is reported if other "operating mode"
        option has already set its value to the same `int_var`.
+       In new commands consider using subcommands instead.
 
+`OPT_SUBCOMMAND(long, &fn_ptr, subcommand_fn)`::
+       Define a subcommand.  `subcommand_fn` is put into `fn_ptr` when
+       this subcommand is used.
 
 The last element of the array must be `OPT_END()`.
 
index d79ad323e675303ceedc6e608f045299b5ed2d8b..d44ada98e7db9ccdd7dd622221ee298af5de5000 100644 (file)
@@ -78,7 +78,7 @@ client and an optional response message from the server.  Both the
 client and server messages are unlimited in length and are terminated
 with a flush packet.
 
-The pkt-line routines (Documentation/technical/protocol-common.txt)
+The pkt-line routines (linkgit:gitprotocol-common[5])
 are used to simplify buffer management during message generation,
 transmission, and reception.  A flush packet is used to mark the end
 of the message.  This allows the sender to incrementally generate and
index 77a150b30ee80247df18d936c2fc03b5be570af0..2afa28bb5aa121489a95800765ae8bf844a3355f 100644 (file)
@@ -717,6 +717,7 @@ The "exec_id" field is a command-unique id and is only useful if the
 {
        "event":"def_param",
        ...
+       "scope":"global",
        "param":"core.abbrev",
        "value":"7"
 }
@@ -1207,6 +1208,45 @@ at offset 508.
 This example also shows that thread names are assigned in a racy manner
 as each thread starts and allocates TLS storage.
 
+Config (def param) Events::
+
+         Dump "interesting" config values to trace2 log.
++
+We can optionally emit configuration events, see
+`trace2.configparams` in linkgit:git-config[1] for how to enable
+it.
++
+----------------
+$ git config --system color.ui never
+$ git config --global color.ui always
+$ git config --local color.ui auto
+$ git config --list --show-scope | grep 'color.ui'
+system  color.ui=never
+global  color.ui=always
+local   color.ui=auto
+----------------
++
+Then, mark the config `color.ui` as "interesting" config with
+`GIT_TRACE2_CONFIG_PARAMS`:
++
+----------------
+$ export GIT_TRACE2_PERF_BRIEF=1
+$ export GIT_TRACE2_PERF=~/log.perf
+$ export GIT_TRACE2_CONFIG_PARAMS=color.ui
+$ git version
+...
+$ cat ~/log.perf
+d0 | main                     | version      |     |           |           |              | ...
+d0 | main                     | start        |     |  0.001642 |           |              | /usr/local/bin/git version
+d0 | main                     | cmd_name     |     |           |           |              | version (version)
+d0 | main                     | def_param    |     |           |           | scope:system | color.ui:never
+d0 | main                     | def_param    |     |           |           | scope:global | color.ui:always
+d0 | main                     | def_param    |     |           |           | scope:local  | color.ui:auto
+d0 | main                     | data         | r0  |  0.002100 |  0.002100 | fsync        | fsync/writeout-only:0
+d0 | main                     | data         | r0  |  0.002126 |  0.002126 | fsync        | fsync/hardware-flush:0
+d0 | main                     | exit         |     |  0.000470 |           |              | code:0
+d0 | main                     | atexit       |     |  0.000477 |           |              | code:0
+----------------
 == Future Work
 
 === Relationship to the Existing Trace Api (api-trace.txt)
index a85f58f51539cc0cca5313693e96e50858f731a0..c2e652b71a7f698a6843bbf327535b4692538c0f 100644 (file)
@@ -72,6 +72,17 @@ MIDXs, both the bit-cache and rev-cache extensions are required.
            pack/MIDX. The format and meaning of the name-hash is
            described below.
 
+               ** {empty}
+               BITMAP_OPT_LOOKUP_TABLE (0x10): :::
+               If present, the end of the bitmap file contains a table
+               containing a list of `N` <commit_pos, offset, xor_row>
+               triplets. The format and meaning of the table is described
+               below.
++
+NOTE: Unlike the xor_offset used to compress an individual bitmap,
+`xor_row` stores an *absolute* index into the lookup table, not a location
+relative to the current entry.
+
        4-byte entry count (network byte order): ::
            The total count of entries (bitmapped commits) in this bitmap index.
 
@@ -216,3 +227,31 @@ Note that this hashing scheme is tied to the BITMAP_OPT_HASH_CACHE flag.
 If implementations want to choose a different hashing scheme, they are
 free to do so, but MUST allocate a new header flag (because comparing
 hashes made under two different schemes would be pointless).
+
+Commit lookup table
+-------------------
+
+If the BITMAP_OPT_LOOKUP_TABLE flag is set, the last `N * (4 + 8 + 4)`
+bytes (preceding the name-hash cache and trailing hash) of the `.bitmap`
+file contains a lookup table specifying the information needed to get
+the desired bitmap from the entries without parsing previous unnecessary
+bitmaps.
+
+For a `.bitmap` containing `nr_entries` reachability bitmaps, the table
+contains a list of `nr_entries` <commit_pos, offset, xor_row> triplets
+(sorted in the ascending order of `commit_pos`). The content of i'th
+triplet is -
+
+       * {empty}
+       commit_pos (4 byte integer, network byte order): ::
+       It stores the object position of a commit (in the midx or pack
+       index).
+
+       * {empty}
+       offset (8 byte integer, network byte order): ::
+       The offset from which that commit's bitmap can be read.
+
+       * {empty}
+       xor_row (4 byte integer, network byte order): ::
+       The position of the triplet whose bitmap is used to compress
+       this one, or `0xffffffff` if no such bitmap exists.
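
As a quick, purely illustrative size check of the lookup table described above:

----
nr_entries = 100000 bitmapped commits
table size = nr_entries * (4 + 8 + 4) = 1600000 bytes (roughly 1.5 MiB)
----
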
diff --git a/Documentation/technical/bundle-uri.txt b/Documentation/technical/bundle-uri.txt
new file mode 100644 (file)
index 0000000..c25c423
--- /dev/null
@@ -0,0 +1,573 @@
+Bundle URIs
+===========
+
+Git bundles are files that store a pack-file along with some extra metadata,
+including a set of refs and a (possibly empty) set of necessary commits. See
+linkgit:git-bundle[1] and link:bundle-format.txt[the bundle format] for more
+information.
+
+Bundle URIs are locations where Git can download one or more bundles in
+order to bootstrap the object database in advance of fetching the remaining
+objects from a remote.
+
+One goal is to speed up clones and fetches for users with poor network
+connectivity to the origin server. Another benefit is to allow heavy users,
+such as CI build farms, to use local resources for the majority of Git data
+and thereby reduce the load on the origin server.
+
+To enable the bundle URI feature, users can specify a bundle URI using
+command-line options or the origin server can advertise one or more URIs
+via a protocol v2 capability.
+
+Design Goals
+------------
+
+The bundle URI standard aims to be flexible enough to satisfy multiple
+workloads. The bundle provider and the Git client have several choices in
+how they create and consume bundle URIs.
+
+* Bundles can have whatever name the server desires. This name could refer
+  to immutable data by using a hash of the bundle contents. However, this
+  means that a new URI will be needed after every update of the content.
+  This might be acceptable if the server is advertising the URI (and the
+  server is aware of new bundles being generated) but would not be
+  ergonomic for users using the command line option.
+
+* The bundles could be organized specifically for bootstrapping full
+  clones, but could also be organized with the intention of bootstrapping
+  incremental fetches. The bundle provider must decide on one of several
+  organization schemes to minimize client downloads during incremental
+  fetches, but the Git client can also choose whether to use bundles for
+  either of these operations.
+
+* The bundle provider can choose to support full clones, partial clones,
+  or both. The client can detect which bundles are appropriate for the
+  repository's partial clone filter, if any.
+
+* The bundle provider can use a single bundle (for clones only), or a
+  list of bundles. When using a list of bundles, the provider can specify
+  whether or not the client needs _all_ of the bundle URIs for a full
+  clone, or if _any_ one of the bundle URIs is sufficient. This allows the
+  bundle provider to use different URIs for different geographies.
+
+* The bundle provider can organize the bundles using heuristics, such as
+  creation tokens, to help the client prevent downloading bundles it does
+  not need. When the bundle provider does not provide these heuristics,
+  the client can use optimizations to minimize how much of the data is
+  downloaded.
+
+* The bundle provider does not need to be associated with the Git server.
+  The client can choose to use the bundle provider without it being
+  advertised by the Git server.
+
+* The client can choose to discover bundle providers that are advertised
+  by the Git server. This could happen during `git clone`, during
+  `git fetch`, both, or neither. The user can choose which combination
+  works best for them.
+
+* The client can choose to configure a bundle provider manually at any
+  time. The client can also choose to specify a bundle provider manually
+  as a command-line option to `git clone`.
+
+Each repository is different and every Git server has different needs.
+Hopefully the bundle URI feature is flexible enough to satisfy all needs.
+If not, then the feature can be extended through its versioning mechanism.
+
+Server requirements
+-------------------
+
+To provide a server-side implementation of bundle servers, no other parts
+of the Git protocol are required. This allows server maintainers to use
+static content solutions such as CDNs in order to serve the bundle files.
+
+At the current scope of the bundle URI feature, all URIs are expected to
+be HTTP(S) URLs where content is downloaded to a local file using a `GET`
+request to that URL. The server could require authentication for those
+requests, with the aim of triggering the configured credential
+helper for secure access. (Future extensions could use "file://" URIs or
+SSH URIs.)
+
+Assuming a `200 OK` response from the server, the content at the URL is
+inspected. First, Git attempts to parse the file as a bundle file of
+version 2 or higher. If the file is not a bundle, then the file is parsed
+as a plain-text file using Git's config parser. The key-value pairs in
+that config file are expected to describe a list of bundle URIs. If
+neither of these parse attempts succeeds, then Git will report an error to
+the user that the bundle URI provided erroneous data.
+
+Any other data provided by the server is considered erroneous.
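+
+A bundle provider can approximate this acceptance check from the command line
+before publishing a URI. This is only a sketch (`$BUNDLE_URI` and the file
+name are placeholders) and must run inside a clone of the repository, since
+`git bundle verify` checks prerequisites against the local object database;
+Git performs the equivalent inspection itself after downloading:
+
+       curl -fsSL "$BUNDLE_URI" -o downloaded
+       git bundle verify downloaded ||
+               git config --file downloaded --get bundle.version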
+
+Bundle Lists
+------------
+
+The Git server can advertise bundle URIs using a set of `key=value` pairs.
+A bundle URI can also serve a plain-text file in the Git config format
+containing these same `key=value` pairs. In both cases, we consider this
+to be a _bundle list_. The pairs specify information about the bundles
+that the client can use to make decisions for which bundles to download
+and which to ignore.
+
+A few keys focus on properties of the list itself.
+
+bundle.version::
+       (Required) This value provides a version number for the bundle
+       list. If a future Git change enables a feature that needs the Git
+       client to react to a new key in the bundle list file, then this version
+       will increment. The only current version number is 1, and if any other
+       value is specified then Git will fail to use this file.
+
+bundle.mode::
+       (Required) This value is one of two options: `all` or `any`. When `all`
+       is specified, then the client should expect to need all of the listed
+       bundle URIs that match their repository's requirements. When `any` is
+       specified, then the client should expect that any one of the bundle URIs
+       that match their repository's requirements will suffice. Typically, the
+       `any` option is used to list a number of different bundle servers
+       located in different geographies.
+
+bundle.heuristic::
+       If this string-valued key exists, then the bundle list is designed to
+       work well with incremental `git fetch` commands. The heuristic signals
+       that there are additional keys available for each bundle that help
+       determine which subset of bundles the client should download. The only
+       heuristic currently planned is `creationToken`.
+
+The remaining keys include an `<id>` segment which is a server-designated
+name for each available bundle. The `<id>` must contain only alphanumeric
+and `-` characters.
+
+bundle.<id>.uri::
+       (Required) This string value is the URI for downloading bundle `<id>`.
+       If the URI begins with a protocol (`http://` or `https://`) then the URI
+       is absolute. Otherwise, the URI is interpreted as relative to the URI
+       used for the bundle list. If the URI begins with `/`, then that relative
+       path is relative to the domain name used for the bundle list. (This use
+       of relative paths is intended to make it easier to distribute a set of
+       bundles across a large number of servers or CDNs with different domain
+       names.)
+
+bundle.<id>.filter::
+       This string value represents an object filter that should also appear in
+       the header of this bundle. The server uses this value to differentiate
+       the kinds of bundles offered, so the client can choose those that
+       match its object filter.
+
+bundle.<id>.creationToken::
+       This value is a nonnegative 64-bit integer used for sorting the bundles
+       in the list. This is used to download a subset of bundles during a fetch
+       when `bundle.heuristic=creationToken`.
+
+bundle.<id>.location::
+       This string value advertises a real-world location from where the bundle
+       URI is served. This can be used to present the user with an option for
+       which bundle URI to use or simply as an informative indicator of which
+       bundle URI was selected by Git. This is only valuable when
+       `bundle.mode` is `any`.
+
+Here is an example bundle list using the Git config format:
+
+       [bundle]
+               version = 1
+               mode = all
+               heuristic = creationToken
+
+       [bundle "2022-02-09-1644442601-daily"]
+               uri = https://bundles.example.com/git/git/2022-02-09-1644442601-daily.bundle
+               creationToken = 1644442601
+
+       [bundle "2022-02-02-1643842562"]
+               uri = https://bundles.example.com/git/git/2022-02-02-1643842562.bundle
+               creationToken = 1643842562
+
+       [bundle "2022-02-09-1644442631-daily-blobless"]
+               uri = 2022-02-09-1644442631-daily-blobless.bundle
+               creationToken = 1644442631
+               filter = blob:none
+
+       [bundle "2022-02-02-1643842568-blobless"]
+               uri = /git/git/2022-02-02-1643842568-blobless.bundle
+               creationToken = 1643842568
+               filter = blob:none
+
+This example uses `bundle.mode=all` as well as the
+`bundle.<id>.creationToken` heuristic. It also uses the `bundle.<id>.filter`
+options to present two parallel sets of bundles: one for full clones and
+another for blobless partial clones.
+
+Suppose that this bundle list was found at the URI
+`https://bundles.example.com/git/git/` and so the two blobless bundles have
+the following fully-expanded URIs:
+
+* `https://bundles.example.com/git/git/2022-02-09-1644442631-daily-blobless.bundle`
+* `https://bundles.example.com/git/git/2022-02-02-1643842568-blobless.bundle`
+
+Advertising Bundle URIs
+-----------------------
+
+If a user knows a bundle URI for the repository they are cloning, then
+they can specify that URI manually through a command-line option. However,
+a Git host may want to advertise bundle URIs during the clone operation,
+helping users unaware of the feature.
+
+The only thing required for this feature is that the server can advertise
+one or more bundle URIs. This advertisement takes the form of a new
+protocol v2 capability specifically for discovering bundle URIs.
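+
+The payload of such an advertisement is the bundle list itself, expressed as
+the `key=value` pairs described in the previous section. For illustration
+only (the exact wire format is left to the protocol v2 documentation), a
+minimal response could carry:
+
+       bundle.version=1
+       bundle.mode=all
+       bundle.example.uri=https://bundles.example.com/git/git/example.bundle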
+
+The client could choose an arbitrary bundle URI as an option _or_ select
+the URI with the best performance by some exploratory checks. It is up to the
+bundle provider to decide if having multiple URIs is preferable to a
+single URI that is geodistributed through server-side infrastructure.
+
+Cloning with Bundle URIs
+------------------------
+
+The primary need for bundle URIs is to speed up clones. The Git client
+will interact with bundle URIs according to the following flow:
+
+1. The user specifies a bundle URI with the `--bundle-uri` command-line
+   option _or_ the client discovers a bundle list advertised by the
+   Git server.
+
+2. If the downloaded data from a bundle URI is a bundle, then the client
+   inspects the bundle headers to check that the prerequisite commit OIDs
+   are present in the client repository. If some are missing, then the
+   client delays unbundling until other bundles have been unbundled,
+   making those OIDs present. When all required OIDs are present, the
+   client unbundles that data using a refspec. The default refspec is
+   `+refs/heads/*:refs/bundles/*`, but this can be configured. These refs
+   are stored so that later `git fetch` negotiations can communicate the
+   bundled refs as `have`s, reducing the size of the fetch over the Git
+   protocol. To allow pruning refs from this ref namespace, Git may
+   introduce a numbered namespace (such as `refs/bundles/<i>/*`) such that
+   stale bundle refs can be deleted.
+
+3. If the file is instead a bundle list, then the client inspects the
+   `bundle.mode` to see if the list is of the `all` or `any` form.
+
+   a. If `bundle.mode=all`, then the client considers all bundle
+      URIs. The list is reduced based on the `bundle.<id>.filter` options
+      matching the client repository's partial clone filter. Then, all
+      bundle URIs are requested. If the `bundle.<id>.creationToken`
+      heuristic is provided, then the bundles are downloaded in decreasing
+      order by the creation token, stopping when a bundle has all required
+      OIDs. The bundles can then be unbundled in increasing creation token
+      order. The client stores the latest creation token as a heuristic
+      for avoiding future downloads if the bundle list does not advertise
+      bundles with larger creation tokens.
+
+   b. If `bundle.mode=any`, then the client can choose any one of the
+      bundle URIs to inspect. The client can use a variety of ways to
+      choose among these URIs. The client can also fall back to another URI
+      if the initial choice fails to return a result.
+
+Note that during a clone we expect that all bundles will be required, and
+heuristics such as `bundle.<id>.creationToken` can be used to download
+bundles in chronological order or in parallel.
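+
+The following shell sketch approximates steps 1 and 2 of this flow for a
+single bundle. The URLs are hypothetical, and the real client performs the
+download and unbundling internally rather than through these commands:
+
+       git init repo && cd repo
+       git remote add origin https://git.example.com/git/git.git
+       curl -fsSL https://bundles.example.com/git/git/base.bundle -o base.bundle
+       git bundle verify base.bundle
+       git fetch base.bundle '+refs/heads/*:refs/bundles/*'
+       # refs/bundles/* are now sent as 'have's during the catch-up fetch
+       git fetch origin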
+
+If a given bundle URI is a bundle list with a `bundle.heuristic`
+value, then the client can choose to store that URI as its chosen bundle
+URI. The client can then navigate directly to that URI during later `git
+fetch` calls.
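+
+For example, the client could record the chosen URI in its configuration so
+that later fetches find it again. The key name below is an assumption of
+this sketch, not something fixed by this design:
+
+       # hypothetical key name; the design above does not settle on one
+       git config fetch.bundleURI https://bundles.example.com/git/git/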
+
+When downloading bundle URIs, the client can choose to inspect the initial
+content before committing to downloading the entire content. This may
+provide enough information to determine if the URI is a bundle list or
+a bundle. In the case of a bundle, the client may inspect the bundle
+header to determine that all advertised tips are already in the client
+repository and cancel the remaining download.
+
+Fetching with Bundle URIs
+-------------------------
+
+When the client fetches new data, it can decide to fetch from bundle
+servers before fetching from the origin remote. This could be done via a
+command-line option, but it is more likely useful to use a config value
+such as the one specified during the clone.
+
+The fetch operation follows the same procedure to download bundles from a
+bundle list (although we do _not_ want to use parallel downloads here). We
+expect that the process will end when all prerequisite commit OIDs in a
+thin bundle are already in the object database.
+
+When using the `creationToken` heuristic, the client can avoid downloading
+any bundles if their creation tokens are not larger than the stored
+creation token. After fetching new bundles, Git updates this local
+creation token.
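+
+For example, if the stored creation token is `1644442601` and the list now
+advertises the two entries below, only the first entry is downloaded, and
+the stored token afterwards becomes `1644770820`:
+
+       [bundle "2022-02-13-1644770820-daily"]
+               uri = https://bundles.example.com/git/git/2022-02-13-1644770820-daily.bundle
+               creationToken = 1644770820
+
+       [bundle "2022-02-09-1644442601-daily"]
+               uri = https://bundles.example.com/git/git/2022-02-09-1644442601-daily.bundle
+               creationToken = 1644442601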
+
+If the bundle provider does not provide a heuristic, then the client
+should attempt to inspect the bundle headers before downloading the full
+bundle data in case the bundle tips already exist in the client
+repository.
+
+Error Conditions
+----------------
+
+If the Git client discovers something unexpected while downloading
+information according to a bundle URI or the bundle list found at that
+location, then Git can ignore that data and continue as if it was not
+given a bundle URI. The remote Git server is the ultimate source of truth,
+not the bundle URI.
+
+Here are a few example error conditions:
+
+* The client fails to connect with a server at the given URI or a connection
+  is lost without any chance to recover.
+
+* The client receives a 400-level response (such as `404 Not Found` or
+  `401 Unauthorized`). The client should use the credential helper to
+  find and provide a credential for the URI, but match the semantics of
+  Git's other HTTP protocols in terms of handling specific 400-level
+  errors.
+
+* The server reports any other failure response.
+
+* The client receives data that is not parsable as a bundle or bundle list.
+
+* A bundle includes a filter that does not match expectations.
+
+* The client cannot unbundle the bundles because the prerequisite commit OIDs
+  are not in the object database and there are no more bundles to download.
+
+There are also situations that could be seen as wasteful, but are not
+error conditions:
+
+* The downloaded bundles contain more information than is requested by
+  the clone or fetch request. A primary example is if the user requests
+  a clone with `--single-branch` but downloads bundles that store every
+  reachable commit from all `refs/heads/*` references. This might be
+  initially wasteful, but perhaps these objects will become reachable by
+  a later ref update that the client cares about.
+
+* A bundle download during a `git fetch` contains objects already in the
+  object database. This is probably unavoidable if we are using bundles
+  for fetches, since the client will almost always be slightly ahead of
+  the bundle servers after performing its "catch-up" fetch to the remote
+  server. This extra work is most wasteful when the client is fetching
+  much more frequently than the server is computing bundles, such as if
+  the client is using hourly prefetches with background maintenance, but
+  the server is computing bundles weekly. For this reason, the client
+  should not use bundle URIs for fetch unless the server has explicitly
+  recommended it through a `bundle.heuristic` value.
+
+Example Bundle Provider organization
+------------------------------------
+
+The bundle URI feature is intentionally designed to be flexible to
+different ways a bundle provider wants to organize the object data.
+However, it can be helpful to have a complete organization model described
+here so providers can start from that base.
+
+This example organization is a simplified model of what is used by the
+GVFS Cache Servers (see the section near the end of this document), which
+have been beneficial in speeding up clones and fetches for very large
+repositories, although they rely on extra software outside of Git.
+
+The bundle provider deploys servers across multiple geographies. Each
+server manages its own bundle set. The server can track a number of Git
+repositories, but provides a bundle list for each based on a pattern. For
+example, when mirroring a repository at `https://<domain>/<org>/<repo>`
+the bundle server could have its bundle list available at
+`https://<server-url>/<domain>/<org>/<repo>`. The origin Git server can
+list all of these servers under the "any" mode:
+
+       [bundle]
+               version = 1
+               mode = any
+
+       [bundle "eastus"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>
+
+       [bundle "europe"]
+               uri = https://europe.example.com/<domain>/<org>/<repo>
+
+       [bundle "apac"]
+               uri = https://apac.example.com/<domain>/<org>/<repo>
+
+This "list of lists" is static and only changes if a bundle server is
+added or removed.
+
+Each bundle server manages its own set of bundles. The initial bundle list
+contains only a single bundle, containing all of the objects received from
+cloning the repository from the origin server. The list uses the
+`creationToken` heuristic and a `creationToken` is made for the bundle
+based on the server's timestamp.
+
+The bundle server runs regularly-scheduled updates for the bundle list,
+such as once a day. During this task, the server fetches the latest
+contents from the origin server and generates a bundle containing the
+objects reachable from the latest origin refs, but not contained in a
+previously-computed bundle. This bundle is added to the list, with care
+that the `creationToken` is strictly greater than the previous maximum
+`creationToken`.
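+
+A rough sketch of such a scheduled job is shown below. The paths and the
+two-day window are hypothetical, and a time-based range only approximates
+"objects not contained in a previously-computed bundle":
+
+       now=$(date +%s)
+       git --git-dir=/srv/mirror/repo.git fetch --prune origin
+       git --git-dir=/srv/mirror/repo.git bundle create \
+               /srv/bundles/repo/$now-daily.bundle --branches --since=2.days.ago
+       # append a [bundle "$now-daily"] entry with creationToken = $now to the list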
+
+When the bundle list grows too large, say more than 30 bundles, then the
+oldest "_N_ minus 30" bundles are combined into a single bundle. This
+bundle's `creationToken` is equal to the maximum `creationToken` among the
+merged bundles.
+
+An example bundle list is provided here, although it only has two daily
+bundles and not a full list of 30:
+
+       [bundle]
+               version = 1
+               mode = all
+               heuristic = creationToken
+
+       [bundle "2022-02-13-1644770820-daily"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-13-1644770820-daily.bundle
+               creationToken = 1644770820
+
+       [bundle "2022-02-09-1644442601-daily"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-09-1644442601-daily.bundle
+               creationToken = 1644442601
+
+       [bundle "2022-02-02-1643842562"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-02-1643842562.bundle
+               creationToken = 1643842562
+
+To avoid storing and serving object data in perpetuity after it becomes
+unreachable in the origin server, this bundle merge can be more careful.
+Instead of taking an absolute union of the old bundles, the merged bundle
+can be created by looking at the newer bundles and ensuring that their
+necessary commits are all available in the merged bundle (or in another
+one of the newer bundles). This allows "expiring" object data that is not
+used by new commits in this window of time. That data could be
+reintroduced by a later push.
+
+This data organization has two main goals. First, initial
+clones of the repository become faster by downloading precomputed object
+data from a closer source. Second, `git fetch` commands can be faster,
+especially if the client has not fetched for a few days. However, if a
+client does not fetch for 30 days, then the bundle list organization would
+cause redownloading a large amount of object data.
+
+One way to make this organization more useful to users who fetch frequently
+is to have more frequent bundle creation. For example, bundles could be
+created every hour, and then once a day those "hourly" bundles could be
+merged into a "daily" bundle. The daily bundles are merged into the
+oldest bundle after 30 days.
+
+It is recommended that this bundle strategy be repeated with the `blob:none`
+filter if clients of this repository are expecting to use blobless partial
+clones. These blobless bundles stay in the same list as the full
+bundles, but use the `bundle.<id>.filter` key to separate the two groups.
+For very large repositories, the bundle provider may want to _only_ provide
+blobless bundles.
+
+Implementation Plan
+-------------------
+
+This design document is being submitted on its own as an aspirational
+document, with the goal of implementing all of the mentioned client
+features over the course of several patch series. Here is a potential
+outline for submitting these features:
+
+1. Integrate bundle URIs into `git clone` with a `--bundle-uri` option.
+   This will include a new `git fetch --bundle-uri` mode for use as the
+   implementation underneath `git clone`. The initial version here will
+   expect a single bundle at the given URI.
+
+2. Implement the ability to parse a bundle list from a bundle URI and
+   update the `git fetch --bundle-uri` logic to properly distinguish
+   between `bundle.mode` options. Specifically design the feature so
+   that the config format parsing feeds a list of key-value pairs into the
+   bundle list logic.
+
+3. Create the `bundle-uri` protocol v2 command so Git servers can advertise
+   bundle URIs using the key-value pairs. Plug into the existing key-value
+   input to the bundle list logic. Allow `git clone` to discover these
+   bundle URIs and bootstrap the client repository from the bundle data.
+   (This choice is an opt-in via a config option and a command-line
+   option.)
+
+4. Allow the client to understand the `bundle.flag=forFetch` configuration
+   and the `bundle.<id>.creationToken` heuristic. When `git clone`
+   discovers a bundle URI with `bundle.flag=forFetch`, it configures the
+   client repository to check that bundle URI during later `git fetch <remote>`
+   commands.
+
+5. Allow clients to discover bundle URIs during `git fetch` and configure
+   a bundle URI for later fetches if `bundle.flag=forFetch`.
+
+6. Implement the "inspect headers" heuristic to reduce data downloads when
+   the `bundle.<id>.creationToken` heuristic is not available.
+
+As these features are reviewed, this plan might be updated. We also expect
+that new designs will be discovered and implemented as this feature
+matures and becomes used in real-world scenarios.
+
+Related Work: Packfile URIs
+---------------------------
+
+The Git protocol already has a capability where the Git server can list
+a set of URLs along with the packfile response when serving a client
+request. The client is then expected to download the packfiles at those
+locations in order to have a complete understanding of the response.
+
+This mechanism is used by the Gerrit server (implemented with JGit) and
+has been effective at reducing CPU load and improving user performance for
+clones.
+
+A major downside to this mechanism is that the origin server needs to know
+_exactly_ what is in those packfiles, and the packfiles need to be available
+to the user for some time after the server has responded. This coupling
+between the origin and the packfile data is difficult to manage.
+
+Further, this implementation is extremely hard to make work with fetches.
+
+Related Work: GVFS Cache Servers
+--------------------------------
+
+The GVFS Protocol [2] is a set of HTTP endpoints designed independently of
+the Git project before Git's partial clone was created. One feature of this
+protocol is the idea of a "cache server" which can be colocated with build
+machines or developer offices to transfer Git data without overloading the
+central server.
+
+The endpoint that VFS for Git is famous for is the `GET /gvfs/objects/{oid}`
+endpoint, which allows downloading an object on-demand. This is a critical
+piece of the filesystem virtualization of that product.
+
+However, a more subtle need is the `GET /gvfs/prefetch?lastPackTimestamp=<t>`
+endpoint. Given an optional timestamp, the cache server responds with a list
+of precomputed packfiles containing the commits and trees that were introduced
+in those time intervals.
+
+The cache server computes these "prefetch" packfiles using the following
+strategy:
+
+1. Every hour, an "hourly" pack is generated with a given timestamp.
+2. Nightly, the previous 24 hourly packs are rolled up into a "daily" pack.
+3. Nightly, all prefetch packs more than 30 days old are rolled up into
+   one pack.
+
+When a user runs `gvfs clone` or `scalar clone` against a repo with cache
+servers, the client requests all prefetch packfiles, which is at most
+`24 + 30 + 1` packfiles containing only commits and trees. The client
+then follows with a request to the origin server for the references, and
+attempts to checkout that tip reference. (There is an extra endpoint that
+helps get all reachable trees from a given commit, in case that commit
+was not already in a prefetch packfile.)
+
+During a `git fetch`, a hook requests the prefetch endpoint using the
+most-recent timestamp from a previously-downloaded prefetch packfile.
+Only the packfiles with later timestamps are downloaded. Most
+users fetch hourly, so they get at most one hourly prefetch pack. Users
+whose machines have been off or otherwise have not fetched in over 30 days
+might redownload all prefetch packfiles. This is rare.
+
+It is important to note that the clients always contact the origin server
+for the refs advertisement, so the refs are frequently "ahead" of the
+prefetched pack data. The missing objects are downloaded on-demand using
+the `GET gvfs/objects/{oid}` requests, when needed by a command such as
+`git checkout` or `git log`. Some Git optimizations disable checks that
+would cause these on-demand downloads to be too aggressive.
+
+See Also
+--------
+
+[1] https://lore.kernel.org/git/RFC-cover-00.13-0000000000-20210805T150534Z-avarab@gmail.com/
+    An earlier RFC for a bundle URI feature.
+
+[2] https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
+    The GVFS Protocol
diff --git a/Documentation/technical/cruft-packs.txt b/Documentation/technical/cruft-packs.txt
deleted file mode 100644 (file)
index d81f3a8..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-= Cruft packs
-
-The cruft packs feature offers an alternative to Git's traditional mechanism of
-removing unreachable objects. This document provides an overview of Git's
-pruning mechanism, and how a cruft pack can be used instead to accomplish the
-same.
-
-== Background
-
-To remove unreachable objects from your repository, Git offers `git repack -Ad`
-(see linkgit:git-repack[1]). Quoting from the documentation:
-
-[quote]
-[...] unreachable objects in a previous pack become loose, unpacked objects,
-instead of being left in the old pack. [...] loose unreachable objects will be
-pruned according to normal expiry rules with the next 'git gc' invocation.
-
-Unreachable objects aren't removed immediately, since doing so could race with
-an incoming push which may reference an object which is about to be deleted.
-Instead, those unreachable objects are stored as loose objects and stay that way
-until they are older than the expiration window, at which point they are removed
-by linkgit:git-prune[1].
-
-Git must store these unreachable objects loose in order to keep track of their
-per-object mtimes. If these unreachable objects were written into one big pack,
-then either freshening that pack (because an object contained within it was
-re-written) or creating a new pack of unreachable objects would cause the pack's
-mtime to get updated, and the objects within it would never leave the expiration
-window. Instead, objects are stored loose in order to keep track of the
-individual object mtimes and avoid a situation where all cruft objects are
-freshened at once.
-
-This can lead to undesirable situations when a repository contains many
-unreachable objects which have not yet left the grace period. Having large
-directories in the shards of `.git/objects` can lead to decreased performance in
-the repository. But given enough unreachable objects, this can lead to inode
-starvation and degrade the performance of the whole system. Since we
-can never pack those objects, these repositories often take up a large amount of
-disk space, since we can only zlib compress them, but not store them in delta
-chains.
-
-== Cruft packs
-
-A cruft pack eliminates the need for storing unreachable objects in a loose
-state by including the per-object mtimes in a separate file alongside a single
-pack containing all loose objects.
-
-A cruft pack is written by `git repack --cruft` when generating a new pack,
-using linkgit:git-pack-objects[1]'s `--cruft` option. Note that `git repack --cruft`
-is a classic all-into-one repack, meaning that everything in the resulting pack is
-reachable, and everything else is unreachable. Once written, the `--cruft`
-option instructs `git repack` to generate another pack containing only objects
-not packed in the previous step (which equates to packing all unreachable
-objects together). This progresses as follows:
-
-  1. Enumerate every object, marking any object which is (a) not contained in a
-     kept-pack, and (b) whose mtime is within the grace period as a traversal
-     tip.
-
-  2. Perform a reachability traversal based on the tips gathered in the previous
-     step, adding every object along the way to the pack.
-
-  3. Write the pack out, along with a `.mtimes` file that records the per-object
-     timestamps.
-
-This mode is invoked internally by linkgit:git-repack[1] when instructed to
-write a cruft pack. Crucially, the set of in-core kept packs is exactly the set
-of packs which will not be deleted by the repack; in other words, they contain
-all of the repository's reachable objects.
-
-When a repository already has a cruft pack, `git repack --cruft` typically only
-adds objects to it. An exception to this is when `git repack` is given the
-`--cruft-expiration` option, which allows the generated cruft pack to omit
-expired objects instead of waiting for linkgit:git-gc[1] to expire those objects
-later on.
-
-It is linkgit:git-gc[1] that is typically responsible for removing expired
-unreachable objects.
-
-== Caution for mixed-version environments
-
-Repositories that have cruft packs in them will continue to work with any older
-version of Git. Note, however, that previous versions of Git which do not
-understand the `.mtimes` file will use the cruft pack's mtime as the mtime for
-all of the objects in it. In other words, do not expect older (pre-cruft pack)
-versions of Git to interpret or even read the contents of the `.mtimes` file.
-
-Note that having mixed versions of Git GC-ing the same repository can lead to
-unreachable objects never being completely pruned. This can happen under the
-following circumstances:
-
-  - An older version of Git running GC explodes the contents of an existing
-    cruft pack loose, using the cruft pack's mtime.
-  - A newer version running GC collects those loose objects into a cruft pack,
-    where the .mtime file reflects the loose object's actual mtimes, but the
-    cruft pack mtime is "now".
-
-Repeating this process will lead to unreachable objects not getting pruned as a
-result of repeatedly resetting the objects' mtimes to the present time.
-
-If you are GC-ing repositories in a mixed version environment, consider omitting
-the `--cruft` option when using linkgit:git-repack[1] and linkgit:git-gc[1], and
-leaving the `gc.cruftPacks` configuration unset until all writers understand
-cruft packs.
-
-== Alternatives
-
-Notable alternatives to this design include:
-
-  - The location of the per-object mtime data, and
-  - Storing unreachable objects in multiple cruft packs.
-
-On the location of mtime data, a new auxiliary file tied to the pack was chosen
-to avoid complicating the `.idx` format. If the `.idx` format were ever to gain
-support for optional chunks of data, it may make sense to consolidate the
-`.mtimes` format into the `.idx` itself.
-
-Storing unreachable objects among multiple cruft packs (e.g., creating a new
-cruft pack during each repacking operation including only unreachable objects
-which aren't already stored in an earlier cruft pack) is significantly more
-complicated to construct, and so aren't pursued here. The obvious drawback to
-the current implementation is that the entire cruft pack must be re-written from
-scratch.
index 260224b0331c709da9aa4bcfa06ae97146cfffb6..e2ac36dd210bef993e1cca06f37d788c3d4fe262 100644 (file)
@@ -205,7 +205,7 @@ SHA-1 content.
 Object storage
 ~~~~~~~~~~~~~~
 Loose objects use zlib compression and packed objects use the packed
-format described in Documentation/technical/pack-format.txt, just like
+format described in linkgit:gitformat-pack[5], just like
 today. The content that is compressed and stored uses SHA-256 content
 instead of SHA-1 content.
 
index aa0aa9af1c2eb8835e81a5a43a6332df902ceefe..6f33654b4288d4effafed8a78162da9da2bf7f9a 100644 (file)
@@ -3,7 +3,7 @@ Long-running process protocol
 
 This protocol is used when Git needs to communicate with an external
 process throughout the entire life of a single Git command. All
-communication is in pkt-line format (see technical/protocol-common.txt)
+communication is in pkt-line format (see linkgit:gitprotocol-common[5])
 over standard input and standard output.
 
 Handshake
index 1eb525fe760461608442b7a35a40a48e1fddee79..9d453d47651a03dd432c71a71828c1bae2234dfb 100644 (file)
@@ -18,7 +18,7 @@ a `packfile-uris` argument, the server MAY send a `packfile-uris` section
 directly before the `packfile` section (right after `wanted-refs` if it is
 sent) containing URIs of any of the given protocols. The URIs point to
 packfiles that use only features that the client has declared that it supports
-(e.g. ofs-delta and thin-pack). See protocol-v2.txt for the documentation of
+(e.g. ofs-delta and thin-pack). See linkgit:gitprotocol-v2[5] for the documentation of
 this section.
 
 Clients should then download and index all the given URIs (in addition to
index 99f0eb304061adeb6b9d2b487eb1c2c11d07463b..92fcee2bfffff8c42a07f0dd4b87421abd94f2bb 100644 (file)
@@ -79,7 +79,7 @@ Design Details
   upload-pack negotiation.
 +
 This uses the existing capability discovery mechanism.
-See "filter" in Documentation/technical/pack-protocol.txt.
+See "filter" in linkgit:gitprotocol-pack[5].
 
 - Clients pass a "filter-spec" to clone and fetch which is passed to the
   server to request filtering during packfile construction.
index 2fd5cc88e0b808c75344b4e3e7ba80d2e1938c3f..af091a7556a4efa16b5b76f0d85484c258a0d5da 100644 (file)
@@ -20,7 +20,7 @@ Outline:
   3. Why any rename on MERGE_SIDE1 in any given pick is _almost_ always also
      a rename on MERGE_SIDE1 for the next pick
 
-  4. A detailed description of the the counter-examples to #3.
+  4. A detailed description of the counter-examples to #3.
 
   5. Why the special cases in #4 are still fully reasonable to use to pair
      up files for three-way content merging in the merge machinery, and why
diff --git a/Documentation/technical/scalar.txt b/Documentation/technical/scalar.txt
new file mode 100644 (file)
index 0000000..0600150
--- /dev/null
@@ -0,0 +1,127 @@
+Scalar
+======
+
+Scalar is a repository management tool that optimizes Git for use in large
+repositories. It accomplishes this by helping users to take advantage of
+advanced performance features in Git. Unlike most other Git built-in commands,
+Scalar is not executed as a subcommand of 'git'; rather, it is built as a
+separate executable containing its own series of subcommands.
+
+Background
+----------
+
+Scalar was originally designed as an add-on to Git and implemented as a .NET
+Core application. It was created based on the learnings from the VFS for Git
+project (another application aimed at improving the experience of working with
+large repositories). As part of its initial implementation, Scalar relied on
+custom features in the Microsoft fork of Git that have since been integrated
+into core Git:
+
+* partial clone,
+* commit graphs,
+* multi-pack index,
+* sparse checkout (cone mode),
+* scheduled background maintenance,
+* etc
+
+With the requisite Git functionality in place and a desire to bring the benefits
+of Scalar to the larger Git community, the Scalar application itself was ported
+from C# to C and integrated upstream.
+
+Features
+--------
+
+Scalar is comprised of two major pieces of functionality: automatically
+configuring built-in Git performance features and managing repository
+enlistments.
+
+The Git performance features configured by Scalar (see "Background" for
+examples) confer substantial performance benefits to large repositories, but are
+either too experimental to enable for all of Git yet, or only benefit large
+repositories. As new features are introduced, Scalar should be updated
+accordingly to incorporate them. This will prevent the tool from becoming stale
+while also providing a path for more easily bringing features to the appropriate
+users.
+
+Enlistments are how Scalar knows which repositories on a user's system should
+utilize Scalar-configured features. This allows it to update performance
+settings when new ones are added to the tool, as well as centrally manage
+repository maintenance. The enlistment structure - a root directory with a
+`src/` subdirectory containing the cloned repository itself - is designed to
+encourage users to route build outputs outside of the repository to avoid the
+performance-limiting overhead of ignoring those files in Git.
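+
+For example, a clone performed with Scalar produces that layout (the URL is
+hypothetical):
+
+       scalar clone https://example.com/org/repo.git
+       ls repo        # shows only "src"
+       # build outputs can live under repo/ next to src/, outside the clone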
+
+Design
+------
+
+Scalar is implemented in C and interacts with Git via a mix of child process
+invocations of Git and direct usage of `libgit.a`. Internally, it is structured
+much like other built-ins with subcommands (e.g., `git stash`), containing a
+`cmd_<subcommand>()` function for each subcommand, routed through a `cmd_main()`
+function. Most options are unique to each subcommand, with `scalar` respecting
+some "global" `git` options (e.g., `-c` and `-C`).
+
+Because `scalar` is not invoked as a Git subcommand (like `git scalar`), it is
+built and installed as its own executable in the `bin/` directory, alongside
+`git`, `git-gui`, etc.
+
+Roadmap
+-------
+
+NOTE: this section will be removed once the remaining tasks outlined in this
+roadmap are complete.
+
+Scalar is a large enough project that it is being upstreamed incrementally,
+living in `contrib/` until it is feature-complete. So far, the following patch
+series have been accepted:
+
+- `scalar-the-beginning`: The initial patch series which sets up
+  `contrib/scalar/` and populates it with a minimal `scalar` command that
+  demonstrates the fundamental ideas.
+
+- `scalar-c-and-C`: The `scalar` command learns about two options that can be
+  specified before the command, `-c <key>=<value>` and `-C <directory>`.
+
+- `scalar-diagnose`: The `scalar` command is taught the `diagnose` subcommand.
+
+- `scalar-generalize-diagnose`: Move the functionality of `scalar diagnose`
+  into `git diagnose` and `git bugreport --diagnose`.
+
+- `scalar-add-fsmonitor`: Enable the built-in FSMonitor in Scalar
+  enlistments. At the end of this series, Scalar should be feature-complete
+  from the perspective of a user.
+
+Roughly speaking (and subject to change), the following series are needed to
+"finish" this initial version of Scalar:
+
+- Move Scalar to toplevel: Move Scalar out of `contrib/` and into the root of
+  `git`. This includes a variety of related updates, including:
+    - building & installing Scalar in the Git root-level 'make [install]'.
+    - building & testing Scalar as part of CI.
+    - moving and expanding test coverage of Scalar (including perf tests).
+    - implementing 'scalar help'/'git help scalar' to display scalar
+      documentation.
+
+Finally, there are two additional patch series that exist in Microsoft's fork of
+Git, but there is no current plan to upstream them. There are some interesting
+ideas there, but the implementation is too specific to Azure Repos and/or VFS
+for Git to be of much help in general.
+
+These still exist mainly because the GVFS protocol is what Azure Repos has
+instead of partial clone, while Git is focused on improving partial clone:
+
+- `scalar-with-gvfs`: The primary purpose of this patch series is to support
+  existing Scalar users whose repositories are hosted in Azure Repos (which does
+  not support Git's partial clones, but supports its predecessor, the GVFS
+  protocol, which is used by Scalar to emulate the partial clone).
+
+  Since the GVFS protocol will never be supported by core Git, this patch series
+  will remain in Microsoft's fork of Git.
+
+- `run-scalar-functional-tests`: The Scalar project developed a quite
+  comprehensive set of integration tests (or, "Functional Tests"). They are the
+  sole remaining part of the original C#-based Scalar project, and this patch
+  adds a GitHub workflow that runs them all.
+
+  Since the tests partially depend on features that are only provided in the
+  `scalar-with-gvfs` patch series, this patch cannot be upstreamed.
index 865074bed4eacac0ba3da5edb8f0094dc411227c..ca9decdd952f88b970999cfd5e772004c45d30d4 100644 (file)
@@ -3133,7 +3133,7 @@ those "loose" objects.
 You can save space and make Git faster by moving these loose objects in
 to a "pack file", which stores a group of objects in an efficient
 compressed format; the details of how pack files are formatted can be
-found in link:technical/pack-format.html[pack format].
+found in link:gitformat-pack[5].
 
 To put the loose objects into a pack, just run git repack:
 
index fb16249620d01d4c948bdbf476dfa606c84b65e4..6ec9e34282bde3771d203a0a5208a68418d720d7 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v2.37.3
+DEF_VER=v2.37.GIT
 
 LF='
 '
diff --git a/INSTALL b/INSTALL
index 4140a3f5c8b6946ca821c2a876cd4390a1a05f1b..89b15d71df521b2b710b77481e9607c03b78fccc 100644 (file)
--- a/INSTALL
+++ b/INSTALL
@@ -135,8 +135,7 @@ Issues of note:
 
          By default, git uses OpenSSL for SHA1 but it will use its own
          library (inspired by Mozilla's) with either NO_OPENSSL or
-         BLK_SHA1.  Also included is a version optimized for PowerPC
-         (PPC_SHA1).
+         BLK_SHA1.
 
        - "libcurl" library is used for fetching and pushing
          repositories over http:// or https://, as well as by
index 04d0fd1fe60702c2040f3658301ce7e322761ceb..d9247ead45bad24c8f8d4dc5e7ad7d3fa256f247 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -155,9 +155,6 @@ include shared.mak
 # Define BLK_SHA1 environment variable to make use of the bundled
 # optimized C SHA1 routine.
 #
-# Define PPC_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine optimized for PowerPC.
-#
 # Define DC_SHA1 to unconditionally enable the collision-detecting sha1
 # algorithm. This is slower, but may detect attempted collision attacks.
 # Takes priority over other *_SHA1 knobs.
@@ -182,6 +179,8 @@ include shared.mak
 #
 # Define BLK_SHA256 to use the built-in SHA-256 routines.
 #
+# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle.
+#
 # Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
 #
 # Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
@@ -309,6 +308,11 @@ include shared.mak
 # distributions that want to use their packaged versions of Perl
 # modules, instead of the fallbacks shipped with Git.
 #
+# Define NO_GITWEB if you do not want to build or install
+# 'gitweb'. Note that defining NO_PERL currently has the same effect
+# on not installing gitweb, but not on whether it's built in the
+# gitweb/ directory.
+#
 # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python
 # but /usr/bin/python2.7 or /usr/bin/python3 on some platforms).
 #
@@ -544,6 +548,7 @@ gitexecdir = libexec/git-core
 mergetoolsdir = $(gitexecdir)/mergetools
 sharedir = $(prefix)/share
 gitwebdir = $(sharedir)/gitweb
+gitwebstaticdir = $(gitwebdir)/static
 perllibdir = $(sharedir)/perl5
 localedir = $(sharedir)/locale
 template_dir = share/git-core/templates
@@ -562,7 +567,7 @@ localedir_relative = $(patsubst $(prefix)/%,%,$(localedir))
 htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
 perllibdir_relative = $(patsubst $(prefix)/%,%,$(perllibdir))
 
-export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
+export prefix bindir sharedir sysconfdir perllibdir localedir
 
 # Set our default programs
 CC = cc
@@ -764,6 +769,7 @@ TEST_BUILTINS_OBJS += test-read-midx.o
 TEST_BUILTINS_OBJS += test-ref-store.o
 TEST_BUILTINS_OBJS += test-reftable.o
 TEST_BUILTINS_OBJS += test-regex.o
+TEST_BUILTINS_OBJS += test-rot13-filter.o
 TEST_BUILTINS_OBJS += test-repository.o
 TEST_BUILTINS_OBJS += test-revision-walking.o
 TEST_BUILTINS_OBJS += test-run-command.o
@@ -777,6 +783,7 @@ TEST_BUILTINS_OBJS += test-strcmp-offset.o
 TEST_BUILTINS_OBJS += test-string-list.o
 TEST_BUILTINS_OBJS += test-submodule-config.o
 TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o
+TEST_BUILTINS_OBJS += test-submodule.o
 TEST_BUILTINS_OBJS += test-subprocess.o
 TEST_BUILTINS_OBJS += test-trace2.o
 TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
@@ -898,6 +905,7 @@ LIB_OBJS += blob.o
 LIB_OBJS += bloom.o
 LIB_OBJS += branch.o
 LIB_OBJS += bulk-checkin.o
+LIB_OBJS += bundle-uri.o
 LIB_OBJS += bundle.o
 LIB_OBJS += cache-tree.o
 LIB_OBJS += cbtree.o
@@ -910,6 +918,7 @@ LIB_OBJS += combine-diff.o
 LIB_OBJS += commit-graph.o
 LIB_OBJS += commit-reach.o
 LIB_OBJS += commit.o
+LIB_OBJS += compat/nonblock.o
 LIB_OBJS += compat/obstack.o
 LIB_OBJS += compat/terminal.o
 LIB_OBJS += compat/zlib-uncompress2.o
@@ -924,6 +933,7 @@ LIB_OBJS += ctype.o
 LIB_OBJS += date.o
 LIB_OBJS += decorate.o
 LIB_OBJS += delta-islands.o
+LIB_OBJS += diagnose.o
 LIB_OBJS += diff-delta.o
 LIB_OBJS += diff-merges.o
 LIB_OBJS += diff-lib.o
@@ -984,7 +994,6 @@ LIB_OBJS += merge-ort.o
 LIB_OBJS += merge-ort-wrappers.o
 LIB_OBJS += merge-recursive.o
 LIB_OBJS += merge.o
-LIB_OBJS += mergesort.o
 LIB_OBJS += midx.o
 LIB_OBJS += name-hash.o
 LIB_OBJS += negotiator/default.o
@@ -1145,6 +1154,7 @@ BUILTIN_OBJS += builtin/credential-cache.o
 BUILTIN_OBJS += builtin/credential-store.o
 BUILTIN_OBJS += builtin/credential.o
 BUILTIN_OBJS += builtin/describe.o
+BUILTIN_OBJS += builtin/diagnose.o
 BUILTIN_OBJS += builtin/diff-files.o
 BUILTIN_OBJS += builtin/diff-index.o
 BUILTIN_OBJS += builtin/diff-tree.o
@@ -1286,7 +1296,7 @@ SANITIZE_ADDRESS =
 # For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will
 # usually result in less CPU usage at the cost of higher peak memory.
 # Setting it to 0 will feed all files in a single spatch invocation.
-SPATCH_FLAGS = --all-includes --patch .
+SPATCH_FLAGS = --all-includes
 SPATCH_BATCH_SIZE = 1
 
 include config.mak.uname
@@ -1791,6 +1801,10 @@ ifdef APPLE_COMMON_CRYPTO
        SHA1_MAX_BLOCK_SIZE = 1024L*1024L*1024L
 endif
 
+ifdef PPC_SHA1
+$(error the PPC_SHA1 flag has been removed along with the PowerPC-specific SHA-1 implementation.)
+endif
+
 ifdef OPENSSL_SHA1
        EXTLIBS += $(LIB_4_CRYPTO)
        BASIC_CFLAGS += -DSHA1_OPENSSL
@@ -1799,10 +1813,6 @@ ifdef BLK_SHA1
        LIB_OBJS += block-sha1/sha1.o
        BASIC_CFLAGS += -DSHA1_BLK
 else
-ifdef PPC_SHA1
-       LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o
-       BASIC_CFLAGS += -DSHA1_PPC
-else
 ifdef APPLE_COMMON_CRYPTO
        COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL
        BASIC_CFLAGS += -DSHA1_APPLE
@@ -1836,12 +1846,15 @@ endif
 endif
 endif
 endif
-endif
 
 ifdef OPENSSL_SHA256
        EXTLIBS += $(LIB_4_CRYPTO)
        BASIC_CFLAGS += -DSHA256_OPENSSL
 else
+ifdef NETTLE_SHA256
+       BASIC_CFLAGS += -DSHA256_NETTLE
+       EXTLIBS += -lnettle
+else
 ifdef GCRYPT_SHA256
        BASIC_CFLAGS += -DSHA256_GCRYPT
        EXTLIBS += -lgcrypt
@@ -1850,6 +1863,7 @@ else
        BASIC_CFLAGS += -DSHA256_BLK
 endif
 endif
+endif
 
 ifdef SHA1_MAX_BLOCK_SIZE
        LIB_OBJS += compat/sha1-chunked.o
@@ -2089,6 +2103,7 @@ htmldir_relative_SQ = $(subst ','\'',$(htmldir_relative))
 prefix_SQ = $(subst ','\'',$(prefix))
 perllibdir_relative_SQ = $(subst ','\'',$(perllibdir_relative))
 gitwebdir_SQ = $(subst ','\'',$(gitwebdir))
+gitwebstaticdir_SQ = $(subst ','\'',$(gitwebstaticdir))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 TEST_SHELL_PATH_SQ = $(subst ','\'',$(TEST_SHELL_PATH))
@@ -2417,10 +2432,6 @@ GIT-PERL-HEADER: $(PERL_HEADER_TEMPLATE) GIT-PERL-DEFINES Makefile
 perllibdir:
        @echo '$(perllibdir_SQ)'
 
-.PHONY: gitweb
-gitweb:
-       $(QUIET_SUBDIR0)gitweb $(QUIET_SUBDIR1) all
-
 git-instaweb: git-instaweb.sh GIT-SCRIPT-DEFINES
        $(QUIET_GEN)$(cmd_munge_script) && \
        chmod +x $@+ && \
@@ -2581,13 +2592,7 @@ missing_compdb_dir =
 compdb_args =
 endif
 
-ASM_SRC := $(wildcard $(OBJECTS:o=S))
-ASM_OBJ := $(ASM_SRC:S=o)
-C_OBJ := $(filter-out $(ASM_OBJ),$(OBJECTS))
-
-$(C_OBJ): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir)
-       $(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $<
-$(ASM_OBJ): %.o: %.S GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir)
+$(OBJECTS): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir)
        $(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $<
 
 %.s: %.c GIT-CFLAGS FORCE
@@ -3079,7 +3084,7 @@ t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS) $(REFTABLE_TEST_LIB)
 check-sha1:: t/helper/test-tool$X
        t/helper/test-sha1.sh
 
-SP_OBJ = $(patsubst %.o,%.sp,$(C_OBJ))
+SP_OBJ = $(patsubst %.o,%.sp,$(OBJECTS))
 
 $(SP_OBJ): %.sp: %.c %.o
        $(QUIET_SP)cgcc -no-compile $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) \
@@ -3091,6 +3096,9 @@ $(SP_OBJ): %.sp: %.c %.o
 sparse: $(SP_OBJ)
 
 EXCEPT_HDRS := $(GENERATED_H) unicode-width.h compat/% xdiff/%
+ifndef NETTLE_SHA256
+       EXCEPT_HDRS += sha256/nettle.h
+endif
 ifndef GCRYPT_SHA256
        EXCEPT_HDRS += sha256/gcrypt.h
 endif
@@ -3123,6 +3131,8 @@ check: $(GENERATED_H)
                exit 1; \
        fi
 
+COCCI_TEST_RES = $(wildcard contrib/coccinelle/tests/*.res)
+
 %.cocci.patch: %.cocci $(COCCI_SOURCES)
        $(QUIET_SPATCH) \
        if test $(SPATCH_BATCH_SIZE) = 0; then \
@@ -3131,7 +3141,8 @@ check: $(GENERATED_H)
                limit='-n $(SPATCH_BATCH_SIZE)'; \
        fi; \
        if ! echo $(COCCI_SOURCES) | xargs $$limit \
-               $(SPATCH) --sp-file $< $(SPATCH_FLAGS) \
+               $(SPATCH) $(SPATCH_FLAGS) \
+               --sp-file $< --patch . \
                >$@+ 2>$@.log; \
        then \
                cat $@.log; \
@@ -3142,13 +3153,43 @@ check: $(GENERATED_H)
        then \
                echo '    ' SPATCH result: $@; \
        fi
+
+COCCI_TEST_RES_GEN = $(addprefix .build/,$(COCCI_TEST_RES))
+$(COCCI_TEST_RES_GEN): .build/%.res : %.c
+$(COCCI_TEST_RES_GEN): .build/%.res : %.res
+$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinelle/%.cocci
+       $(call mkdir_p_parent_template)
+       $(QUIET_SPATCH_T)$(SPATCH) $(SPATCH_FLAGS) \
+               --very-quiet --no-show-diff \
+               --sp-file $< -o $@ \
+               $(@:.build/%.res=%.c) && \
+       cmp $(@:.build/%=%) $@ || \
+       git -P diff --no-index $(@:.build/%=%) $@ 2>/dev/null; \
+
+.PHONY: coccicheck-test
+coccicheck-test: $(COCCI_TEST_RES_GEN)
+
+coccicheck: coccicheck-test
 coccicheck: $(addsuffix .patch,$(filter-out %.pending.cocci,$(wildcard contrib/coccinelle/*.cocci)))
 
 # See contrib/coccinelle/README
+coccicheck-pending: coccicheck-test
 coccicheck-pending: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.pending.cocci))
 
 .PHONY: coccicheck coccicheck-pending
 
+# "Sub"-Makefiles, not really because they can't be run stand-alone,
+# only there to contain directory-specific rules and variables
+## gitweb/Makefile inclusion:
+MAK_DIR_GITWEB = gitweb/
+include gitweb/Makefile
+
+.PHONY: gitweb
+gitweb: $(MAK_DIR_GITWEB_ALL)
+ifndef NO_GITWEB
+all:: gitweb
+endif
+
 ### Installation rules
 
 ifneq ($(filter /%,$(firstword $(template_dir))),)
@@ -3221,7 +3262,6 @@ ifndef NO_PERL
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)'
        (cd perl/build/lib && $(TAR) cf - .) | \
        (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -)
-       $(MAKE) -C gitweb install
 endif
 ifndef NO_TCLTK
        $(MAKE) -C gitk-git install
@@ -3276,10 +3316,8 @@ endif
                  cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \
        done
 
-.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf
+.PHONY: install-doc install-man install-man-perl install-html install-info install-pdf
 .PHONY: quick-install-doc quick-install-man quick-install-html
-install-gitweb:
-       $(MAKE) -C gitweb install
 
 install-doc: install-man-perl
        $(MAKE) -C Documentation install
@@ -3403,12 +3441,13 @@ profile-clean:
        $(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
 
 cocciclean:
+       $(RM) -r .build/contrib/coccinelle
        $(RM) contrib/coccinelle/*.cocci.patch*
 
 clean: profile-clean coverage-clean cocciclean
        $(RM) -r .build
        $(RM) po/git.pot po/git-core.pot
-       $(RM) *.res
+       $(RM) git.res
        $(RM) $(OBJECTS)
        $(RM) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(REFTABLE_TEST_LIB)
        $(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X
@@ -3425,7 +3464,6 @@ clean: profile-clean coverage-clean cocciclean
        $(MAKE) -C Documentation/ clean
        $(RM) Documentation/GIT-EXCLUDED-PROGRAMS
 ifndef NO_PERL
-       $(MAKE) -C gitweb clean
        $(RM) -r perl/build/
 endif
        $(MAKE) -C templates/ clean
@@ -3489,6 +3527,7 @@ check-docs::
                sed -e '1,/^### command list/d' \
                    -e '/^#/d' \
                    -e '/guide$$/d' \
+                   -e '/interfaces$$/d' \
                    -e 's/[     ].*//' \
                    -e 's/^/listed /' command-list.txt; \
                $(MAKE) -C Documentation print-man1 | \
index 6d88c2c8e06a8da48c16fc5acd0b2f37053af31f..d505db645bec7e01446fc88472acd84b83bdf34f 120000 (symlink)
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.37.3.txt
\ No newline at end of file
+Documentation/RelNotes/2.38.0.txt
\ No newline at end of file
index 22fcd3412ca5d8b343190cd3ae07148d83fd55e3..f071b2a1b4f2ee8bf1959706d1778c3697d81ba7 100644 (file)
@@ -430,7 +430,7 @@ struct pathname_entry {
        struct file_item *item;
 };
 
-static int pathname_entry_cmp(const void *unused_cmp_data,
+static int pathname_entry_cmp(const void *cmp_data UNUSED,
                              const struct hashmap_entry *he1,
                              const struct hashmap_entry *he2,
                              const void *name)
index 509ca04456bd390eb757f9c04a9d35a3285e313f..33ecd8398a12707836cd89975226179413fa9574 100644 (file)
@@ -191,10 +191,10 @@ static struct patch_mode patch_mode_worktree_head = {
        .apply_check_args = { "-R", NULL },
        .is_reverse = 1,
        .prompt_mode = {
-               N_("Discard mode change from index and worktree [y,n,q,a,d%s,?]? "),
-               N_("Discard deletion from index and worktree [y,n,q,a,d%s,?]? "),
-               N_("Discard addition from index and worktree [y,n,q,a,d%s,?]? "),
-               N_("Discard this hunk from index and worktree [y,n,q,a,d%s,?]? "),
+               N_("Discard mode change from worktree [y,n,q,a,d%s,?]? "),
+               N_("Discard deletion from worktree [y,n,q,a,d%s,?]? "),
+               N_("Discard addition from worktree [y,n,q,a,d%s,?]? "),
+               N_("Discard this hunk from worktree [y,n,q,a,d%s,?]? "),
        },
        .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk "
                             "will immediately be marked for discarding."),
@@ -213,10 +213,10 @@ static struct patch_mode patch_mode_worktree_nothead = {
        .apply_args = { NULL },
        .apply_check_args = { NULL },
        .prompt_mode = {
-               N_("Apply mode change to index and worktree [y,n,q,a,d%s,?]? "),
-               N_("Apply deletion to index and worktree [y,n,q,a,d%s,?]? "),
-               N_("Apply addition to index and worktree [y,n,q,a,d%s,?]? "),
-               N_("Apply this hunk to index and worktree [y,n,q,a,d%s,?]? "),
+               N_("Apply mode change to worktree [y,n,q,a,d%s,?]? "),
+               N_("Apply deletion to worktree [y,n,q,a,d%s,?]? "),
+               N_("Apply addition to worktree [y,n,q,a,d%s,?]? "),
+               N_("Apply this hunk to worktree [y,n,q,a,d%s,?]? "),
        },
        .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk "
                             "will immediately be marked for applying."),
@@ -238,6 +238,7 @@ struct hunk_header {
         * include the newline.
         */
        size_t extra_start, extra_end, colored_extra_start, colored_extra_end;
+       unsigned suppress_colored_line_range:1;
 };
 
 struct hunk {
@@ -358,15 +359,14 @@ static int parse_hunk_header(struct add_p_state *s, struct hunk *hunk)
        if (!eol)
                eol = s->colored.buf + s->colored.len;
        p = memmem(line, eol - line, "@@ -", 4);
-       if (!p)
-               return error(_("could not parse colored hunk header '%.*s'"),
-                            (int)(eol - line), line);
-       p = memmem(p + 4, eol - p - 4, " @@", 3);
-       if (!p)
-               return error(_("could not parse colored hunk header '%.*s'"),
-                            (int)(eol - line), line);
+       if (p && (p = memmem(p + 4, eol - p - 4, " @@", 3))) {
+               header->colored_extra_start = p + 3 - s->colored.buf;
+       } else {
+               /* could not parse colored hunk header, leave as-is */
+               header->colored_extra_start = hunk->colored_start;
+               header->suppress_colored_line_range = 1;
+       }
        hunk->colored_start = eol - s->colored.buf + (*eol == '\n');
-       header->colored_extra_start = p + 3 - s->colored.buf;
        header->colored_extra_end = hunk->colored_start;
 
        return 0;
@@ -419,7 +419,8 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
        }
        color_arg_index = args.nr;
        /* Use `--no-color` explicitly, just in case `diff.color = always`. */
-       strvec_pushl(&args, "--no-color", "-p", "--", NULL);
+       strvec_pushl(&args, "--no-color", "--ignore-submodules=dirty", "-p",
+                    "--", NULL);
        for (i = 0; i < ps->nr; i++)
                strvec_push(&args, ps->items[i].original);
 
@@ -592,7 +593,10 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
                        if (colored_eol)
                                colored_p = colored_eol + 1;
                        else if (p != pend)
-                               /* colored shorter than non-colored? */
+                               /* non-colored has more lines? */
+                               goto mismatched_output;
+                       else if (colored_p == colored_pend)
+                               /* last line has no matching colored one? */
                                goto mismatched_output;
                        else
                                colored_p = colored_pend;
@@ -656,6 +660,15 @@ static void render_hunk(struct add_p_state *s, struct hunk *hunk,
                if (!colored) {
                        p = s->plain.buf + header->extra_start;
                        len = header->extra_end - header->extra_start;
+               } else if (header->suppress_colored_line_range) {
+                       strbuf_add(out,
+                                  s->colored.buf + header->colored_extra_start,
+                                  header->colored_extra_end -
+                                  header->colored_extra_start);
+
+                       strbuf_add(out, s->colored.buf + hunk->colored_start,
+                                  hunk->colored_end - hunk->colored_start);
+                       return;
                } else {
                        strbuf_addstr(out, s->s.fraginfo_color);
                        p = s->colored.buf + header->colored_extra_start;
@@ -1547,7 +1560,7 @@ soft_increment:
                        strbuf_remove(&s->answer, 0, 1);
                        strbuf_trim(&s->answer);
                        i = hunk_index - DISPLAY_HUNKS_LINES / 2;
-                       if (i < file_diff->mode_change)
+                       if (i < (int)file_diff->mode_change)
                                i = file_diff->mode_change;
                        while (s->answer.len == 0) {
                                i = display_hunks(s, file_diff, i);
index 042feb66d287329c43cd7b4f94d03339f6c144a3..3e4822b68409b85d36436bd42da862991274cd81 100644 (file)
@@ -38,11 +38,18 @@ static int write_tar_filter_archive(const struct archiver *ar,
 #define USTAR_MAX_MTIME 077777777777ULL
 #endif
 
+static void tar_write_block(const void *buf)
+{
+       write_or_die(1, buf, BLOCKSIZE);
+}
+
+static void (*write_block)(const void *) = tar_write_block;
+
 /* writes out the whole block, but only if it is full */
 static void write_if_needed(void)
 {
        if (offset == BLOCKSIZE) {
-               write_or_die(1, block, BLOCKSIZE);
+               write_block(block);
                offset = 0;
        }
 }
@@ -66,7 +73,7 @@ static void do_write_blocked(const void *data, unsigned long size)
                write_if_needed();
        }
        while (size >= BLOCKSIZE) {
-               write_or_die(1, buf, BLOCKSIZE);
+               write_block(buf);
                size -= BLOCKSIZE;
                buf += BLOCKSIZE;
        }
@@ -101,10 +108,10 @@ static void write_trailer(void)
 {
        int tail = BLOCKSIZE - offset;
        memset(block + offset, 0, tail);
-       write_or_die(1, block, BLOCKSIZE);
+       write_block(block);
        if (tail < 2 * RECORDSIZE) {
                memset(block, 0, offset);
-               write_or_die(1, block, BLOCKSIZE);
+               write_block(block);
        }
 }
 
@@ -359,7 +366,8 @@ static struct archiver *find_tar_filter(const char *name, size_t len)
        return NULL;
 }
 
-static int tar_filter_config(const char *var, const char *value, void *data)
+static int tar_filter_config(const char *var, const char *value,
+                            void *data UNUSED)
 {
        struct archiver *ar;
        const char *name;
@@ -383,8 +391,8 @@ static int tar_filter_config(const char *var, const char *value, void *data)
        if (!strcmp(type, "command")) {
                if (!value)
                        return config_error_nonbool(var);
-               free(ar->data);
-               ar->data = xstrdup(value);
+               free(ar->filter_command);
+               ar->filter_command = xstrdup(value);
                return 0;
        }
        if (!strcmp(type, "remote")) {
@@ -413,7 +421,7 @@ static int git_tar_config(const char *var, const char *value, void *cb)
        return tar_filter_config(var, value, cb);
 }
 
-static int write_tar_archive(const struct archiver *ar,
+static int write_tar_archive(const struct archiver *ar UNUSED,
                             struct archiver_args *args)
 {
        int err = 0;
@@ -425,17 +433,65 @@ static int write_tar_archive(const struct archiver *ar,
        return err;
 }
 
+static git_zstream gzstream;
+static unsigned char outbuf[16384];
+
+static void tgz_deflate(int flush)
+{
+       while (gzstream.avail_in || flush == Z_FINISH) {
+               int status = git_deflate(&gzstream, flush);
+               if (!gzstream.avail_out || status == Z_STREAM_END) {
+                       write_or_die(1, outbuf, gzstream.next_out - outbuf);
+                       gzstream.next_out = outbuf;
+                       gzstream.avail_out = sizeof(outbuf);
+                       if (status == Z_STREAM_END)
+                               break;
+               }
+               if (status != Z_OK && status != Z_BUF_ERROR)
+                       die(_("deflate error (%d)"), status);
+       }
+}
+
+static void tgz_write_block(const void *data)
+{
+       gzstream.next_in = (void *)data;
+       gzstream.avail_in = BLOCKSIZE;
+       tgz_deflate(Z_NO_FLUSH);
+}
+
+static const char internal_gzip_command[] = "git archive gzip";
+
 static int write_tar_filter_archive(const struct archiver *ar,
                                    struct archiver_args *args)
 {
+#if ZLIB_VERNUM >= 0x1221
+       struct gz_header_s gzhead = { .os = 3 }; /* Unix, for reproducibility */
+#endif
        struct strbuf cmd = STRBUF_INIT;
        struct child_process filter = CHILD_PROCESS_INIT;
        int r;
 
-       if (!ar->data)
+       if (!ar->filter_command)
                BUG("tar-filter archiver called with no filter defined");
 
-       strbuf_addstr(&cmd, ar->data);
+       if (!strcmp(ar->filter_command, internal_gzip_command)) {
+               write_block = tgz_write_block;
+               git_deflate_init_gzip(&gzstream, args->compression_level);
+#if ZLIB_VERNUM >= 0x1221
+               if (deflateSetHeader(&gzstream.z, &gzhead) != Z_OK)
+                       BUG("deflateSetHeader() called too late");
+#endif
+               gzstream.next_out = outbuf;
+               gzstream.avail_out = sizeof(outbuf);
+
+               r = write_tar_archive(ar, args);
+
+               tgz_deflate(Z_FINISH);
+               git_deflate_end(&gzstream);
+               return r;
+       }
+
+       strbuf_addstr(&cmd, ar->filter_command);
        if (args->compression_level >= 0)
                strbuf_addf(&cmd, " -%d", args->compression_level);
 
@@ -471,14 +527,14 @@ void init_tar_archiver(void)
        int i;
        register_archiver(&tar_archiver);
 
-       tar_filter_config("tar.tgz.command", "gzip -cn", NULL);
+       tar_filter_config("tar.tgz.command", internal_gzip_command, NULL);
        tar_filter_config("tar.tgz.remote", "true", NULL);
-       tar_filter_config("tar.tar.gz.command", "gzip -cn", NULL);
+       tar_filter_config("tar.tar.gz.command", internal_gzip_command, NULL);
        tar_filter_config("tar.tar.gz.remote", "true", NULL);
        git_config(git_tar_config, NULL);
        for (i = 0; i < nr_tar_filters; i++) {
                /* omit any filters that never had a command configured */
-               if (tar_filters[i]->data)
+               if (tar_filters[i]->filter_command)
                        register_archiver(tar_filters[i]);
        }
 }
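
Note on the hunks above: git archive now performs its default tgz compression in-process (the "git archive gzip" pseudo-command) instead of piping through an external `gzip -cn`, streaming each tar block through tgz_write_block()/tgz_deflate(). As a rough standalone illustration of the same fill-and-drain deflate loop — plain zlib here rather than git's git_zstream wrapper, and a stdin/stdout filter rather than tar blocks — the following sketch produces a gzip stream; a windowBits value of 15 + 16 is what asks zlib for gzip framing:

#include <stdio.h>
#include <zlib.h>

#define CHUNK 16384

/*
 * Compress stdin to a gzip stream on stdout. The inner loop drains the
 * output buffer whenever it fills, which is the same shape as the
 * tgz_deflate() loop above; error handling is kept minimal.
 */
int main(void)
{
        z_stream strm = { 0 };
        unsigned char in[CHUNK], out[CHUNK];
        int status = Z_OK, flush;

        /* windowBits 15 + 16 selects a gzip (not raw zlib) header/trailer */
        if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                         15 + 16, 8, Z_DEFAULT_STRATEGY) != Z_OK)
                return 1;

        do {
                strm.avail_in = fread(in, 1, CHUNK, stdin);
                strm.next_in = in;
                flush = feof(stdin) ? Z_FINISH : Z_NO_FLUSH;

                do {
                        strm.avail_out = CHUNK;
                        strm.next_out = out;
                        status = deflate(&strm, flush);
                        fwrite(out, 1, CHUNK - strm.avail_out, stdout);
                } while (strm.avail_out == 0);
        } while (flush != Z_FINISH);

        deflateEnd(&strm);
        return status == Z_STREAM_END ? 0 : 1;
}

The "#if ZLIB_VERNUM >= 0x1221" guard in the hunk exists because deflateSetHeader() — used there to pin the gzip OS byte for reproducible output — only appears in sufficiently new zlib releases.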
index 9fe43d740d83f99111c31ef64207cf908765c17e..0456f1ebf15c839639f4759c329cb957b0a64feb 100644 (file)
@@ -612,12 +612,13 @@ static void dos_time(timestamp_t *timestamp, int *dos_date, int *dos_time)
        *dos_time = tm.tm_sec / 2 + tm.tm_min * 32 + tm.tm_hour * 2048;
 }
 
-static int archive_zip_config(const char *var, const char *value, void *data)
+static int archive_zip_config(const char *var, const char *value,
+                             void *data UNUSED)
 {
        return userdiff_config(var, value);
 }
 
-static int write_zip_archive(const struct archiver *ar,
+static int write_zip_archive(const struct archiver *ar UNUSED,
                             struct archiver_args *args)
 {
        int err;
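
Many hunks in this commit do nothing but add the UNUSED marker to callback parameters that the function signature requires but the body never reads; that keeps the callback type intact while letting the compiler flag genuinely unused parameters elsewhere. A hedged sketch of the kind of definition behind it (the real macro lives in git-compat-util.h and its exact form is not shown in this diff):

/* Sketch only: annotate a required-but-ignored callback parameter. */
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

static int example_config_cb(const char *var, const char *value,
                             void *data UNUSED)
{
        /* 'data' must stay in the signature to match the callback type */
        return 0;
}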
index d5109abb894200f9b37b3631286a529437463816..61a79e4a2270dfc0e738ee97dd5cb5a9054ba79f 100644 (file)
--- a/archive.c
+++ b/archive.c
@@ -382,7 +382,8 @@ struct path_exists_context {
        struct archiver_args *args;
 };
 
-static int reject_entry(const struct object_id *oid, struct strbuf *base,
+static int reject_entry(const struct object_id *oid UNUSED,
+                       struct strbuf *base,
                        const char *filename, unsigned mode,
                        void *context)
 {
index 49fab71aaf649e1d5576f359eec8dcc0c096bf22..08bed3ed3af6b062f3ff2791613c64b86d42ec00 100644 (file)
--- a/archive.h
+++ b/archive.h
@@ -43,7 +43,7 @@ struct archiver {
        const char *name;
        int (*write_archive)(const struct archiver *, struct archiver_args *);
        unsigned flags;
-       void *data;
+       char *filter_command;
 };
 void register_archiver(struct archiver *);
 
diff --git a/attr.c b/attr.c
index 21e4ad25ada625885ebcc524facf6a9ce816d39e..8250b0695321a804be3d06777570702aee1e5f2e 100644 (file)
--- a/attr.c
+++ b/attr.c
@@ -61,10 +61,10 @@ struct attr_hash_entry {
 };
 
 /* attr_hashmap comparison function */
-static int attr_hash_entry_cmp(const void *unused_cmp_data,
+static int attr_hash_entry_cmp(const void *cmp_data UNUSED,
                               const struct hashmap_entry *eptr,
                               const struct hashmap_entry *entry_or_key,
-                              const void *unused_keydata)
+                              const void *keydata UNUSED)
 {
        const struct attr_hash_entry *a, *b;
 
@@ -1023,7 +1023,7 @@ static int path_matches(const char *pathname, int pathlen,
        }
        return match_pathname(pathname, pathlen - isdir,
                              base, baselen,
-                             pattern, prefix, pat->patternlen, pat->flags);
+                             pattern, prefix, pat->patternlen);
 }
 
 static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem);
index b63669cc9d768fd1de2cbc06e20e942f77ffcda7..fd581b85a72cc6d1f9b447893f9a3b345a8ce9da 100644 (file)
--- a/bisect.c
+++ b/bisect.c
@@ -441,7 +441,7 @@ void find_bisection(struct commit_list **commit_list, int *reaches,
 }
 
 static int register_ref(const char *refname, const struct object_id *oid,
-                       int flags, void *cb_data)
+                       int flags UNUSED, void *cb_data UNUSED)
 {
        struct strbuf good_prefix = STRBUF_INIT;
        strbuf_addstr(&good_prefix, term_good);
@@ -648,11 +648,14 @@ static struct commit_list *managed_skipped(struct commit_list *list,
 }
 
 static void bisect_rev_setup(struct repository *r, struct rev_info *revs,
+                            struct strvec *rev_argv,
                             const char *prefix,
                             const char *bad_format, const char *good_format,
                             int read_paths)
 {
-       struct strvec rev_argv = STRVEC_INIT;
+       struct setup_revision_opt opt = {
+               .free_removed_argv_elements = 1,
+       };
        int i;
 
        repo_init_revisions(r, revs, prefix);
@@ -660,17 +663,16 @@ static void bisect_rev_setup(struct repository *r, struct rev_info *revs,
        revs->commit_format = CMIT_FMT_UNSPECIFIED;
 
        /* rev_argv.argv[0] will be ignored by setup_revisions */
-       strvec_push(&rev_argv, "bisect_rev_setup");
-       strvec_pushf(&rev_argv, bad_format, oid_to_hex(current_bad_oid));
+       strvec_push(rev_argv, "bisect_rev_setup");
+       strvec_pushf(rev_argv, bad_format, oid_to_hex(current_bad_oid));
        for (i = 0; i < good_revs.nr; i++)
-               strvec_pushf(&rev_argv, good_format,
+               strvec_pushf(rev_argv, good_format,
                             oid_to_hex(good_revs.oid + i));
-       strvec_push(&rev_argv, "--");
+       strvec_push(rev_argv, "--");
        if (read_paths)
-               read_bisect_paths(&rev_argv);
+               read_bisect_paths(rev_argv);
 
-       setup_revisions(rev_argv.nr, rev_argv.v, revs, NULL);
-       /* XXX leak rev_argv, as "revs" may still be pointing to it */
+       setup_revisions(rev_argv->nr, rev_argv->v, revs, &opt);
 }
 
 static void bisect_common(struct rev_info *revs)
@@ -873,10 +875,11 @@ static enum bisect_error check_merge_bases(int rev_nr, struct commit **rev, int
 static int check_ancestors(struct repository *r, int rev_nr,
                           struct commit **rev, const char *prefix)
 {
+       struct strvec rev_argv = STRVEC_INIT;
        struct rev_info revs;
        int res;
 
-       bisect_rev_setup(r, &revs, prefix, "^%s", "%s", 0);
+       bisect_rev_setup(r, &revs, &rev_argv, prefix, "^%s", "%s", 0);
 
        bisect_common(&revs);
        res = (revs.commits != NULL);
@@ -885,6 +888,7 @@ static int check_ancestors(struct repository *r, int rev_nr,
        clear_commit_marks_many(rev_nr, rev, ALL_REV_FLAGS);
 
        release_revisions(&revs);
+       strvec_clear(&rev_argv);
        return res;
 }
 
@@ -1010,6 +1014,7 @@ void read_bisect_terms(const char **read_bad, const char **read_good)
  */
 enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
 {
+       struct strvec rev_argv = STRVEC_INIT;
        struct rev_info revs = REV_INFO_INIT;
        struct commit_list *tried;
        int reaches = 0, all = 0, nr, steps;
@@ -1037,7 +1042,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
        if (res)
                goto cleanup;
 
-       bisect_rev_setup(r, &revs, prefix, "%s", "^%s", 1);
+       bisect_rev_setup(r, &revs, &rev_argv, prefix, "%s", "^%s", 1);
 
        revs.first_parent_only = !!(bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY);
        revs.limited = 1;
@@ -1054,7 +1059,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
                 */
                res = error_if_skipped_commits(tried, NULL);
                if (res < 0)
-                       return res;
+                       goto cleanup;
                printf(_("%s was both %s and %s\n"),
                       oid_to_hex(current_bad_oid),
                       term_good,
@@ -1112,6 +1117,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
        res = bisect_checkout(bisect_rev, no_checkout);
 cleanup:
        release_revisions(&revs);
+       strvec_clear(&rev_argv);
        return res;
 }
 
@@ -1154,8 +1160,9 @@ int estimate_bisect_steps(int all)
        return (e < 3 * x) ? n : n - 1;
 }
 
-static int mark_for_removal(const char *refname, const struct object_id *oid,
-                           int flag, void *cb_data)
+static int mark_for_removal(const char *refname,
+                           const struct object_id *oid UNUSED,
+                           int flag UNUSED, void *cb_data)
 {
        struct string_list *refs = cb_data;
        char *ref = xstrfmt("refs/bisect%s", refname);
diff --git a/blame.c b/blame.c
index da1052ac94bb47282fb6191e457dda6597c3e8fc..8bfeaa1c63aedc151b1125e98f52229842d48b19 100644 (file)
--- a/blame.c
+++ b/blame.c
@@ -1098,30 +1098,22 @@ static struct blame_entry *blame_merge(struct blame_entry *list1,
        }
 }
 
-static void *get_next_blame(const void *p)
-{
-       return ((struct blame_entry *)p)->next;
-}
-
-static void set_next_blame(void *p1, void *p2)
-{
-       ((struct blame_entry *)p1)->next = p2;
-}
+DEFINE_LIST_SORT(static, sort_blame_entries, struct blame_entry, next);
 
 /*
  * Final image line numbers are all different, so we don't need a
  * three-way comparison here.
  */
 
-static int compare_blame_final(const void *p1, const void *p2)
+static int compare_blame_final(const struct blame_entry *e1,
+                              const struct blame_entry *e2)
 {
-       return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
-               ? 1 : -1;
+       return e1->lno > e2->lno ? 1 : -1;
 }
 
-static int compare_blame_suspect(const void *p1, const void *p2)
+static int compare_blame_suspect(const struct blame_entry *s1,
+                                const struct blame_entry *s2)
 {
-       const struct blame_entry *s1 = p1, *s2 = p2;
        /*
         * to allow for collating suspects, we sort according to the
         * respective pointer value as the primary sorting criterion.
@@ -1138,8 +1130,7 @@ static int compare_blame_suspect(const void *p1, const void *p2)
 
 void blame_sort_final(struct blame_scoreboard *sb)
 {
-       sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame,
-                                 compare_blame_final);
+       sort_blame_entries(&sb->ent, compare_blame_final);
 }
 
 static int compare_commits_by_reverse_commit_date(const void *a,
@@ -1964,9 +1955,7 @@ static void pass_blame_to_parent(struct blame_scoreboard *sb,
                    parent, target, 0);
        *d.dstq = NULL;
        if (ignore_diffs)
-               newdest = llist_mergesort(newdest, get_next_blame,
-                                         set_next_blame,
-                                         compare_blame_suspect);
+               sort_blame_entries(&newdest, compare_blame_suspect);
        queue_blames(sb, parent, newdest);
 
        return;
@@ -2383,8 +2372,7 @@ static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reve
  */
 static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed)
 {
-       blamed = llist_mergesort(blamed, get_next_blame, set_next_blame,
-                                compare_blame_suspect);
+       sort_blame_entries(&blamed, compare_blame_suspect);
        while (blamed)
        {
                struct blame_origin *porigin = blamed->suspect;
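
The blame.c change above swaps the old llist_mergesort() calls, which needed void-pointer get/set helpers, for a sort generated by DEFINE_LIST_SORT that walks the ->next member directly and takes a typed comparison function (see sort_blame_entries() and compare_blame_suspect()). As a standalone sketch of the same idea — hand-written rather than macro-generated, with a toy struct item standing in for struct blame_entry:

#include <stdio.h>

/* Toy element type; struct blame_entry plays this role in blame.c. */
struct item {
        int lno;
        struct item *next;
};

typedef int (*item_cmp_fn)(const struct item *, const struct item *);

static struct item *merge(struct item *a, struct item *b, item_cmp_fn cmp)
{
        struct item head = { 0 }, *tail = &head;

        while (a && b) {
                if (cmp(a, b) <= 0) {
                        tail->next = a;
                        a = a->next;
                } else {
                        tail->next = b;
                        b = b->next;
                }
                tail = tail->next;
        }
        tail->next = a ? a : b;
        return head.next;
}

/*
 * Sorts *listp in place via the ->next pointers; roughly the shape of
 * function that DEFINE_LIST_SORT(static, sort_items, struct item, next)
 * would generate, i.e. static void sort_items(struct item **, cmp).
 */
static void sort_items(struct item **listp, item_cmp_fn cmp)
{
        struct item *list = *listp, *slow, *fast, *second;

        if (!list || !list->next)
                return;

        /* split at the midpoint using the slow/fast walk */
        slow = list;
        fast = list->next;
        while (fast && fast->next) {
                slow = slow->next;
                fast = fast->next->next;
        }
        second = slow->next;
        slow->next = NULL;

        sort_items(&list, cmp);
        sort_items(&second, cmp);
        *listp = merge(list, second, cmp);
}

static int cmp_lno(const struct item *a, const struct item *b)
{
        return a->lno > b->lno ? 1 : -1;
}

int main(void)
{
        struct item c = { 1, NULL }, b = { 3, &c }, a = { 2, &b };
        struct item *list = &a;

        sort_items(&list, cmp_lno);
        for (; list; list = list->next)
                printf("%d\n", list->lno);
        return 0;
}

Running this prints 1, 2, 3; in blame.c the equivalent calls are sort_blame_entries(&sb->ent, compare_blame_final) and friends.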
index 5974cd7dd3cc128a7d0f7fd9a5f5c7c9c972de15..80cebd27564f5aeace14bd3963f0a2f981fad330 100644 (file)
  * try to do the silly "optimize away loads" part because it won't
  * see what the value will be).
  *
- * Ben Herrenschmidt reports that on PPC, the C version comes close
- * to the optimized asm with this (ie on PPC you don't want that
- * 'volatile', since there are lots of registers).
- *
  * On ARM we get the best code generation by forcing a full memory barrier
  * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
  * the stack frame size simply explode and performance goes down the drain.
diff --git a/bloom.c b/bloom.c
index 816f063dca58bda64cd0ff452c9ae917d3c681a9..d0730525da51f783e0f72f502a62db4407bd7ad9 100644 (file)
--- a/bloom.c
+++ b/bloom.c
@@ -163,10 +163,10 @@ void init_bloom_filters(void)
        init_bloom_filter_slab(&bloom_filters);
 }
 
-static int pathmap_cmp(const void *hashmap_cmp_fn_data,
+static int pathmap_cmp(const void *hashmap_cmp_fn_data UNUSED,
                       const struct hashmap_entry *eptr,
                       const struct hashmap_entry *entry_or_key,
-                      const void *keydata)
+                      const void *keydata UNUSED)
 {
        const struct pathmap_hash_entry *e1, *e2;
 
index 4c8523c66ad07f956fc7ff8a81339d069dd586c8..d182756827fe5128292798b707a52aed25e7aa48 100644 (file)
--- a/branch.c
+++ b/branch.c
@@ -10,6 +10,7 @@
 #include "worktree.h"
 #include "submodule-config.h"
 #include "run-command.h"
+#include "strmap.h"
 
 struct tracking {
        struct refspec_item spec;
@@ -369,6 +370,83 @@ int validate_branchname(const char *name, struct strbuf *ref)
        return ref_exists(ref->buf);
 }
 
+static int initialized_checked_out_branches;
+static struct strmap current_checked_out_branches = STRMAP_INIT;
+
+static void prepare_checked_out_branches(void)
+{
+       int i = 0;
+       struct worktree **worktrees;
+
+       if (initialized_checked_out_branches)
+               return;
+       initialized_checked_out_branches = 1;
+
+       worktrees = get_worktrees();
+
+       while (worktrees[i]) {
+               char *old;
+               struct wt_status_state state = { 0 };
+               struct worktree *wt = worktrees[i++];
+               struct string_list update_refs = STRING_LIST_INIT_DUP;
+
+               if (wt->is_bare)
+                       continue;
+
+               if (wt->head_ref) {
+                       old = strmap_put(&current_checked_out_branches,
+                                        wt->head_ref,
+                                        xstrdup(wt->path));
+                       free(old);
+               }
+
+               if (wt_status_check_rebase(wt, &state) &&
+                   (state.rebase_in_progress || state.rebase_interactive_in_progress) &&
+                   state.branch) {
+                       struct strbuf ref = STRBUF_INIT;
+                       strbuf_addf(&ref, "refs/heads/%s", state.branch);
+                       old = strmap_put(&current_checked_out_branches,
+                                        ref.buf,
+                                        xstrdup(wt->path));
+                       free(old);
+                       strbuf_release(&ref);
+               }
+               wt_status_state_free_buffers(&state);
+
+               if (wt_status_check_bisect(wt, &state) &&
+                   state.branch) {
+                       struct strbuf ref = STRBUF_INIT;
+                       strbuf_addf(&ref, "refs/heads/%s", state.branch);
+                       old = strmap_put(&current_checked_out_branches,
+                                        ref.buf,
+                                        xstrdup(wt->path));
+                       free(old);
+                       strbuf_release(&ref);
+               }
+               wt_status_state_free_buffers(&state);
+
+               if (!sequencer_get_update_refs_state(get_worktree_git_dir(wt),
+                                                    &update_refs)) {
+                       struct string_list_item *item;
+                       for_each_string_list_item(item, &update_refs) {
+                               old = strmap_put(&current_checked_out_branches,
+                                                item->string,
+                                                xstrdup(wt->path));
+                               free(old);
+                       }
+                       string_list_clear(&update_refs, 1);
+               }
+       }
+
+       free_worktrees(worktrees);
+}
+
+const char *branch_checked_out(const char *refname)
+{
+       prepare_checked_out_branches();
+       return strmap_get(&current_checked_out_branches, refname);
+}
+
 /*
  * Check if a branch 'name' can be created as a new branch; die otherwise.
  * 'force' can be used when it is OK for the named branch already exists.
@@ -377,9 +455,7 @@ int validate_branchname(const char *name, struct strbuf *ref)
  */
 int validate_new_branchname(const char *name, struct strbuf *ref, int force)
 {
-       struct worktree **worktrees;
-       const struct worktree *wt;
-
+       const char *path;
        if (!validate_branchname(name, ref))
                return 0;
 
@@ -387,13 +463,10 @@ int validate_new_branchname(const char *name, struct strbuf *ref, int force)
                die(_("a branch named '%s' already exists"),
                    ref->buf + strlen("refs/heads/"));
 
-       worktrees = get_worktrees();
-       wt = find_shared_symref(worktrees, "HEAD", ref->buf);
-       if (wt && !wt->is_bare)
+       if ((path = branch_checked_out(ref->buf)))
                die(_("cannot force update the branch '%s' "
                      "checked out at '%s'"),
-                   ref->buf + strlen("refs/heads/"), wt->path);
-       free_worktrees(worktrees);
+                   ref->buf + strlen("refs/heads/"), path);
 
        return 1;
 }
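
The new branch_checked_out() helper above builds a one-time cache mapping ref names to worktree paths in a strmap, so callers such as validate_new_branchname() no longer walk all worktrees on every call. A minimal sketch of the put/get pattern it relies on, assuming git's strmap.h as used in the hunk (the diff itself shows that strmap_put() returns whatever value it displaces, which is why the old entry is freed):

#include "git-compat-util.h"
#include "strmap.h"

static struct strmap checked_out = STRMAP_INIT;

/* Record (or overwrite) the worktree path for a ref. */
static void remember_checkout(const char *refname, const char *path)
{
        /* strmap_put() hands back any value it displaces, so free it */
        char *old = strmap_put(&checked_out, refname, xstrdup(path));
        free(old);
}

/* Returns the stored path, or NULL if the ref is not checked out. */
static const char *lookup_checkout(const char *refname)
{
        return strmap_get(&checked_out, refname);
}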
index 560b6b96a8f307baac31ea699321e17ed2c22dbe..ef56103c050fa09d6087e2bade7f24240d79ae04 100644 (file)
--- a/branch.h
+++ b/branch.h
@@ -101,6 +101,13 @@ void create_branches_recursively(struct repository *r, const char *name,
                                 const char *tracking_name, int force,
                                 int reflog, int quiet, enum branch_track track,
                                 int dry_run);
+
+/*
+ * If the branch at 'refname' is currently checked out in a worktree,
+ * then return the path to that worktree.
+ */
+const char *branch_checked_out(const char *refname);
+
 /*
  * Check if 'name' can be a valid name for a branch; die otherwise.
  * Return 1 if the named branch already exists; return 0 otherwise.
index 40e9ecc8485324a40e142d5cbc9345a22e45f333..8901a34d6bf424680b9d13a1bdf332bedb4d8e20 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -144,6 +144,7 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix);
 int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix);
 int cmd_credential_store(int argc, const char **argv, const char *prefix);
 int cmd_describe(int argc, const char **argv, const char *prefix);
+int cmd_diagnose(int argc, const char **argv, const char *prefix);
 int cmd_diff_files(int argc, const char **argv, const char *prefix);
 int cmd_diff_index(int argc, const char **argv, const char *prefix);
 int cmd_diff(int argc, const char **argv, const char *prefix);
index 93bec62afa993cb236ce09c13b38932ee396f250..39fea24833078be76d76d974e5b601715ed51aea 100644 (file)
@@ -2301,7 +2301,7 @@ static int parse_opt_show_current_patch(const struct option *opt, const char *ar
        return 0;
 }
 
-static int git_am_config(const char *k, const char *v, void *cb)
+static int git_am_config(const char *k, const char *v, void *cb UNUSED)
 {
        int status;
 
index 7176b041b6d85b5760c91f94fcdde551a38d147f..f094390ee01f810e7035f2efc2be75deedc3befa 100644 (file)
@@ -75,7 +75,7 @@ static int run_remote_archiver(int argc, const char **argv,
 
 #define PARSE_OPT_KEEP_ALL ( PARSE_OPT_KEEP_DASHDASH |         \
                             PARSE_OPT_KEEP_ARGV0 |     \
-                            PARSE_OPT_KEEP_UNKNOWN |   \
+                            PARSE_OPT_KEEP_UNKNOWN_OPT |       \
                             PARSE_OPT_NO_INTERNAL_HELP )
 
 int cmd_archive(int argc, const char **argv, const char *prefix)
index 8a052c7111f97f02690a31e98ef4102961796800..501245fac9521717d4ca83341ab5528686fb5777 100644 (file)
@@ -329,8 +329,9 @@ static int check_and_set_terms(struct bisect_terms *terms, const char *cmd)
        return 0;
 }
 
-static int inc_nr(const char *refname, const struct object_id *oid,
-                 int flag, void *cb_data)
+static int inc_nr(const char *refname UNUSED,
+                 const struct object_id *oid UNUSED,
+                 int flag UNUSED, void *cb_data)
 {
        unsigned int *nr = (unsigned int *)cb_data;
        (*nr)++;
@@ -518,7 +519,7 @@ finish:
 }
 
 static int add_bisect_ref(const char *refname, const struct object_id *oid,
-                         int flags, void *cb)
+                         int flags UNUSED, void *cb)
 {
        struct add_bisect_ref_data *data = cb;
 
@@ -1134,8 +1135,9 @@ static int bisect_visualize(struct bisect_terms *terms, const char **argv, int a
        return res;
 }
 
-static int get_first_good(const char *refname, const struct object_id *oid,
-                         int flag, void *cb_data)
+static int get_first_good(const char *refname UNUSED,
+                         const struct object_id *oid,
+                         int flag UNUSED, void *cb_data)
 {
        oidcpy(cb_data, oid);
        return 1;
@@ -1324,7 +1326,7 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
 
        argc = parse_options(argc, argv, prefix, options,
                             git_bisect_helper_usage,
-                            PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        if (!cmdmode)
                usage_with_options(git_bisect_helper_usage, options);
index 02e39420b62babb5124708f3305985ebe6e316a9..a9fe8cf7a68bf31547b7482a40eb269f6ea0c30f 100644 (file)
@@ -920,6 +920,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
                        break;
                case PARSE_OPT_HELP:
                case PARSE_OPT_ERROR:
+               case PARSE_OPT_SUBCOMMAND:
                        exit(129);
                case PARSE_OPT_COMPLETE:
                        exit(0);
index 5d00d0b8d327c5cc048ac0baf997e3670d5ff3df..55cd9a6e9984fc66334a7e78bd65294c628e34fb 100644 (file)
@@ -204,7 +204,6 @@ static void delete_branch_config(const char *branchname)
 static int delete_branches(int argc, const char **argv, int force, int kinds,
                           int quiet)
 {
-       struct worktree **worktrees;
        struct commit *head_rev = NULL;
        struct object_id oid;
        char *name = NULL;
@@ -242,8 +241,6 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
                        die(_("Couldn't look up commit object for HEAD"));
        }
 
-       worktrees = get_worktrees();
-
        for (i = 0; i < argc; i++, strbuf_reset(&bname)) {
                char *target = NULL;
                int flags = 0;
@@ -253,12 +250,11 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
                name = mkpathdup(fmt, bname.buf);
 
                if (kinds == FILTER_REFS_BRANCHES) {
-                       const struct worktree *wt =
-                               find_shared_symref(worktrees, "HEAD", name);
-                       if (wt) {
+                       const char *path;
+                       if ((path = branch_checked_out(name))) {
                                error(_("Cannot delete branch '%s' "
                                        "checked out at '%s'"),
-                                     bname.buf, wt->path);
+                                     bname.buf, path);
                                ret = 1;
                                continue;
                        }
@@ -315,7 +311,6 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
 
        free(name);
        strbuf_release(&bname);
-       free_worktrees(worktrees);
 
        return ret;
 }
index 9de32bc96e7a6f2171473b8d20f5cde3834e5c42..530895be55fe6d9627c5d954f4c235d22769051e 100644 (file)
@@ -5,6 +5,7 @@
 #include "compat/compiler.h"
 #include "hook.h"
 #include "hook-list.h"
+#include "diagnose.h"
 
 
 static void get_system_info(struct strbuf *sys_info)
@@ -59,7 +60,7 @@ static void get_populated_hooks(struct strbuf *hook_info, int nongit)
 }
 
 static const char * const bugreport_usage[] = {
-       N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>]"),
+       N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>] [--diagnose[=<mode>]"),
        NULL
 };
 
@@ -98,16 +99,21 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
        int report = -1;
        time_t now = time(NULL);
        struct tm tm;
+       enum diagnose_mode diagnose = DIAGNOSE_NONE;
        char *option_output = NULL;
        char *option_suffix = "%Y-%m-%d-%H%M";
        const char *user_relative_path = NULL;
        char *prefixed_filename;
+       size_t output_path_len;
 
        const struct option bugreport_options[] = {
+               OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"),
+                              N_("create an additional zip archive of detailed diagnostics (default 'stats')"),
+                              PARSE_OPT_OPTARG, option_parse_diagnose),
                OPT_STRING('o', "output-directory", &option_output, N_("path"),
-                          N_("specify a destination for the bugreport file")),
+                          N_("specify a destination for the bugreport file(s)")),
                OPT_STRING('s', "suffix", &option_suffix, N_("format"),
-                          N_("specify a strftime format suffix for the filename")),
+                          N_("specify a strftime format suffix for the filename(s)")),
                OPT_END()
        };
 
@@ -119,6 +125,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
                                            option_output ? option_output : "");
        strbuf_addstr(&report_path, prefixed_filename);
        strbuf_complete(&report_path, '/');
+       output_path_len = report_path.len;
 
        strbuf_addstr(&report_path, "git-bugreport-");
        strbuf_addftime(&report_path, option_suffix, localtime_r(&now, &tm), 0, 0);
@@ -133,6 +140,20 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
                    report_path.buf);
        }
 
+       /* Prepare diagnostics, if requested */
+       if (diagnose != DIAGNOSE_NONE) {
+               struct strbuf zip_path = STRBUF_INIT;
+               strbuf_add(&zip_path, report_path.buf, output_path_len);
+               strbuf_addstr(&zip_path, "git-diagnostics-");
+               strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+               strbuf_addstr(&zip_path, ".zip");
+
+               if (create_diagnostics_archive(&zip_path, diagnose))
+                       die_errno(_("unable to create diagnostics archive %s"), zip_path.buf);
+
+               strbuf_release(&zip_path);
+       }
+
        /* Prepare the report contents */
        get_bug_template(&buffer);
 
index 2adad545a2e972221869703658e98be0c514d005..e80efce3a420a0371bd984102105b8a51af34df1 100644 (file)
@@ -195,30 +195,19 @@ cleanup:
 
 int cmd_bundle(int argc, const char **argv, const char *prefix)
 {
+       parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
+               OPT_SUBCOMMAND("create", &fn, cmd_bundle_create),
+               OPT_SUBCOMMAND("verify", &fn, cmd_bundle_verify),
+               OPT_SUBCOMMAND("list-heads", &fn, cmd_bundle_list_heads),
+               OPT_SUBCOMMAND("unbundle", &fn, cmd_bundle_unbundle),
                OPT_END()
        };
-       int result;
 
        argc = parse_options(argc, argv, prefix, options, builtin_bundle_usage,
-               PARSE_OPT_STOP_AT_NON_OPTION);
+                            0);
 
        packet_trace_identity("bundle");
 
-       if (argc < 2)
-               usage_with_options(builtin_bundle_usage, options);
-
-       else if (!strcmp(argv[0], "create"))
-               result = cmd_bundle_create(argc, argv, prefix);
-       else if (!strcmp(argv[0], "verify"))
-               result = cmd_bundle_verify(argc, argv, prefix);
-       else if (!strcmp(argv[0], "list-heads"))
-               result = cmd_bundle_list_heads(argc, argv, prefix);
-       else if (!strcmp(argv[0], "unbundle"))
-               result = cmd_bundle_unbundle(argc, argv, prefix);
-       else {
-               error(_("Unknown subcommand: %s"), argv[0]);
-               usage_with_options(builtin_bundle_usage, options);
-       }
-       return result ? 1 : 0;
+       return !!fn(argc, argv, prefix);
 }
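
The cmd_bundle() rewrite above (and the matching commit-graph change later in this diff) replaces hand-rolled strcmp() dispatch with parse-options subcommand support. A sketch of the pattern for a hypothetical `git foo` builtin — the names are illustrative, while OPT_SUBCOMMAND, parse_opt_subcommand_fn and the final `return !!fn(...)` mirror the hunk above:

#include "builtin.h"
#include "parse-options.h"

static const char * const foo_usage[] = {
        N_("git foo <bar|baz> [<options>]"),
        NULL
};

static int cmd_foo_bar(int argc, const char **argv, const char *prefix)
{
        /* per-subcommand options and work go here */
        return 0;
}

static int cmd_foo_baz(int argc, const char **argv, const char *prefix)
{
        return 0;
}

int cmd_foo(int argc, const char **argv, const char *prefix)
{
        parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
                OPT_SUBCOMMAND("bar", &fn, cmd_foo_bar),
                OPT_SUBCOMMAND("baz", &fn, cmd_foo_baz),
                OPT_END()
        };

        /*
         * parse_options() picks the matching subcommand, stores it in
         * 'fn', and handles unknown or missing subcommands itself, so
         * no PARSE_OPT_STOP_AT_NON_OPTION flag or manual strcmp()
         * chain is needed any more.
         */
        argc = parse_options(argc, argv, prefix, options, foo_usage, 0);
        return !!fn(argc, argv, prefix);
}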
index 50cf38999d10125428e4c558383582af4f191b95..989eee0bb4c7d924b97b3aac6b43443f7c34ab67 100644 (file)
@@ -16,6 +16,7 @@
 #include "packfile.h"
 #include "object-store.h"
 #include "promisor-remote.h"
+#include "mailmap.h"
 
 enum batch_mode {
        BATCH_MODE_CONTENTS,
@@ -31,11 +32,28 @@ struct batch_options {
        int all_objects;
        int unordered;
        int transform_mode; /* may be 'w' or 'c' for --filters or --textconv */
+       int nul_terminated;
        const char *format;
 };
 
 static const char *force_path;
 
+static struct string_list mailmap = STRING_LIST_INIT_NODUP;
+static int use_mailmap;
+
+static char *replace_idents_using_mailmap(char *, size_t *);
+
+static char *replace_idents_using_mailmap(char *object_buf, size_t *size)
+{
+       struct strbuf sb = STRBUF_INIT;
+       const char *headers[] = { "author ", "committer ", "tagger ", NULL };
+
+       strbuf_attach(&sb, object_buf, *size, *size + 1);
+       apply_mailmap_to_header(&sb, headers, &mailmap);
+       *size = sb.len;
+       return strbuf_detach(&sb, NULL);
+}
+
 static int filter_object(const char *path, unsigned mode,
                         const struct object_id *oid,
                         char **buf, unsigned long *size)
@@ -71,6 +89,7 @@ static int stream_blob(const struct object_id *oid)
 static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                        int unknown_type)
 {
+       int ret;
        struct object_id oid;
        enum object_type type;
        char *buf;
@@ -106,7 +125,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                if (sb.len) {
                        printf("%s\n", sb.buf);
                        strbuf_release(&sb);
-                       return 0;
+                       ret = 0;
+                       goto cleanup;
                }
                break;
 
@@ -115,7 +135,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                printf("%"PRIuMAX"\n", (uintmax_t)size);
-               return 0;
+               ret = 0;
+               goto cleanup;
 
        case 'e':
                return !has_object_file(&oid);
@@ -123,8 +144,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        case 'w':
 
                if (filter_object(path, obj_context.mode,
-                                 &oid, &buf, &size))
-                       return -1;
+                                 &oid, &buf, &size)) {
+                       ret = -1;
+                       goto cleanup;
+               }
                break;
 
        case 'c':
@@ -143,15 +166,24 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                        const char *ls_args[3] = { NULL };
                        ls_args[0] =  "ls-tree";
                        ls_args[1] =  obj_name;
-                       return cmd_ls_tree(2, ls_args, NULL);
+                       ret = cmd_ls_tree(2, ls_args, NULL);
+                       goto cleanup;
                }
 
-               if (type == OBJ_BLOB)
-                       return stream_blob(&oid);
+               if (type == OBJ_BLOB) {
+                       ret = stream_blob(&oid);
+                       goto cleanup;
+               }
                buf = read_object_file(&oid, &type, &size);
                if (!buf)
                        die("Cannot read object %s", obj_name);
 
+               if (use_mailmap) {
+                       size_t s = size;
+                       buf = replace_idents_using_mailmap(buf, &s);
+                       size = cast_size_t_to_ulong(s);
+               }
+
                /* otherwise just spit out the data */
                break;
 
@@ -172,8 +204,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                        } else
                                oidcpy(&blob_oid, &oid);
 
-                       if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
-                               return stream_blob(&blob_oid);
+                       if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) {
+                               ret = stream_blob(&blob_oid);
+                               goto cleanup;
+                       }
                        /*
                         * we attempted to dereference a tag to a blob
                         * and failed; there may be new dereference
@@ -183,6 +217,12 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                }
                buf = read_object_with_reference(the_repository, &oid,
                                                 exp_type_id, &size, NULL);
+
+               if (use_mailmap) {
+                       size_t s = size;
+                       buf = replace_idents_using_mailmap(buf, &s);
+                       size = cast_size_t_to_ulong(s);
+               }
                break;
        }
        default:
@@ -193,9 +233,11 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                die("git cat-file %s: bad file", obj_name);
 
        write_or_die(1, buf, size);
+       ret = 0;
+cleanup:
        free(buf);
        free(obj_context.path);
-       return 0;
+       return ret;
 }
 
 struct expand_data {
@@ -348,11 +390,18 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                void *contents;
 
                contents = read_object_file(oid, &type, &size);
+
+               if (use_mailmap) {
+                       size_t s = size;
+                       contents = replace_idents_using_mailmap(contents, &s);
+                       size = cast_size_t_to_ulong(s);
+               }
+
                if (!contents)
                        die("object %s disappeared", oid_to_hex(oid));
                if (type != data->type)
                        die("object %s changed type!?", oid_to_hex(oid));
-               if (data->info.sizep && size != data->size)
+               if (data->info.sizep && size != data->size && !use_mailmap)
                        die("object %s changed size!?", oid_to_hex(oid));
 
                batch_write(opt, contents, size);
@@ -602,12 +651,20 @@ static void batch_objects_command(struct batch_options *opt,
        struct queued_cmd *queued_cmd = NULL;
        size_t alloc = 0, nr = 0;
 
-       while (!strbuf_getline(&input, stdin)) {
-               int i;
+       while (1) {
+               int i, ret;
                const struct parse_cmd *cmd = NULL;
                const char *p = NULL, *cmd_end;
                struct queued_cmd call = {0};
 
+               if (opt->nul_terminated)
+                       ret = strbuf_getline_nul(&input, stdin);
+               else
+                       ret = strbuf_getline(&input, stdin);
+
+               if (ret)
+                       break;
+
                if (!input.len)
                        die(_("empty command in input"));
                if (isspace(*input.buf))
@@ -655,6 +712,7 @@ static void batch_objects_command(struct batch_options *opt,
                free_cmds(queued_cmd, &nr);
        }
 
+       free_cmds(queued_cmd, &nr);
        free(queued_cmd);
        strbuf_release(&input);
 }
@@ -750,7 +808,16 @@ static int batch_objects(struct batch_options *opt)
                goto cleanup;
        }
 
-       while (strbuf_getline(&input, stdin) != EOF) {
+       while (1) {
+               int ret;
+               if (opt->nul_terminated)
+                       ret = strbuf_getline_nul(&input, stdin);
+               else
+                       ret = strbuf_getline(&input, stdin);
+
+               if (ret == EOF)
+                       break;
+
                if (data.split_on_whitespace) {
                        /*
                         * Split at first whitespace, tying off the beginning
@@ -843,6 +910,8 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
                OPT_CMDMODE('s', NULL, &opt, N_("show object size"), 's'),
                OPT_BOOL(0, "allow-unknown-type", &unknown_type,
                          N_("allow -s and -t to work with broken/corrupt objects")),
+               OPT_BOOL(0, "use-mailmap", &use_mailmap, N_("use mail map file")),
+               OPT_ALIAS(0, "mailmap", "use-mailmap"),
                /* Batch mode */
                OPT_GROUP(N_("Batch objects requested on stdin (or --batch-all-objects)")),
                OPT_CALLBACK_F(0, "batch", &batch, N_("format"),
@@ -853,6 +922,7 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
                        N_("like --batch, but don't emit <contents>"),
                        PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
                        batch_option_callback),
+               OPT_BOOL('z', NULL, &batch.nul_terminated, N_("stdin is NUL-terminated")),
                OPT_CALLBACK_F(0, "batch-command", &batch, N_("format"),
                        N_("read commands from stdin"),
                        PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
@@ -885,6 +955,9 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
        opt_cw = (opt == 'c' || opt == 'w');
        opt_epts = (opt == 'e' || opt == 'p' || opt == 't' || opt == 's');
 
+       if (use_mailmap)
+               read_mailmap(&mailmap);
+
        /* --batch-all-objects? */
        if (opt == 'b')
                batch.all_objects = 1;
@@ -908,6 +981,9 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
        else if (batch.all_objects)
                usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
                               "--batch-all-objects");
+       else if (batch.nul_terminated)
+               usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+                              "-z");
 
        /* Batch defaults */
        if (batch.buffer_output < 0)
index bc67d3f0a83d35b2fc6d81f28c6908075564644a..fd0e5f86832a0ed4d9c08512291c836938f2bec2 100644 (file)
@@ -57,6 +57,8 @@ int cmd_check_ref_format(int argc, const char **argv, const char *prefix)
        int normalize = 0;
        int flags = 0;
        const char *refname;
+       char *to_free = NULL;
+       int ret = 1;
 
        if (argc == 2 && !strcmp(argv[1], "-h"))
                usage(builtin_check_ref_format_usage);
@@ -81,11 +83,14 @@ int cmd_check_ref_format(int argc, const char **argv, const char *prefix)
 
        refname = argv[i];
        if (normalize)
-               refname = collapse_slashes(refname);
+               refname = to_free = collapse_slashes(refname);
        if (check_refname_format(refname, flags))
-               return 1;
+               goto cleanup;
        if (normalize)
                printf("%s\n", refname);
 
-       return 0;
+       ret = 0;
+cleanup:
+       free(to_free);
+       return ret;
 }
index cdd96cd9c6f623fa87b85299eeef6debe6ab04f9..2a132392fbe7478c808b01d06039112fc321a55e 100644 (file)
@@ -125,7 +125,7 @@ static int post_checkout_hook(struct commit *old_commit, struct commit *new_comm
 }
 
 static int update_some(const struct object_id *oid, struct strbuf *base,
-               const char *pathname, unsigned mode, void *context)
+                      const char *pathname, unsigned mode, void *context UNUSED)
 {
        int len;
        struct cache_entry *ce;
@@ -711,6 +711,26 @@ static void setup_branch_path(struct branch_info *branch)
        branch->path = strbuf_detach(&buf, NULL);
 }
 
+static void init_topts(struct unpack_trees_options *topts, int merge,
+                      int show_progress, int overwrite_ignore,
+                      struct commit *old_commit)
+{
+       memset(topts, 0, sizeof(*topts));
+       topts->head_idx = -1;
+       topts->src_index = &the_index;
+       topts->dst_index = &the_index;
+
+       setup_unpack_trees_porcelain(topts, "checkout");
+
+       topts->initial_checkout = is_cache_unborn();
+       topts->update = 1;
+       topts->merge = 1;
+       topts->quiet = merge && old_commit;
+       topts->verbose_update = show_progress;
+       topts->fn = twoway_merge;
+       topts->preserve_ignored = !overwrite_ignore;
+}
+
 static int merge_working_tree(const struct checkout_opts *opts,
                              struct branch_info *old_branch_info,
                              struct branch_info *new_branch_info,
@@ -741,13 +761,6 @@ static int merge_working_tree(const struct checkout_opts *opts,
                struct unpack_trees_options topts;
                const struct object_id *old_commit_oid;
 
-               memset(&topts, 0, sizeof(topts));
-               topts.head_idx = -1;
-               topts.src_index = &the_index;
-               topts.dst_index = &the_index;
-
-               setup_unpack_trees_porcelain(&topts, "checkout");
-
                refresh_cache(REFRESH_QUIET);
 
                if (unmerged_cache()) {
@@ -756,17 +769,12 @@ static int merge_working_tree(const struct checkout_opts *opts,
                }
 
                /* 2-way merge to the new branch */
-               topts.initial_checkout = is_cache_unborn();
-               topts.update = 1;
-               topts.merge = 1;
-               topts.quiet = opts->merge && old_branch_info->commit;
-               topts.verbose_update = opts->show_progress;
-               topts.fn = twoway_merge;
+               init_topts(&topts, opts->merge, opts->show_progress,
+                          opts->overwrite_ignore, old_branch_info->commit);
                init_checkout_metadata(&topts.meta, new_branch_info->refname,
                                       new_branch_info->commit ?
                                       &new_branch_info->commit->object.oid :
                                       &new_branch_info->oid, NULL);
-               topts.preserve_ignored = !opts->overwrite_ignore;
 
                old_commit_oid = old_branch_info->commit ?
                        &old_branch_info->commit->object.oid :
@@ -982,7 +990,7 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
 
 static int add_pending_uninteresting_ref(const char *refname,
                                         const struct object_id *oid,
-                                        int flags, void *cb_data)
+                                        int flags UNUSED, void *cb_data)
 {
        add_pending_oid(cb_data, refname, oid, UNINTERESTING);
        return 0;
index 9e0b2b45cae9124f4dcaf6c862c1e3dcbbc86bab..e21d42dfee54338c4beba9431b469d1f285c0bdf 100644 (file)
@@ -34,6 +34,7 @@
 #include "list-objects-filter-options.h"
 #include "hook.h"
 #include "bundle.h"
+#include "bundle-uri.h"
 
 /*
  * Overall FIXMEs:
@@ -77,6 +78,7 @@ static int option_filter_submodules = -1;    /* unspecified */
 static int config_filter_submodules = -1;    /* unspecified */
 static struct string_list server_options = STRING_LIST_INIT_NODUP;
 static int option_remote_submodules;
+static const char *bundle_uri;
 
 static int recurse_submodules_cb(const struct option *opt,
                                 const char *arg, int unset)
@@ -160,6 +162,8 @@ static struct option builtin_clone_options[] = {
                    N_("any cloned submodules will use their remote-tracking branch")),
        OPT_BOOL(0, "sparse", &option_sparse_checkout,
                    N_("initialize sparse-checkout file to include only files at root")),
+       OPT_STRING(0, "bundle-uri", &bundle_uri,
+                  N_("uri"), N_("a URI for downloading bundles before fetching from origin remote")),
        OPT_END()
 };
 
@@ -494,6 +498,7 @@ static struct ref *wanted_peer_refs(const struct ref *refs,
                        /* if --branch=tag, pull the requested tag explicitly */
                        get_fetch_map(remote_head, tag_refspec, &tail, 0);
                }
+               free_refs(remote_head);
        } else {
                int i;
                for (i = 0; i < refspec->nr; i++)
@@ -932,6 +937,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
                option_no_checkout = 1;
        }
 
+       if (bundle_uri && deepen)
+               die(_("--bundle-uri is incompatible with --depth, --shallow-since, and --shallow-exclude"));
+
        repo_name = argv[0];
 
        path = get_repo_path(repo_name, &is_bundle);
@@ -1231,6 +1239,18 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        if (transport->smart_options && !deepen && !filter_options.choice)
                transport->smart_options->check_self_contained_and_connected = 1;
 
+       /*
+        * Before fetching from the remote, download and install bundle
+        * data from the --bundle-uri option.
+        */
+       if (bundle_uri) {
+               /* At this point, we need the_repository to match the cloned repo. */
+               if (repo_init(the_repository, git_dir, work_tree))
+                       warning(_("failed to initialize the repo, skipping bundle URI"));
+               else if (fetch_bundle_uri(the_repository, bundle_uri))
+                       warning(_("failed to fetch objects from bundle URI '%s'"),
+                               bundle_uri);
+       }
 
        strvec_push(&transport_ls_refs_options.ref_prefixes, "HEAD");
        refspec_ref_prefixes(&remote->fetch,
index 51c4040ea6c879bda3631343643f3a52063a1bdc..51557fe786e6d1674ad5ffff87d55e7155a0c4b9 100644 (file)
@@ -58,7 +58,7 @@ static struct option *add_common_options(struct option *to)
        return parse_options_concat(common_opts, to);
 }
 
-static int graph_verify(int argc, const char **argv)
+static int graph_verify(int argc, const char **argv, const char *prefix)
 {
        struct commit_graph *graph = NULL;
        struct object_directory *odb = NULL;
@@ -80,7 +80,7 @@ static int graph_verify(int argc, const char **argv)
        trace2_cmd_mode("verify");
 
        opts.progress = isatty(2);
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             options,
                             builtin_commit_graph_verify_usage, 0);
        if (argc)
@@ -179,7 +179,7 @@ static int write_option_max_new_filters(const struct option *opt,
 }
 
 static int git_commit_graph_write_config(const char *var, const char *value,
-                                        void *cb)
+                                        void *cb UNUSED)
 {
        if (!strcmp(var, "commitgraph.maxnewfilters"))
                write_opts.max_new_filters = git_config_int(var, value);
@@ -190,7 +190,7 @@ static int git_commit_graph_write_config(const char *var, const char *value,
        return 0;
 }
 
-static int graph_write(int argc, const char **argv)
+static int graph_write(int argc, const char **argv, const char *prefix)
 {
        struct string_list pack_indexes = STRING_LIST_INIT_DUP;
        struct strbuf buf = STRBUF_INIT;
@@ -241,7 +241,7 @@ static int graph_write(int argc, const char **argv)
 
        git_config(git_commit_graph_write_config, &opts);
 
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             options,
                             builtin_commit_graph_write_usage, 0);
        if (argc)
@@ -307,26 +307,22 @@ cleanup:
 
 int cmd_commit_graph(int argc, const char **argv, const char *prefix)
 {
-       struct option *builtin_commit_graph_options = common_opts;
+       parse_opt_subcommand_fn *fn = NULL;
+       struct option builtin_commit_graph_options[] = {
+               OPT_SUBCOMMAND("verify", &fn, graph_verify),
+               OPT_SUBCOMMAND("write", &fn, graph_write),
+               OPT_END(),
+       };
+       struct option *options = parse_options_concat(builtin_commit_graph_options, common_opts);
 
        git_config(git_default_config, NULL);
-       argc = parse_options(argc, argv, prefix,
-                            builtin_commit_graph_options,
-                            builtin_commit_graph_usage,
-                            PARSE_OPT_STOP_AT_NON_OPTION);
-       if (!argc)
-               goto usage;
 
        read_replace_refs = 0;
        save_commit_buffer = 0;
 
-       if (!strcmp(argv[0], "verify"))
-               return graph_verify(argc, argv);
-       else if (argc && !strcmp(argv[0], "write"))
-               return graph_write(argc, argv);
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_commit_graph_usage, 0);
+       FREE_AND_NULL(options);
 
-       error(_("unrecognized subcommand: %s"), argv[0]);
-usage:
-       usage_with_options(builtin_commit_graph_usage,
-                          builtin_commit_graph_options);
+       return fn(argc, argv, prefix);
 }
index e7b88a9c08dc17055dd201eb5f985a77b6150604..753e5fac297e08763317adcab2fefe386ea68421 100644 (file)
@@ -207,7 +207,8 @@ static void show_config_scope(struct strbuf *buf)
        strbuf_addch(buf, term);
 }
 
-static int show_all_config(const char *key_, const char *value_, void *cb)
+static int show_all_config(const char *key_, const char *value_,
+                          void *cb UNUSED)
 {
        if (show_origin || show_scope) {
                struct strbuf buf = STRBUF_INIT;
@@ -458,7 +459,8 @@ static const char *get_color_slot;
 static const char *get_colorbool_slot;
 static char parsed_color[COLOR_MAXLEN];
 
-static int git_get_color_config(const char *var, const char *value, void *cb)
+static int git_get_color_config(const char *var, const char *value,
+                               void *cb UNUSED)
 {
        if (!strcmp(var, get_color_slot)) {
                if (!value)
@@ -490,7 +492,7 @@ static int get_colorbool_found;
 static int get_diff_color_found;
 static int get_color_ui_found;
 static int git_get_colorbool_config(const char *var, const char *value,
-               void *cb)
+                                   void *data UNUSED)
 {
        if (!strcmp(var, get_colorbool_slot))
                get_colorbool_found = git_config_colorbool(var, value);
index a76f1a1a7a7004cc95f392024cc1a27d54c870df..e17c4b4c69b0feb7b9c8a897ca6120e46e502251 100644 (file)
@@ -63,7 +63,7 @@ static const char *prio_names[] = {
        N_("head"), N_("lightweight"), N_("annotated"),
 };
 
-static int commit_name_neq(const void *unused_cmp_data,
+static int commit_name_neq(const void *cmp_data UNUSED,
                           const struct hashmap_entry *eptr,
                           const struct hashmap_entry *entry_or_key,
                           const void *peeled)
@@ -140,7 +140,8 @@ static void add_to_known_names(const char *path,
        }
 }
 
-static int get_name(const char *path, const struct object_id *oid, int flag, void *cb_data)
+static int get_name(const char *path, const struct object_id *oid,
+                   int flag UNUSED, void *cb_data UNUSED)
 {
        int is_tag = 0;
        struct object_id peeled;
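
The callback hunks above, and many more like them throughout this commitdiff, keep the parameters a function must accept to match its callback typedef but mark the ones it ignores with the UNUSED annotation, so the compiler knows the omission is intentional. A hedged sketch of the same idiom against git's for_each_ref() callback signature; count_refs is made up:

/* usage: unsigned int n = 0; for_each_ref(count_refs, &n); */
static int count_refs(const char *refname UNUSED,
		      const struct object_id *oid UNUSED,
		      int flags UNUSED, void *cb_data)
{
	unsigned int *count = cb_data;

	(*count)++;	/* only the callback data is actually used */
	return 0;
}
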
diff --git a/builtin/diagnose.c b/builtin/diagnose.c
new file mode 100644 (file)
index 0000000..cd260c2
--- /dev/null
+++ b/builtin/diagnose.c
@@ -0,0 +1,61 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "diagnose.h"
+
+static const char * const diagnose_usage[] = {
+       N_("git diagnose [-o|--output-directory <path>] [-s|--suffix <format>] [--mode=<mode>]"),
+       NULL
+};
+
+int cmd_diagnose(int argc, const char **argv, const char *prefix)
+{
+       struct strbuf zip_path = STRBUF_INIT;
+       time_t now = time(NULL);
+       struct tm tm;
+       enum diagnose_mode mode = DIAGNOSE_STATS;
+       char *option_output = NULL;
+       char *option_suffix = "%Y-%m-%d-%H%M";
+       char *prefixed_filename;
+
+       const struct option diagnose_options[] = {
+               OPT_STRING('o', "output-directory", &option_output, N_("path"),
+                          N_("specify a destination for the diagnostics archive")),
+               OPT_STRING('s', "suffix", &option_suffix, N_("format"),
+                          N_("specify a strftime format suffix for the filename")),
+               OPT_CALLBACK_F(0, "mode", &mode, N_("(stats|all)"),
+                              N_("specify the content of the diagnostic archive"),
+                              PARSE_OPT_NONEG, option_parse_diagnose),
+               OPT_END()
+       };
+
+       argc = parse_options(argc, argv, prefix, diagnose_options,
+                            diagnose_usage, 0);
+
+       /* Prepare the path to put the result */
+       prefixed_filename = prefix_filename(prefix,
+                                           option_output ? option_output : "");
+       strbuf_addstr(&zip_path, prefixed_filename);
+       strbuf_complete(&zip_path, '/');
+
+       strbuf_addstr(&zip_path, "git-diagnostics-");
+       strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+       strbuf_addstr(&zip_path, ".zip");
+
+       switch (safe_create_leading_directories(zip_path.buf)) {
+       case SCLD_OK:
+       case SCLD_EXISTS:
+               break;
+       default:
+               die_errno(_("could not create leading directories for '%s'"),
+                         zip_path.buf);
+       }
+
+       /* Prepare diagnostics */
+       if (create_diagnostics_archive(&zip_path, mode))
+               die_errno(_("unable to create diagnostics archive %s"),
+                         zip_path.buf);
+
+       free(prefixed_filename);
+       strbuf_release(&zip_path);
+       return 0;
+}
index b3c509b8de305db3ba5b393618024087e387dc46..4b10ad1a36908fe8067daa1e66a000c8c8e85718 100644 (file)
@@ -125,10 +125,10 @@ struct working_tree_entry {
        char path[FLEX_ARRAY];
 };
 
-static int working_tree_entry_cmp(const void *unused_cmp_data,
+static int working_tree_entry_cmp(const void *cmp_data UNUSED,
                                  const struct hashmap_entry *eptr,
                                  const struct hashmap_entry *entry_or_key,
-                                 const void *unused_keydata)
+                                 const void *keydata UNUSED)
 {
        const struct working_tree_entry *a, *b;
 
@@ -148,10 +148,10 @@ struct pair_entry {
        const char path[FLEX_ARRAY];
 };
 
-static int pair_cmp(const void *unused_cmp_data,
+static int pair_cmp(const void *cmp_data UNUSED,
                    const struct hashmap_entry *eptr,
                    const struct hashmap_entry *entry_or_key,
-                   const void *unused_keydata)
+                   const void *keydata UNUSED)
 {
        const struct pair_entry *a, *b;
 
@@ -184,7 +184,7 @@ struct path_entry {
        char path[FLEX_ARRAY];
 };
 
-static int path_entry_cmp(const void *unused_cmp_data,
+static int path_entry_cmp(const void *cmp_data UNUSED,
                          const struct hashmap_entry *eptr,
                          const struct hashmap_entry *entry_or_key,
                          const void *key)
@@ -716,7 +716,7 @@ int cmd_difftool(int argc, const char **argv, const char *prefix)
        symlinks = has_symlinks;
 
        argc = parse_options(argc, argv, prefix, builtin_difftool_options,
-                            builtin_difftool_usage, PARSE_OPT_KEEP_UNKNOWN |
+                            builtin_difftool_usage, PARSE_OPT_KEEP_UNKNOWN_OPT |
                             PARSE_OPT_KEEP_DASHDASH);
 
        if (tool_help)
index 27349098b074f9c3118114ee16f3653f3d61baa2..ea04c166364fe5776cd196950c3ee44a866076d7 100644 (file)
@@ -50,7 +50,7 @@ int cmd_env__helper(int argc, const char **argv, const char *prefix)
        };
 
        argc = parse_options(argc, argv, prefix, opts, env__helper_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
        if (env_default && !*env_default)
                usage_with_options(env__helper_usage, opts);
        if (!cmdmode)
index e1748fb98be5b970a8d31bf43f3f708d6a60c3d9..3b3314e7b2a3343b78d14ee3bc9d4a92fb578fd6 100644 (file)
@@ -119,7 +119,7 @@ struct anonymized_entry_key {
        size_t orig_len;
 };
 
-static int anonymized_entry_cmp(const void *unused_cmp_data,
+static int anonymized_entry_cmp(const void *cmp_data UNUSED,
                                const struct hashmap_entry *eptr,
                                const struct hashmap_entry *entry_or_key,
                                const void *keydata)
@@ -1221,7 +1221,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
        revs.sources = &revision_sources;
        revs.rewrite_parents = 1;
        argc = parse_options(argc, argv, prefix, options, fast_export_usage,
-                       PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN);
+                       PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT);
        argc = setup_revisions(argc, argv, &revs, NULL);
        if (argc > 1)
                usage_with_options (fast_export_usage, options);
index 14113cfd82b78034cb7bbb4d0ff843f20d5ae498..7134683ab93f96d4c213d478fc7fadc8a17856c0 100644 (file)
@@ -46,7 +46,7 @@ struct object_entry {
                depth : DEPTH_BITS;
 };
 
-static int object_entry_hashcmp(const void *map_data,
+static int object_entry_hashcmp(const void *map_data UNUSED,
                                const struct hashmap_entry *eptr,
                                const struct hashmap_entry *entry_or_key,
                                const void *keydata)
index acc4e89bc92ccc4955c30a9347f4ab68726cb23f..3aecbe4a3deb2555bf1f7cf198548be61ac942a8 100644 (file)
@@ -301,7 +301,7 @@ struct refname_hash_entry {
        char refname[FLEX_ARRAY];
 };
 
-static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
+static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data UNUSED,
                                  const struct hashmap_entry *eptr,
                                  const struct hashmap_entry *entry_or_key,
                                  const void *keydata)
@@ -329,7 +329,7 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
 
 static int add_one_refname(const char *refname,
                           const struct object_id *oid,
-                          int flag, void *cbdata)
+                          int flag UNUSED, void *cbdata)
 {
        struct hashmap *refname_map = cbdata;
 
@@ -490,7 +490,9 @@ static void filter_prefetch_refspec(struct refspec *rs)
                        continue;
                if (!rs->items[i].dst ||
                    (rs->items[i].src &&
-                    !strncmp(rs->items[i].src, "refs/tags/", 10))) {
+                    !strncmp(rs->items[i].src,
+                             ref_namespace[NAMESPACE_TAGS].ref,
+                             strlen(ref_namespace[NAMESPACE_TAGS].ref)))) {
                        int j;
 
                        free(rs->items[i].src);
@@ -506,7 +508,7 @@ static void filter_prefetch_refspec(struct refspec *rs)
                }
 
                old_dst = rs->items[i].dst;
-               strbuf_addstr(&new_dst, "refs/prefetch/");
+               strbuf_addstr(&new_dst, ref_namespace[NAMESPACE_PREFETCH].ref);
 
                /*
                 * If old_dst starts with "refs/", then place
@@ -881,11 +883,9 @@ static void format_display(struct strbuf *display, char code,
 static int update_local_ref(struct ref *ref,
                            struct ref_transaction *transaction,
                            const char *remote, const struct ref *remote_ref,
-                           struct strbuf *display, int summary_width,
-                           struct worktree **worktrees)
+                           struct strbuf *display, int summary_width)
 {
        struct commit *current = NULL, *updated;
-       const struct worktree *wt;
        const char *pretty_ref = prettify_refname(ref->name);
        int fast_forward = 0;
 
@@ -900,16 +900,14 @@ static int update_local_ref(struct ref *ref,
        }
 
        if (!update_head_ok &&
-           (wt = find_shared_symref(worktrees, "HEAD", ref->name)) &&
-           !wt->is_bare && !is_null_oid(&ref->old_oid)) {
+           !is_null_oid(&ref->old_oid) &&
+           branch_checked_out(ref->name)) {
                /*
                 * If this is the head, and it's not okay to update
                 * the head, and the old value of the head isn't empty...
                 */
                format_display(display, '!', _("[rejected]"),
-                              wt->is_current ?
-                                      _("can't fetch in current branch") :
-                                      _("checked out in another worktree"),
+                              _("can't fetch into checked-out branch"),
                               remote, pretty_ref, summary_width);
                return 1;
        }
@@ -1110,10 +1108,10 @@ N_("it took %.2f seconds to check forced updates; you can use\n"
 static int store_updated_refs(const char *raw_url, const char *remote_name,
                              int connectivity_checked,
                              struct ref_transaction *transaction, struct ref *ref_map,
-                             struct fetch_head *fetch_head, struct worktree **worktrees)
+                             struct fetch_head *fetch_head)
 {
        int url_len, i, rc = 0;
-       struct strbuf note = STRBUF_INIT, err = STRBUF_INIT;
+       struct strbuf note = STRBUF_INIT;
        const char *what, *kind;
        struct ref *rm;
        char *url;
@@ -1240,8 +1238,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
                        strbuf_reset(&note);
                        if (ref) {
                                rc |= update_local_ref(ref, transaction, what,
-                                                      rm, &note, summary_width,
-                                                      worktrees);
+                                                      rm, &note, summary_width);
                                free(ref);
                        } else if (write_fetch_head || dry_run) {
                                /*
@@ -1281,7 +1278,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
 
  abort:
        strbuf_release(&note);
-       strbuf_release(&err);
        free(url);
        return rc;
 }
@@ -1332,8 +1328,7 @@ static int check_exist_and_connected(struct ref *ref_map)
 static int fetch_and_consume_refs(struct transport *transport,
                                  struct ref_transaction *transaction,
                                  struct ref *ref_map,
-                                 struct fetch_head *fetch_head,
-                                 struct worktree **worktrees)
+                                 struct fetch_head *fetch_head)
 {
        int connectivity_checked = 1;
        int ret;
@@ -1356,7 +1351,7 @@ static int fetch_and_consume_refs(struct transport *transport,
        trace2_region_enter("fetch", "consume_refs", the_repository);
        ret = store_updated_refs(transport->url, transport->remote->name,
                                 connectivity_checked, transaction, ref_map,
-                                fetch_head, worktrees);
+                                fetch_head);
        trace2_region_leave("fetch", "consume_refs", the_repository);
 
 out:
@@ -1434,19 +1429,16 @@ cleanup:
        return result;
 }
 
-static void check_not_current_branch(struct ref *ref_map,
-                                    struct worktree **worktrees)
+static void check_not_current_branch(struct ref *ref_map)
 {
-       const struct worktree *wt;
+       const char *path;
        for (; ref_map; ref_map = ref_map->next)
                if (ref_map->peer_ref &&
                    starts_with(ref_map->peer_ref->name, "refs/heads/") &&
-                   (wt = find_shared_symref(worktrees, "HEAD",
-                                            ref_map->peer_ref->name)) &&
-                   !wt->is_bare)
+                   (path = branch_checked_out(ref_map->peer_ref->name)))
                        die(_("refusing to fetch into branch '%s' "
                              "checked out at '%s'"),
-                           ref_map->peer_ref->name, wt->path);
+                           ref_map->peer_ref->name, path);
 }
 
 static int truncate_fetch_head(void)
@@ -1472,8 +1464,9 @@ static void set_option(struct transport *transport, const char *name, const char
 }
 
 
-static int add_oid(const char *refname, const struct object_id *oid, int flags,
-                  void *cb_data)
+static int add_oid(const char *refname UNUSED,
+                  const struct object_id *oid,
+                  int flags UNUSED, void *cb_data)
 {
        struct oid_array *oids = cb_data;
 
@@ -1549,8 +1542,7 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
 static int backfill_tags(struct transport *transport,
                         struct ref_transaction *transaction,
                         struct ref *ref_map,
-                        struct fetch_head *fetch_head,
-                        struct worktree **worktrees)
+                        struct fetch_head *fetch_head)
 {
        int retcode, cannot_reuse;
 
@@ -1571,7 +1563,7 @@ static int backfill_tags(struct transport *transport,
        transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
        transport_set_option(transport, TRANS_OPT_DEPTH, "0");
        transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
-       retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head, worktrees);
+       retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head);
 
        if (gsecondary) {
                transport_disconnect(gsecondary);
@@ -1592,7 +1584,6 @@ static int do_fetch(struct transport *transport,
        struct transport_ls_refs_options transport_ls_refs_options =
                TRANSPORT_LS_REFS_OPTIONS_INIT;
        int must_list_refs = 1;
-       struct worktree **worktrees = get_worktrees();
        struct fetch_head fetch_head = { 0 };
        struct strbuf err = STRBUF_INIT;
 
@@ -1662,7 +1653,7 @@ static int do_fetch(struct transport *transport,
        ref_map = get_ref_map(transport->remote, remote_refs, rs,
                              tags, &autotags);
        if (!update_head_ok)
-               check_not_current_branch(ref_map, worktrees);
+               check_not_current_branch(ref_map);
 
        retcode = open_fetch_head(&fetch_head);
        if (retcode)
@@ -1695,7 +1686,7 @@ static int do_fetch(struct transport *transport,
                        retcode = 1;
        }
 
-       if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head, worktrees)) {
+       if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head)) {
                retcode = 1;
                goto cleanup;
        }
@@ -1718,7 +1709,7 @@ static int do_fetch(struct transport *transport,
                         * the transaction and don't commit anything.
                         */
                        if (backfill_tags(transport, transaction, tags_ref_map,
-                                         &fetch_head, worktrees))
+                                         &fetch_head))
                                retcode = 1;
                }
 
@@ -1803,7 +1794,6 @@ cleanup:
        close_fetch_head(&fetch_head);
        strbuf_release(&err);
        free_refs(ref_map);
-       free_worktrees(worktrees);
        return retcode;
 }
 
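
The fetch hunks above drop the get_worktrees()/find_shared_symref() plumbing in favour of branch_checked_out(), which, as the call sites show, takes a fully qualified ref name and returns the path of the worktree that has it checked out, or NULL. A small hedged sketch of using it as a guard; the helper name and message are hypothetical:

#include "cache.h"
#include "branch.h"

static void refuse_if_checked_out(const char *refname)
{
	const char *path = branch_checked_out(refname);

	if (path)
		die(_("refusing to touch branch '%s' checked out at '%s'"),
		    refname, path);
}
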
index 6c73092f10dedd64a53090847c16b65232bc0c86..f7916f06ed51119576035c9381c4dace2dbbecf2 100644 (file)
@@ -488,8 +488,9 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
 }
 
 static int fsck_handle_reflog_ent(struct object_id *ooid, struct object_id *noid,
-               const char *email, timestamp_t timestamp, int tz,
-               const char *message, void *cb_data)
+                                 const char *email UNUSED,
+                                 timestamp_t timestamp, int tz UNUSED,
+                                 const char *message UNUSED, void *cb_data)
 {
        const char *refname = cb_data;
 
@@ -502,8 +503,9 @@ static int fsck_handle_reflog_ent(struct object_id *ooid, struct object_id *noid
        return 0;
 }
 
-static int fsck_handle_reflog(const char *logname, const struct object_id *oid,
-                             int flag, void *cb_data)
+static int fsck_handle_reflog(const char *logname,
+                             const struct object_id *oid UNUSED,
+                             int flag UNUSED, void *cb_data)
 {
        struct strbuf refname = STRBUF_INIT;
 
@@ -514,7 +516,7 @@ static int fsck_handle_reflog(const char *logname, const struct object_id *oid,
 }
 
 static int fsck_handle_ref(const char *refname, const struct object_id *oid,
-                          int flag, void *cb_data)
+                          int flag UNUSED, void *cb_data UNUSED)
 {
        struct object *obj;
 
index 021e9256ae23561007b1178b3de99341f6c72357..0accc02406729f581d0dfda1d86041a9f76f633d 100644 (file)
@@ -168,9 +168,15 @@ struct maintenance_run_opts;
 static int maintenance_task_pack_refs(MAYBE_UNUSED struct maintenance_run_opts *opts)
 {
        struct strvec pack_refs_cmd = STRVEC_INIT;
+       int ret;
+
        strvec_pushl(&pack_refs_cmd, "pack-refs", "--all", "--prune", NULL);
 
-       return run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD);
+       ret = run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD);
+
+       strvec_clear(&pack_refs_cmd);
+
+       return ret;
 }
 
 static int too_many_loose_objects(void)
@@ -776,8 +782,9 @@ struct cg_auto_data {
        int limit;
 };
 
-static int dfs_on_ref(const char *refname,
-                     const struct object_id *oid, int flags,
+static int dfs_on_ref(const char *refname UNUSED,
+                     const struct object_id *oid,
+                     int flags UNUSED,
                      void *cb_data)
 {
        struct cg_auto_data *data = (struct cg_auto_data *)cb_data;
@@ -904,12 +911,6 @@ static int fetch_remote(struct remote *remote, void *cbdata)
 
 static int maintenance_task_prefetch(struct maintenance_run_opts *opts)
 {
-       git_config_set_multivar_gently("log.excludedecoration",
-                                       "refs/prefetch/",
-                                       "refs/prefetch/",
-                                       CONFIG_FLAGS_FIXED_VALUE |
-                                       CONFIG_FLAGS_MULTI_REPLACE);
-
        if (for_each_remote(fetch_remote, opts)) {
                error(_("failed to prefetch remotes"));
                return 1;
@@ -1459,14 +1460,28 @@ static char *get_maintpath(void)
        return strbuf_detach(&sb, NULL);
 }
 
-static int maintenance_register(void)
+static char const * const builtin_maintenance_register_usage[] = {
+       N_("git maintenance register"),
+       NULL
+};
+
+static int maintenance_register(int argc, const char **argv, const char *prefix)
 {
+       struct option options[] = {
+               OPT_END(),
+       };
        int rc;
        char *config_value;
        struct child_process config_set = CHILD_PROCESS_INIT;
        struct child_process config_get = CHILD_PROCESS_INIT;
        char *maintpath = get_maintpath();
 
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_maintenance_register_usage, 0);
+       if (argc)
+               usage_with_options(builtin_maintenance_register_usage,
+                                  options);
+
        /* Disable foreground maintenance */
        git_config_set("maintenance.auto", "false");
 
@@ -1503,12 +1518,26 @@ done:
        return rc;
 }
 
-static int maintenance_unregister(void)
+static char const * const builtin_maintenance_unregister_usage[] = {
+       N_("git maintenance unregister"),
+       NULL
+};
+
+static int maintenance_unregister(int argc, const char **argv, const char *prefix)
 {
+       struct option options[] = {
+               OPT_END(),
+       };
        int rc;
        struct child_process config_unset = CHILD_PROCESS_INIT;
        char *maintpath = get_maintpath();
 
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_maintenance_unregister_usage, 0);
+       if (argc)
+               usage_with_options(builtin_maintenance_unregister_usage,
+                                  options);
+
        config_unset.git_cmd = 1;
        strvec_pushl(&config_unset.args, "config", "--global", "--unset",
                     "--fixed-value", "maintenance.repo", maintpath, NULL);
@@ -2059,6 +2088,7 @@ static int crontab_update_schedule(int run_maintenance, int fd)
        struct child_process crontab_edit = CHILD_PROCESS_INIT;
        FILE *cron_list, *cron_in;
        struct strbuf line = STRBUF_INIT;
+       struct tempfile *tmpedit = NULL;
 
        get_schedule_cmd(&cmd, NULL);
        strvec_split(&crontab_list.args, cmd);
@@ -2073,6 +2103,17 @@ static int crontab_update_schedule(int run_maintenance, int fd)
        /* Ignore exit code, as an empty crontab will return error. */
        finish_command(&crontab_list);
 
+       tmpedit = mks_tempfile_t(".git_cron_edit_tmpXXXXXX");
+       if (!tmpedit) {
+               result = error(_("failed to create crontab temporary file"));
+               goto out;
+       }
+       cron_in = fdopen_tempfile(tmpedit, "w");
+       if (!cron_in) {
+               result = error(_("failed to open temporary file"));
+               goto out;
+       }
+
        /*
         * Read from the .lock file, filtering out the old
         * schedule while appending the new schedule.
@@ -2080,19 +2121,6 @@ static int crontab_update_schedule(int run_maintenance, int fd)
        cron_list = fdopen(fd, "r");
        rewind(cron_list);
 
-       strvec_split(&crontab_edit.args, cmd);
-       crontab_edit.in = -1;
-       crontab_edit.git_cmd = 0;
-
-       if (start_command(&crontab_edit))
-               return error(_("failed to run 'crontab'; your system might not support 'cron'"));
-
-       cron_in = fdopen(crontab_edit.in, "w");
-       if (!cron_in) {
-               result = error(_("failed to open stdin of 'crontab'"));
-               goto done_editing;
-       }
-
        while (!strbuf_getline_lf(&line, cron_list)) {
                if (!in_old_region && !strcmp(line.buf, BEGIN_LINE))
                        in_old_region = 1;
@@ -2126,14 +2154,22 @@ static int crontab_update_schedule(int run_maintenance, int fd)
        }
 
        fflush(cron_in);
-       fclose(cron_in);
-       close(crontab_edit.in);
 
-done_editing:
+       strvec_split(&crontab_edit.args, cmd);
+       strvec_push(&crontab_edit.args, get_tempfile_path(tmpedit));
+       crontab_edit.git_cmd = 0;
+
+       if (start_command(&crontab_edit)) {
+               result = error(_("failed to run 'crontab'; your system might not support 'cron'"));
+               goto out;
+       }
+
        if (finish_command(&crontab_edit))
                result = error(_("'crontab' died"));
        else
                fclose(cron_list);
+out:
+       delete_tempfile(&tmpedit);
        return result;
 }
 
@@ -2490,6 +2526,7 @@ static int maintenance_start(int argc, const char **argv, const char *prefix)
                        PARSE_OPT_NONEG, maintenance_opt_scheduler),
                OPT_END()
        };
+       const char *register_args[] = { "register", NULL };
 
        argc = parse_options(argc, argv, prefix, options,
                             builtin_maintenance_start_usage, 0);
@@ -2499,34 +2536,46 @@ static int maintenance_start(int argc, const char **argv, const char *prefix)
        opts.scheduler = resolve_scheduler(opts.scheduler);
        validate_scheduler(opts.scheduler);
 
-       if (maintenance_register())
+       if (maintenance_register(ARRAY_SIZE(register_args)-1, register_args, NULL))
                warning(_("failed to add repo to global config"));
        return update_background_schedule(&opts, 1);
 }
 
-static int maintenance_stop(void)
+static const char *const builtin_maintenance_stop_usage[] = {
+       N_("git maintenance stop"),
+       NULL
+};
+
+static int maintenance_stop(int argc, const char **argv, const char *prefix)
 {
+       struct option options[] = {
+               OPT_END()
+       };
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_maintenance_stop_usage, 0);
+       if (argc)
+               usage_with_options(builtin_maintenance_stop_usage, options);
        return update_background_schedule(NULL, 0);
 }
 
-static const char builtin_maintenance_usage[] =        N_("git maintenance <subcommand> [<options>]");
+static const char * const builtin_maintenance_usage[] = {
+       N_("git maintenance <subcommand> [<options>]"),
+       NULL,
+};
 
 int cmd_maintenance(int argc, const char **argv, const char *prefix)
 {
-       if (argc < 2 ||
-           (argc == 2 && !strcmp(argv[1], "-h")))
-               usage(builtin_maintenance_usage);
-
-       if (!strcmp(argv[1], "run"))
-               return maintenance_run(argc - 1, argv + 1, prefix);
-       if (!strcmp(argv[1], "start"))
-               return maintenance_start(argc - 1, argv + 1, prefix);
-       if (!strcmp(argv[1], "stop"))
-               return maintenance_stop();
-       if (!strcmp(argv[1], "register"))
-               return maintenance_register();
-       if (!strcmp(argv[1], "unregister"))
-               return maintenance_unregister();
-
-       die(_("invalid subcommand: %s"), argv[1]);
+       parse_opt_subcommand_fn *fn = NULL;
+       struct option builtin_maintenance_options[] = {
+               OPT_SUBCOMMAND("run", &fn, maintenance_run),
+               OPT_SUBCOMMAND("start", &fn, maintenance_start),
+               OPT_SUBCOMMAND("stop", &fn, maintenance_stop),
+               OPT_SUBCOMMAND("register", &fn, maintenance_register),
+               OPT_SUBCOMMAND("unregister", &fn, maintenance_unregister),
+               OPT_END(),
+       };
+
+       argc = parse_options(argc, argv, prefix, builtin_maintenance_options,
+                            builtin_maintenance_usage, 0);
+       return fn(argc, argv, prefix);
 }
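
Alongside the cmd_maintenance() subcommand conversion, the hunks above rewrite crontab_update_schedule() so the new schedule is written to a managed temporary file and handed to crontab as an argument, rather than being piped into its stdin. A rough sketch of that tempfile.h pattern; the helper and the exact command run here are illustrative, not taken from the patch:

#include "cache.h"
#include "tempfile.h"
#include "run-command.h"
#include "strvec.h"

static int hand_content_to_crontab(const char *content)
{
	struct tempfile *tmp = mks_tempfile_t(".git_cron_example_XXXXXX");
	struct child_process cmd = CHILD_PROCESS_INIT;
	FILE *fp;
	int ret = 0;

	if (!tmp)
		return error(_("failed to create temporary file"));
	fp = fdopen_tempfile(tmp, "w");
	if (!fp) {
		ret = error(_("failed to open temporary file"));
		goto out;
	}
	fputs(content, fp);
	fflush(fp);	/* make sure crontab sees the full content */

	strvec_pushl(&cmd.args, "crontab", get_tempfile_path(tmp), NULL);
	cmd.git_cmd = 0;
	if (run_command(&cmd))
		ret = error(_("'crontab' failed"));
out:
	delete_tempfile(&tmp);	/* also removes the file on disk */
	return ret;
}
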
index bcb07ea7f75ba35079d0093248df0647292665a1..e6bcdf860cc96af2e70975eea2f66ac8ec8606eb 100644 (file)
@@ -961,6 +961,8 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored,
                           N_("allow calling of grep(1) (ignored by this build)"),
                           PARSE_OPT_NOCOMPLETE),
+               OPT_INTEGER('m', "max-count", &opt.max_count,
+                       N_("maximum number of results per file")),
                OPT_END()
        };
        grep_prefix = prefix;
@@ -1101,6 +1103,13 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
        if (recurse_submodules && untracked)
                die(_("--untracked not supported with --recurse-submodules"));
 
+       /*
+        * Optimize out the case where the amount of matches is limited to zero.
+        * We do this to keep results consistent with GNU grep(1).
+        */
+       if (opt.max_count == 0)
+               return 1;
+
        if (show_in_pager) {
                if (num_threads > 1)
                        warning(_("invalid option combination, ignoring --threads"));
index 222f994f863cba4226e867bf0f1022e9d551e308..09ac4289f13065a20e0fb50d08d0a784d3dab8e0 100644 (file)
@@ -43,6 +43,8 @@ static enum help_action {
        HELP_ACTION_ALL = 1,
        HELP_ACTION_GUIDES,
        HELP_ACTION_CONFIG,
+       HELP_ACTION_USER_INTERFACES,
+       HELP_ACTION_DEVELOPER_INTERFACES,
        HELP_ACTION_CONFIG_FOR_COMPLETION,
        HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION,
 } cmd_mode;
@@ -69,6 +71,12 @@ static struct option builtin_help_options[] = {
 
        OPT_CMDMODE('g', "guides", &cmd_mode, N_("print list of useful guides"),
                    HELP_ACTION_GUIDES),
+       OPT_CMDMODE(0, "user-interfaces", &cmd_mode,
+                   N_("print list of user-facing repository, command and file interfaces"),
+                   HELP_ACTION_USER_INTERFACES),
+       OPT_CMDMODE(0, "developer-interfaces", &cmd_mode,
+                   N_("print list of file formats, protocols and other developer interfaces"),
+                   HELP_ACTION_DEVELOPER_INTERFACES),
        OPT_CMDMODE('c', "config", &cmd_mode, N_("print all configuration variable names"),
                    HELP_ACTION_CONFIG),
        OPT_CMDMODE_F(0, "config-for-completion", &cmd_mode, "",
@@ -81,9 +89,11 @@ static struct option builtin_help_options[] = {
 
 static const char * const builtin_help_usage[] = {
        "git help [-a|--all] [--[no-]verbose]] [--[no-]external-commands] [--[no-]aliases]",
-       N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>]"),
+       N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>|<doc>]"),
        "git help [-g|--guides]",
        "git help [-c|--config]",
+       "git help [--user-interfaces]",
+       "git help [--developer-interfaces]",
        NULL
 };
 
@@ -654,6 +664,14 @@ int cmd_help(int argc, const char **argv, const char *prefix)
                opt_mode_usage(argc, "--config-for-completion", help_format);
                list_config_help(SHOW_CONFIG_VARS);
                return 0;
+       case HELP_ACTION_USER_INTERFACES:
+               opt_mode_usage(argc, "--user-interfaces", help_format);
+               list_user_interfaces_help();
+               return 0;
+       case HELP_ACTION_DEVELOPER_INTERFACES:
+               opt_mode_usage(argc, "--developer-interfaces", help_format);
+               list_developer_interfaces_help();
+               return 0;
        case HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION:
                opt_mode_usage(argc, "--config-sections-for-completion",
                               help_format);
index 54e5c6ec933f4c032fa03d514d8f7e93e29883e5..b6530d189ad08dc76367fca61cc8eae95be13ffd 100644 (file)
@@ -67,18 +67,14 @@ usage:
 
 int cmd_hook(int argc, const char **argv, const char *prefix)
 {
+       parse_opt_subcommand_fn *fn = NULL;
        struct option builtin_hook_options[] = {
+               OPT_SUBCOMMAND("run", &fn, run),
                OPT_END(),
        };
 
        argc = parse_options(argc, argv, NULL, builtin_hook_options,
-                            builtin_hook_usage, PARSE_OPT_STOP_AT_NON_OPTION);
-       if (!argc)
-               goto usage;
+                            builtin_hook_usage, 0);
 
-       if (!strcmp(argv[0], "run"))
-               return run(argc, argv, prefix);
-
-usage:
-       usage_with_options(builtin_hook_usage, builtin_hook_options);
+       return fn(argc, argv, prefix);
 }
index 88a5e98875adb0398b2855460bddd5fa43e073ec..ee19dc5d450c57df89a864767a2b4f17d933781d 100644 (file)
@@ -52,6 +52,7 @@ static int default_encode_email_headers = 1;
 static int decoration_style;
 static int decoration_given;
 static int use_mailmap_config = 1;
+static unsigned int force_in_body_from;
 static const char *fmt_patch_subject_prefix = "PATCH";
 static int fmt_patch_name_max = FORMAT_PATCH_NAME_MAX_DEFAULT;
 static const char *fmt_pretty;
@@ -101,6 +102,20 @@ static int parse_decoration_style(const char *value)
        return -1;
 }
 
+static int use_default_decoration_filter = 1;
+static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
+static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
+static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
+
+static int clear_decorations_callback(const struct option *opt,
+                                           const char *arg, int unset)
+{
+       string_list_clear(&decorate_refs_include, 0);
+       string_list_clear(&decorate_refs_exclude, 0);
+       use_default_decoration_filter = 0;
+       return 0;
+}
+
 static int decorate_callback(const struct option *opt, const char *arg, int unset)
 {
        if (unset)
@@ -162,18 +177,61 @@ static void cmd_log_init_defaults(struct rev_info *rev)
                parse_date_format(default_date_mode, &rev->date_mode);
 }
 
+static void set_default_decoration_filter(struct decoration_filter *decoration_filter)
+{
+       int i;
+       char *value = NULL;
+       struct string_list *include = decoration_filter->include_ref_pattern;
+       const struct string_list *config_exclude =
+                       git_config_get_value_multi("log.excludeDecoration");
+
+       if (config_exclude) {
+               struct string_list_item *item;
+               for_each_string_list_item(item, config_exclude)
+                       string_list_append(decoration_filter->exclude_ref_config_pattern,
+                                          item->string);
+       }
+
+       /*
+        * By default, decorate_all is disabled. Enable it if
+        * log.initialDecorationSet=all. Don't ever disable it by config,
+        * since the command-line takes precedent.
+        */
+       if (use_default_decoration_filter &&
+           !git_config_get_string("log.initialdecorationset", &value) &&
+           !strcmp("all", value))
+               use_default_decoration_filter = 0;
+       free(value);
+
+       if (!use_default_decoration_filter ||
+           decoration_filter->exclude_ref_pattern->nr ||
+           decoration_filter->include_ref_pattern->nr ||
+           decoration_filter->exclude_ref_config_pattern->nr)
+               return;
+
+       /*
+        * No command-line or config options were given, so
+        * populate with sensible defaults.
+        */
+       for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) {
+               if (!ref_namespace[i].decoration)
+                       continue;
+
+               string_list_append(include, ref_namespace[i].ref);
+       }
+}
+
 static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
                         struct rev_info *rev, struct setup_revision_opt *opt)
 {
        struct userformat_want w;
        int quiet = 0, source = 0, mailmap;
        static struct line_opt_callback_data line_cb = {NULL, NULL, STRING_LIST_INIT_DUP};
-       static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
-       static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
-       static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
-       struct decoration_filter decoration_filter = {&decorate_refs_include,
-                                                     &decorate_refs_exclude,
-                                                     &decorate_refs_exclude_config};
+       struct decoration_filter decoration_filter = {
+               .exclude_ref_pattern = &decorate_refs_exclude,
+               .include_ref_pattern = &decorate_refs_include,
+               .exclude_ref_config_pattern = &decorate_refs_exclude_config,
+       };
        static struct revision_sources revision_sources;
 
        const struct option builtin_log_options[] = {
@@ -181,6 +239,10 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
                OPT_BOOL(0, "source", &source, N_("show source")),
                OPT_BOOL(0, "use-mailmap", &mailmap, N_("use mail map file")),
                OPT_ALIAS(0, "mailmap", "use-mailmap"),
+               OPT_CALLBACK_F(0, "clear-decorations", NULL, NULL,
+                              N_("clear all previously-defined decoration filters"),
+                              PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+                              clear_decorations_callback),
                OPT_STRING_LIST(0, "decorate-refs", &decorate_refs_include,
                                N_("pattern"), N_("only decorate refs that match <pattern>")),
                OPT_STRING_LIST(0, "decorate-refs-exclude", &decorate_refs_exclude,
@@ -199,7 +261,7 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
        mailmap = use_mailmap_config;
        argc = parse_options(argc, argv, prefix,
                             builtin_log_options, builtin_log_usage,
-                            PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN |
+                            PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
                             PARSE_OPT_KEEP_DASHDASH);
 
        if (quiet)
@@ -265,16 +327,7 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
        }
 
        if (decoration_style || rev->simplify_by_decoration) {
-               const struct string_list *config_exclude =
-                       repo_config_get_value_multi(the_repository,
-                                                   "log.excludeDecoration");
-
-               if (config_exclude) {
-                       struct string_list_item *item;
-                       for_each_string_list_item(item, config_exclude)
-                               string_list_append(&decorate_refs_exclude_config,
-                                                  item->string);
-               }
+               set_default_decoration_filter(&decoration_filter);
 
                if (decoration_style)
                        rev->show_decorations = 1;
@@ -645,9 +698,10 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
        return 0;
 }
 
-static int show_tree_object(const struct object_id *oid,
-               struct strbuf *base,
-               const char *pathname, unsigned mode, void *context)
+static int show_tree_object(const struct object_id *oid UNUSED,
+                           struct strbuf *base UNUSED,
+                           const char *pathname, unsigned mode,
+                           void *context)
 {
        FILE *file = context;
        fprintf(file, "%s%s\n", pathname, S_ISDIR(mode) ? "/" : "");
@@ -668,10 +722,10 @@ static void show_setup_revisions_tweak(struct rev_info *rev,
 int cmd_show(int argc, const char **argv, const char *prefix)
 {
        struct rev_info rev;
-       struct object_array_entry *objects;
+       unsigned int i;
        struct setup_revision_opt opt;
        struct pathspec match_all;
-       int i, count, ret = 0;
+       int ret = 0;
 
        init_log_defaults();
        git_config(git_log_config, NULL);
@@ -698,12 +752,10 @@ int cmd_show(int argc, const char **argv, const char *prefix)
        if (!rev.no_walk)
                return cmd_log_deinit(cmd_log_walk(&rev), &rev);
 
-       count = rev.pending.nr;
-       objects = rev.pending.objects;
        rev.diffopt.no_free = 1;
-       for (i = 0; i < count && !ret; i++) {
-               struct object *o = objects[i].item;
-               const char *name = objects[i].name;
+       for (i = 0; i < rev.pending.nr && !ret; i++) {
+               struct object *o = rev.pending.objects[i].item;
+               const char *name = rev.pending.objects[i].name;
                switch (o->type) {
                case OBJ_BLOB:
                        ret = show_blob_object(&o->oid, &rev, name);
@@ -726,7 +778,7 @@ int cmd_show(int argc, const char **argv, const char *prefix)
                        if (!o)
                                ret = error(_("could not read object %s"),
                                            oid_to_hex(oid));
-                       objects[i].item = o;
+                       rev.pending.objects[i].item = o;
                        i--;
                        break;
                }
@@ -743,11 +795,24 @@ int cmd_show(int argc, const char **argv, const char *prefix)
                        rev.shown_one = 1;
                        break;
                case OBJ_COMMIT:
-                       rev.pending.nr = rev.pending.alloc = 0;
-                       rev.pending.objects = NULL;
+               {
+                       struct object_array old;
+                       struct object_array blank = OBJECT_ARRAY_INIT;
+
+                       memcpy(&old, &rev.pending, sizeof(old));
+                       memcpy(&rev.pending, &blank, sizeof(rev.pending));
+
                        add_object_array(o, name, &rev.pending);
                        ret = cmd_log_walk_no_free(&rev);
+
+                       /*
+                        * No need for
+                        * object_array_clear(&pending). It was
+                        * cleared already in prepare_revision_walk()
+                        */
+                       memcpy(&rev.pending, &old, sizeof(rev.pending));
                        break;
+               }
                default:
                        ret = error(_("unknown type: %d"), o->type);
                }
@@ -995,6 +1060,10 @@ static int git_format_config(const char *var, const char *value, void *cb)
                        from = NULL;
                return 0;
        }
+       if (!strcmp(var, "format.forceinbodyfrom")) {
+               force_in_body_from = git_config_bool(var, value);
+               return 0;
+       }
        if (!strcmp(var, "format.notes")) {
                int b = git_parse_maybe_bool(value);
                if (b < 0)
@@ -1886,6 +1955,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
                           N_("show changes against <refspec> in cover letter or single patch")),
                OPT_INTEGER(0, "creation-factor", &creation_factor,
                            N_("percentage by which creation is weighted")),
+               OPT_BOOL(0, "force-in-body-from", &force_in_body_from,
+                        N_("show in-body From: even if identical to the e-mail header")),
                OPT_END()
        };
 
@@ -1926,9 +1997,11 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
         */
        argc = parse_options(argc, argv, prefix, builtin_format_patch_options,
                             builtin_format_patch_usage,
-                            PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN |
+                            PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
                             PARSE_OPT_KEEP_DASHDASH);
 
+       rev.force_in_body_from = force_in_body_from;
+
        /* Make sure "0000-$sub.patch" gives non-negative length for $sub */
        if (fmt_patch_name_max <= strlen("0000-") + strlen(fmt_patch_suffix))
                fmt_patch_name_max = strlen("0000-") + strlen(fmt_patch_suffix);
index e791b65e7e9afb2b9f51fee4af9a0f1c978b9536..779dc18e59d56b46bcccba7ac9754726e6b1935e 100644 (file)
@@ -11,6 +11,7 @@
 #include "quote.h"
 #include "dir.h"
 #include "builtin.h"
+#include "strbuf.h"
 #include "tree.h"
 #include "cache-tree.h"
 #include "parse-options.h"
@@ -48,6 +49,7 @@ static char *ps_matched;
 static const char *with_tree;
 static int exc_given;
 static int exclude_args;
+static const char *format;
 
 static const char *tag_cached = "";
 static const char *tag_unmerged = "";
@@ -85,6 +87,16 @@ static void write_name(const char *name)
                                   stdout, line_terminator);
 }
 
+static void write_name_to_buf(struct strbuf *sb, const char *name)
+{
+       const char *rel = relative_path(name, prefix_len ? prefix : NULL, sb);
+
+       if (line_terminator)
+               quote_c_style(rel, sb, NULL, 0);
+       else
+               strbuf_addstr(sb, rel);
+}
+
 static const char *get_tag(const struct cache_entry *ce, const char *tag)
 {
        static char alttag[4];
@@ -222,6 +234,73 @@ static void show_submodule(struct repository *superproject,
        repo_clear(&subrepo);
 }
 
+struct show_index_data {
+       const char *pathname;
+       struct index_state *istate;
+       const struct cache_entry *ce;
+};
+
+static size_t expand_show_index(struct strbuf *sb, const char *start,
+                               void *context)
+{
+       struct show_index_data *data = context;
+       const char *end;
+       const char *p;
+       size_t len = strbuf_expand_literal_cb(sb, start, NULL);
+       struct stat st;
+
+       if (len)
+               return len;
+       if (*start != '(')
+               die(_("bad ls-files format: element '%s' "
+                     "does not start with '('"), start);
+
+       end = strchr(start + 1, ')');
+       if (!end)
+               die(_("bad ls-files format: element '%s'"
+                     "does not end in ')'"), start);
+
+       len = end - start + 1;
+       if (skip_prefix(start, "(objectmode)", &p))
+               strbuf_addf(sb, "%06o", data->ce->ce_mode);
+       else if (skip_prefix(start, "(objectname)", &p))
+               strbuf_add_unique_abbrev(sb, &data->ce->oid, abbrev);
+       else if (skip_prefix(start, "(stage)", &p))
+               strbuf_addf(sb, "%d", ce_stage(data->ce));
+       else if (skip_prefix(start, "(eolinfo:index)", &p))
+               strbuf_addstr(sb, S_ISREG(data->ce->ce_mode) ?
+                             get_cached_convert_stats_ascii(data->istate,
+                             data->ce->name) : "");
+       else if (skip_prefix(start, "(eolinfo:worktree)", &p))
+               strbuf_addstr(sb, !lstat(data->pathname, &st) &&
+                             S_ISREG(st.st_mode) ?
+                             get_wt_convert_stats_ascii(data->pathname) : "");
+       else if (skip_prefix(start, "(eolattr)", &p))
+               strbuf_addstr(sb, get_convert_attr_ascii(data->istate,
+                             data->pathname));
+       else if (skip_prefix(start, "(path)", &p))
+               write_name_to_buf(sb, data->pathname);
+       else
+               die(_("bad ls-files format: %%%.*s"), (int)len, start);
+
+       return len;
+}
+
+static void show_ce_fmt(struct repository *repo, const struct cache_entry *ce,
+                       const char *format, const char *fullname) {
+       struct show_index_data data = {
+               .pathname = fullname,
+               .istate = repo->index,
+               .ce = ce,
+       };
+       struct strbuf sb = STRBUF_INIT;
+
+       strbuf_expand(&sb, format, expand_show_index, &data);
+       strbuf_addch(&sb, line_terminator);
+       fwrite(sb.buf, sb.len, 1, stdout);
+       strbuf_release(&sb);
+}
+
 static void show_ce(struct repository *repo, struct dir_struct *dir,
                    const struct cache_entry *ce, const char *fullname,
                    const char *tag)
@@ -236,6 +315,12 @@ static void show_ce(struct repository *repo, struct dir_struct *dir,
                                  max_prefix_len, ps_matched,
                                  S_ISDIR(ce->ce_mode) ||
                                  S_ISGITLINK(ce->ce_mode))) {
+               if (format) {
+                       show_ce_fmt(repo, ce, format, fullname);
+                       print_debug(ce);
+                       return;
+               }
+
                tag = get_tag(ce, tag);
 
                if (!show_stage) {
@@ -675,6 +760,9 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
                         N_("suppress duplicate entries")),
                OPT_BOOL(0, "sparse", &show_sparse_dirs,
                         N_("show sparse directories in the presence of a sparse index")),
+               OPT_STRING_F(0, "format", &format, N_("format"),
+                            N_("format to use for the output"),
+                            PARSE_OPT_NONEG),
                OPT_END()
        };
        int ret = 0;
@@ -699,6 +787,13 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
        for (i = 0; i < exclude_list.nr; i++) {
                add_pattern(exclude_list.items[i].string, "", 0, pl, --exclude_args);
        }
+
+       if (format && (show_stage || show_others || show_killed ||
+               show_resolve_undo || skipping_duplicates || show_eol || show_tag))
+                       usage_msg_opt(_("--format cannot be used with -s, -o, -k, -t, "
+                                     "--resolve-undo, --deduplicate, --eol"),
+                                     ls_files_usage, builtin_ls_files_options);
+
        if (show_tag || show_valid_bit || show_fsmonitor_bit) {
                tag_cached = "H ";
                tag_unmerged = "M ";
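
The new ls-files --format support above is built on strbuf_expand() with a per-placeholder callback: the callback is handed the text right after a '%', appends its expansion to the strbuf, and returns how many characters of the format it consumed (0 meaning "not mine"). A hedged sketch of that contract, reduced to a single invented %(name) placeholder and assuming git's strbuf.h and git-compat-util.h:

/* usage: strbuf_expand(&out, "hello %(name)%n", expand_demo, (void *)"world"); */
static size_t expand_demo(struct strbuf *sb, const char *start, void *context)
{
	const char *name = context;
	const char *p;
	size_t len = strbuf_expand_literal_cb(sb, start, NULL);	/* %n, %xNN */

	if (len)
		return len;
	if (skip_prefix(start, "(name)", &p)) {
		strbuf_addstr(sb, name);
		return p - start;
	}
	return 0;	/* strbuf_expand() will then emit the '%' literally */
}
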
index e279be8bb636d6a945f36ad10699b50cf115fd78..c3ea09281afebe0c9aefb690f6f10044142a4d1a 100644 (file)
@@ -142,7 +142,7 @@ static int show_recursive(const char *base, size_t baselen, const char *pathname
 }
 
 static int show_tree_fmt(const struct object_id *oid, struct strbuf *base,
-                        const char *pathname, unsigned mode, void *context)
+                        const char *pathname, unsigned mode, void *context UNUSED)
 {
        size_t baselen;
        int recurse = 0;
@@ -213,7 +213,7 @@ static void show_tree_common_default_long(struct strbuf *base,
 
 static int show_tree_default(const struct object_id *oid, struct strbuf *base,
                             const char *pathname, unsigned mode,
-                            void *context)
+                            void *context UNUSED)
 {
        int early;
        int recurse;
@@ -230,7 +230,8 @@ static int show_tree_default(const struct object_id *oid, struct strbuf *base,
 }
 
 static int show_tree_long(const struct object_id *oid, struct strbuf *base,
-                         const char *pathname, unsigned mode, void *context)
+                         const char *pathname, unsigned mode,
+                         void *context UNUSED)
 {
        int early;
        int recurse;
@@ -259,7 +260,8 @@ static int show_tree_long(const struct object_id *oid, struct strbuf *base,
 }
 
 static int show_tree_name_only(const struct object_id *oid, struct strbuf *base,
-                              const char *pathname, unsigned mode, void *context)
+                              const char *pathname, unsigned mode,
+                              void *context UNUSED)
 {
        int early;
        int recurse;
@@ -279,7 +281,8 @@ static int show_tree_name_only(const struct object_id *oid, struct strbuf *base,
 }
 
 static int show_tree_object(const struct object_id *oid, struct strbuf *base,
-                           const char *pathname, unsigned mode, void *context)
+                           const char *pathname, unsigned mode,
+                           void *context UNUSED)
 {
        int early;
        int recurse;
index e695867ee54894dceb5b0460cc9e0670d0f50607..c923bbf2abbdfa9d8a5461fcf5c0402a341a061b 100644 (file)
@@ -25,10 +25,10 @@ static int label_cb(const struct option *opt, const char *arg, int unset)
 
 int cmd_merge_file(int argc, const char **argv, const char *prefix)
 {
-       const char *names[3] = { NULL, NULL, NULL };
-       mmfile_t mmfs[3];
-       mmbuffer_t result = {NULL, 0};
-       xmparam_t xmp = {{0}};
+       const char *names[3] = { 0 };
+       mmfile_t mmfs[3] = { 0 };
+       mmbuffer_t result = { 0 };
+       xmparam_t xmp = { 0 };
        int ret = 0, i = 0, to_stdout = 0;
        int quiet = 0;
        struct option options[] = {
@@ -71,21 +71,24 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix)
 
        for (i = 0; i < 3; i++) {
                char *fname;
-               int ret;
+               mmfile_t *mmf = mmfs + i;
 
                if (!names[i])
                        names[i] = argv[i];
 
                fname = prefix_filename(prefix, argv[i]);
-               ret = read_mmfile(mmfs + i, fname);
+
+               if (read_mmfile(mmf, fname))
+                       ret = -1;
+               else if (mmf->size > MAX_XDIFF_SIZE ||
+                        buffer_is_binary(mmf->ptr, mmf->size))
+                       ret = error("Cannot merge binary files: %s",
+                                   argv[i]);
+
                free(fname);
                if (ret)
-                       return -1;
+                       goto cleanup;
 
-               if (mmfs[i].size > MAX_XDIFF_SIZE ||
-                   buffer_is_binary(mmfs[i].ptr, mmfs[i].size))
-                       return error("Cannot merge binary files: %s",
-                                       argv[i]);
        }
 
        xmp.ancestor = names[1];
@@ -93,9 +96,6 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix)
        xmp.file2 = names[2];
        ret = xdl_merge(mmfs + 1, mmfs + 0, mmfs + 2, &xmp, &result);
 
-       for (i = 0; i < 3; i++)
-               free(mmfs[i].ptr);
-
        if (ret >= 0) {
                const char *filename = argv[0];
                char *fpath = prefix_filename(prefix, argv[0]);
@@ -116,5 +116,9 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix)
        if (ret > 127)
                ret = 127;
 
+cleanup:
+       for (i = 0; i < 3; i++)
+               free(mmfs[i].ptr);
+
        return ret;
 }
index 5dc94d6f8804d600a37e75079b59b1960db32ef7..ae5782917b96c57917b60b7e192f74f18e77794c 100644 (file)
@@ -2,13 +2,18 @@
 #include "builtin.h"
 #include "tree-walk.h"
 #include "xdiff-interface.h"
+#include "help.h"
+#include "commit-reach.h"
+#include "merge-ort.h"
 #include "object-store.h"
+#include "parse-options.h"
 #include "repository.h"
 #include "blob.h"
 #include "exec-cmd.h"
 #include "merge-blobs.h"
+#include "quote.h"
 
-static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>";
+static int line_termination = '\n';
 
 struct merge_list {
        struct merge_list *next;
@@ -28,7 +33,7 @@ static void add_merge_entry(struct merge_list *entry)
        merge_result_end = &entry->next;
 }
 
-static void merge_trees(struct tree_desc t[3], const char *base);
+static void trivial_merge_trees(struct tree_desc t[3], const char *base);
 
 static const char *explanation(struct merge_list *entry)
 {
@@ -225,7 +230,7 @@ static void unresolved_directory(const struct traverse_info *info,
        buf2 = fill_tree_descriptor(r, t + 2, ENTRY_OID(n + 2));
 #undef ENTRY_OID
 
-       merge_trees(t, newbase);
+       trivial_merge_trees(t, newbase);
 
        free(buf0);
        free(buf1);
@@ -342,7 +347,7 @@ static int threeway_callback(int n, unsigned long mask, unsigned long dirmask, s
        return mask;
 }
 
-static void merge_trees(struct tree_desc t[3], const char *base)
+static void trivial_merge_trees(struct tree_desc t[3], const char *base)
 {
        struct traverse_info info;
 
@@ -366,19 +371,18 @@ static void *get_tree_descriptor(struct repository *r,
        return buf;
 }
 
-int cmd_merge_tree(int argc, const char **argv, const char *prefix)
+static int trivial_merge(const char *base,
+                        const char *branch1,
+                        const char *branch2)
 {
        struct repository *r = the_repository;
        struct tree_desc t[3];
        void *buf1, *buf2, *buf3;
 
-       if (argc != 4)
-               usage(merge_tree_usage);
-
-       buf1 = get_tree_descriptor(r, t+0, argv[1]);
-       buf2 = get_tree_descriptor(r, t+1, argv[2]);
-       buf3 = get_tree_descriptor(r, t+2, argv[3]);
-       merge_trees(t, "");
+       buf1 = get_tree_descriptor(r, t+0, base);
+       buf2 = get_tree_descriptor(r, t+1, branch1);
+       buf3 = get_tree_descriptor(r, t+2, branch2);
+       trivial_merge_trees(t, "");
        free(buf1);
        free(buf2);
        free(buf3);
@@ -386,3 +390,162 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
        show_result();
        return 0;
 }
+
+enum mode {
+       MODE_UNKNOWN,
+       MODE_TRIVIAL,
+       MODE_REAL,
+};
+
+struct merge_tree_options {
+       int mode;
+       int allow_unrelated_histories;
+       int show_messages;
+       int name_only;
+};
+
+static int real_merge(struct merge_tree_options *o,
+                     const char *branch1, const char *branch2,
+                     const char *prefix)
+{
+       struct commit *parent1, *parent2;
+       struct commit_list *merge_bases = NULL;
+       struct merge_options opt;
+       struct merge_result result = { 0 };
+
+       parent1 = get_merge_parent(branch1);
+       if (!parent1)
+               help_unknown_ref(branch1, "merge-tree",
+                                _("not something we can merge"));
+
+       parent2 = get_merge_parent(branch2);
+       if (!parent2)
+               help_unknown_ref(branch2, "merge-tree",
+                                _("not something we can merge"));
+
+       init_merge_options(&opt, the_repository);
+
+       opt.show_rename_progress = 0;
+
+       opt.branch1 = branch1;
+       opt.branch2 = branch2;
+
+       /*
+        * Get the merge bases, in reverse order; see comment above
+        * merge_incore_recursive in merge-ort.h
+        */
+       merge_bases = get_merge_bases(parent1, parent2);
+       if (!merge_bases && !o->allow_unrelated_histories)
+               die(_("refusing to merge unrelated histories"));
+       merge_bases = reverse_commit_list(merge_bases);
+
+       merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
+       if (result.clean < 0)
+               die(_("failure to merge"));
+
+       if (o->show_messages == -1)
+               o->show_messages = !result.clean;
+
+       printf("%s%c", oid_to_hex(&result.tree->object.oid), line_termination);
+       if (!result.clean) {
+               struct string_list conflicted_files = STRING_LIST_INIT_NODUP;
+               const char *last = NULL;
+               int i;
+
+               merge_get_conflicted_files(&result, &conflicted_files);
+               for (i = 0; i < conflicted_files.nr; i++) {
+                       const char *name = conflicted_files.items[i].string;
+                       struct stage_info *c = conflicted_files.items[i].util;
+                       if (!o->name_only)
+                               printf("%06o %s %d\t",
+                                      c->mode, oid_to_hex(&c->oid), c->stage);
+                       else if (last && !strcmp(last, name))
+                               continue;
+                       write_name_quoted_relative(
+                               name, prefix, stdout, line_termination);
+                       last = name;
+               }
+               string_list_clear(&conflicted_files, 1);
+       }
+       if (o->show_messages) {
+               putchar(line_termination);
+               merge_display_update_messages(&opt, line_termination == '\0',
+                                             &result);
+       }
+       merge_finalize(&opt, &result);
+       return !result.clean; /* result.clean < 0 handled above */
+}
+
+int cmd_merge_tree(int argc, const char **argv, const char *prefix)
+{
+       struct merge_tree_options o = { .show_messages = -1 };
+       int expected_remaining_argc;
+       int original_argc;
+
+       const char * const merge_tree_usage[] = {
+               N_("git merge-tree [--write-tree] [<options>] <branch1> <branch2>"),
+               N_("git merge-tree [--trivial-merge] <base-tree> <branch1> <branch2>"),
+               NULL
+       };
+       struct option mt_options[] = {
+               OPT_CMDMODE(0, "write-tree", &o.mode,
+                           N_("do a real merge instead of a trivial merge"),
+                           MODE_REAL),
+               OPT_CMDMODE(0, "trivial-merge", &o.mode,
+                           N_("do a trivial merge only"), MODE_TRIVIAL),
+               OPT_BOOL(0, "messages", &o.show_messages,
+                        N_("also show informational/conflict messages")),
+               OPT_SET_INT('z', NULL, &line_termination,
+                           N_("separate paths with the NUL character"), '\0'),
+               OPT_BOOL_F(0, "name-only",
+                          &o.name_only,
+                          N_("list filenames without modes/oids/stages"),
+                          PARSE_OPT_NONEG),
+               OPT_BOOL_F(0, "allow-unrelated-histories",
+                          &o.allow_unrelated_histories,
+                          N_("allow merging unrelated histories"),
+                          PARSE_OPT_NONEG),
+               OPT_END()
+       };
+
+       /* Parse arguments */
+       original_argc = argc - 1; /* ignoring argv[0] */
+       argc = parse_options(argc, argv, prefix, mt_options,
+                            merge_tree_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+       switch (o.mode) {
+       default:
+               BUG("unexpected command mode %d", o.mode);
+       case MODE_UNKNOWN:
+               switch (argc) {
+               default:
+                       usage_with_options(merge_tree_usage, mt_options);
+               case 2:
+                       o.mode = MODE_REAL;
+                       break;
+               case 3:
+                       o.mode = MODE_TRIVIAL;
+                       break;
+               }
+               expected_remaining_argc = argc;
+               break;
+       case MODE_REAL:
+               expected_remaining_argc = 2;
+               break;
+       case MODE_TRIVIAL:
+               expected_remaining_argc = 3;
+               /* Removal of `--trivial-merge` is expected */
+               original_argc--;
+               break;
+       }
+       if (o.mode == MODE_TRIVIAL && argc < original_argc)
+               die(_("--trivial-merge is incompatible with all other options"));
+
+       if (argc != expected_remaining_argc)
+               usage_with_options(merge_tree_usage, mt_options);
+
+       /* Do the relevant type of merge */
+       if (o.mode == MODE_REAL)
+               return real_merge(&o, argv[0], argv[1], prefix);
+       else
+               return trivial_merge(argv[0], argv[1], argv[2]);
+}
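
With the changes above, merge-tree gains a second calling convention: two remaining arguments mean a real (write-tree) merge, three mean the historical trivial merge, and --write-tree/--trivial-merge force one or the other. A tiny standalone C sketch of inferring the mode from the leftover argument count (the names and messages are illustrative only, not Git's):

    #include <stdio.h>

    enum mode { MODE_UNKNOWN, MODE_TRIVIAL, MODE_REAL };

    int main(int argc, char **argv)
    {
            enum mode mode = MODE_UNKNOWN;   /* as if neither mode flag was given */
            int args = argc - 1;

            /* Infer the mode from how many arguments are left over. */
            if (args == 2)
                    mode = MODE_REAL;        /* <branch1> <branch2> */
            else if (args == 3)
                    mode = MODE_TRIVIAL;     /* <base-tree> <branch1> <branch2> */

            switch (mode) {
            case MODE_REAL:
                    printf("real merge of %s and %s\n", argv[1], argv[2]);
                    return 0;
            case MODE_TRIVIAL:
                    printf("trivial merge of %s and %s over base %s\n",
                           argv[2], argv[3], argv[1]);
                    return 0;
            default:
                    fprintf(stderr, "usage: demo [<base-tree>] <branch1> <branch2>\n");
                    return 129;
            }
    }
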
diff --git a/builtin/merge.c b/builtin/merge.c
index d9784d4891c92bbc27f7e0ecdcf1d17b7bcded5b..5900b81729d8da757d04613781d41f2e7621ebcd 100644 (file)
@@ -313,8 +313,16 @@ static int save_state(struct object_id *stash)
        int len;
        struct child_process cp = CHILD_PROCESS_INIT;
        struct strbuf buffer = STRBUF_INIT;
+       struct lock_file lock_file = LOCK_INIT;
+       int fd;
        int rc = -1;
 
+       fd = repo_hold_locked_index(the_repository, &lock_file, 0);
+       refresh_cache(REFRESH_QUIET);
+       if (0 <= fd)
+               repo_update_index_if_able(the_repository, &lock_file);
+       rollback_lock_file(&lock_file);
+
        strvec_pushl(&cp.args, "stash", "create", NULL);
        cp.out = -1;
        cp.git_cmd = 1;
@@ -375,24 +383,26 @@ static void reset_hard(const struct object_id *oid, int verbose)
 static void restore_state(const struct object_id *head,
                          const struct object_id *stash)
 {
-       struct strbuf sb = STRBUF_INIT;
-       const char *args[] = { "stash", "apply", NULL, NULL };
-
-       if (is_null_oid(stash))
-               return;
+       struct strvec args = STRVEC_INIT;
 
        reset_hard(head, 1);
 
-       args[2] = oid_to_hex(stash);
+       if (is_null_oid(stash))
+               goto refresh_cache;
+
+       strvec_pushl(&args, "stash", "apply", "--index", "--quiet", NULL);
+       strvec_push(&args, oid_to_hex(stash));
 
        /*
         * It is OK to ignore error here, for example when there was
         * nothing to restore.
         */
-       run_command_v_opt(args, RUN_GIT_CMD);
+       run_command_v_opt(args.v, RUN_GIT_CMD);
+       strvec_clear(&args);
 
-       strbuf_release(&sb);
-       refresh_cache(REFRESH_QUIET);
+refresh_cache:
+       if (discard_cache() < 0 || read_cache() < 0)
+               die(_("could not read index"));
 }
 
 /* This is called when no merge was necessary. */
@@ -493,7 +503,8 @@ static void finish(struct commit *head_commit,
        /* Run a post-merge hook */
        run_hooks_l("post-merge", squash ? "1" : "0", NULL);
 
-       apply_autostash(git_path_merge_autostash(the_repository));
+       if (new_head)
+               apply_autostash(git_path_merge_autostash(the_repository));
        strbuf_release(&reflog_message);
 }
 
@@ -502,7 +513,6 @@ static void merge_name(const char *remote, struct strbuf *msg)
 {
        struct commit *remote_head;
        struct object_id branch_head;
-       struct strbuf buf = STRBUF_INIT;
        struct strbuf bname = STRBUF_INIT;
        struct merge_remote_desc *desc;
        const char *ptr;
@@ -590,7 +600,6 @@ static void merge_name(const char *remote, struct strbuf *msg)
                oid_to_hex(&remote_head->object.oid), remote);
 cleanup:
        free(found_ref);
-       strbuf_release(&buf);
        strbuf_release(&bname);
 }
 
@@ -758,8 +767,10 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
                else
                        clean = merge_recursive(&o, head, remoteheads->item,
                                                reversed, &result);
-               if (clean < 0)
-                       exit(128);
+               if (clean < 0) {
+                       rollback_lock_file(&lock);
+                       return 2;
+               }
                if (write_locked_index(&the_index, &lock,
                                       COMMIT_LOCK | SKIP_IF_UNCHANGED))
                        die(_("unable to write %s"), get_index_file());
@@ -1603,6 +1614,21 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                 */
                refresh_cache(REFRESH_QUIET);
                if (allow_trivial && fast_forward != FF_ONLY) {
+                       /*
+                        * Must first ensure that index matches HEAD before
+                        * attempting a trivial merge.
+                        */
+                       struct tree *head_tree = get_commit_tree(head_commit);
+                       struct strbuf sb = STRBUF_INIT;
+
+                       if (repo_index_has_changes(the_repository, head_tree,
+                                                  &sb)) {
+                               error(_("Your local changes to the following files would be overwritten by merge:\n  %s"),
+                                     sb.buf);
+                               strbuf_release(&sb);
+                               return 2;
+                       }
+
                        /* See if it is really trivial. */
                        git_committer_info(IDENT_STRICT);
                        printf(_("Trying really trivial in-index merge...\n"));
@@ -1659,15 +1685,15 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
         * tree in the index -- this means that the index must be in
         * sync with the head commit.  The strategies are responsible
         * to ensure this.
+        *
+        * Stash away the local changes so that we can try more than one
+        * and/or recover from merge strategies bailing while leaving the
+        * index and working tree polluted.
         */
-       if (use_strategies_nr == 1 ||
-           /*
-            * Stash away the local changes so that we can try more than one.
-            */
-           save_state(&stash))
+       if (save_state(&stash))
                oidclr(&stash);
 
-       for (i = 0; !merge_was_ok && i < use_strategies_nr; i++) {
+       for (i = 0; i < use_strategies_nr; i++) {
                int ret, cnt;
                if (i) {
                        printf(_("Rewinding the tree to pristine...\n"));
@@ -1682,7 +1708,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                 */
                wt_strategy = use_strategies[i]->name;
 
-               ret = try_merge_strategy(use_strategies[i]->name,
+               ret = try_merge_strategy(wt_strategy,
                                         common, remoteheads,
                                         head_commit);
                /*
@@ -1692,16 +1718,17 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                 */
                if (ret < 2) {
                        if (!ret) {
-                               if (option_commit) {
-                                       /* Automerge succeeded. */
-                                       automerge_was_ok = 1;
-                                       break;
-                               }
+                               /*
+                                * This strategy worked; no point in trying
+                                * another.
+                                */
                                merge_was_ok = 1;
+                               best_strategy = wt_strategy;
+                               break;
                        }
                        cnt = (use_strategies_nr > 1) ? evaluate_result() : 0;
                        if (best_cnt <= 0 || cnt <= best_cnt) {
-                               best_strategy = use_strategies[i]->name;
+                               best_strategy = wt_strategy;
                                best_cnt = cnt;
                        }
                }
@@ -1711,7 +1738,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
         * If we have a resulting tree, that means the strategy module
         * auto resolved the merge cleanly.
         */
-       if (automerge_was_ok) {
+       if (merge_was_ok && option_commit) {
+               automerge_was_ok = 1;
                ret = finish_automerge(head_commit, head_subsumed,
                                       common, remoteheads,
                                       &result_tree, wt_strategy);
@@ -1756,6 +1784,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                        "stopped before committing as requested\n"));
        else
                ret = suggest_conflicts();
+       if (autostash)
+               printf(_("When finished, apply stashed changes with `git stash pop`\n"));
 
 done:
        if (!automerge_was_ok) {
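
The restructured strategy loop in builtin/merge.c stops at the first strategy that merges cleanly and otherwise remembers the strategy that left the fewest conflicts, to be replayed later. A standalone sketch of that selection logic under invented types (try_merge and evaluate stand in for try_merge_strategy() and evaluate_result(); they are not Git APIs):

    #include <stdio.h>
    #include <stddef.h>

    struct strategy {
            const char *name;
            int (*try_merge)(void);   /* 0 = clean, 1 = conflicts, 2 = strategy gave up */
            int (*evaluate)(void);    /* rough conflict count, lower is better */
    };

    /*
     * Return the strategy to use: the first one that merges cleanly
     * (setting *merged_ok), otherwise the one with the fewest conflicts.
     */
    static const char *pick_strategy(const struct strategy *s, size_t nr, int *merged_ok)
    {
            const char *best = NULL;
            int best_cnt = -1;
            size_t i;

            *merged_ok = 0;
            for (i = 0; i < nr; i++) {
                    int ret = s[i].try_merge();

                    if (!ret) {
                            *merged_ok = 1;
                            return s[i].name;   /* worked; no point trying another */
                    }
                    if (ret == 1) {
                            int cnt = s[i].evaluate();
                            if (best_cnt < 0 || cnt <= best_cnt) {
                                    best = s[i].name;
                                    best_cnt = cnt;
                            }
                    }
            }
            return best;
    }

    static int conflicted(void) { return 1; }
    static int three_conflicts(void) { return 3; }
    static int clean(void) { return 0; }
    static int no_conflicts(void) { return 0; }

    int main(void)
    {
            const struct strategy strategies[] = {
                    { "resolve", conflicted, three_conflicts },
                    { "ort", clean, no_conflicts },
            };
            int ok;
            const char *pick = pick_strategy(strategies, 2, &ok);

            printf("picked %s (clean=%d)\n", pick ? pick : "none", ok);
            return 0;
    }
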
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 5edbb7fe86e81fb09ca2245ba920bc8bccd18726..9b126d6ce0e7f5fbaa4a5616d45c0c9bfa41d5b5 100644 (file)
@@ -78,7 +78,7 @@ static struct option *add_common_options(struct option *prev)
 }
 
 static int git_multi_pack_index_write_config(const char *var, const char *value,
-                                            void *cb)
+                                            void *cb UNUSED)
 {
        if (!strcmp(var, "pack.writebitmaphashcache")) {
                if (git_config_bool(var, value))
@@ -87,6 +87,13 @@ static int git_multi_pack_index_write_config(const char *var, const char *value,
                        opts.flags &= ~MIDX_WRITE_BITMAP_HASH_CACHE;
        }
 
+       if (!strcmp(var, "pack.writebitmaplookuptable")) {
+               if (git_config_bool(var, value))
+                       opts.flags |= MIDX_WRITE_BITMAP_LOOKUP_TABLE;
+               else
+                       opts.flags &= ~MIDX_WRITE_BITMAP_LOOKUP_TABLE;
+       }
+
        /*
         * We should never make a fall-back call to 'git_default_config', since
         * this was already called in 'cmd_multi_pack_index()'.
@@ -104,7 +111,8 @@ static void read_packs_from_stdin(struct string_list *to)
        strbuf_release(&buf);
 }
 
-static int cmd_multi_pack_index_write(int argc, const char **argv)
+static int cmd_multi_pack_index_write(int argc, const char **argv,
+                                     const char *prefix)
 {
        struct option *options;
        static struct option builtin_multi_pack_index_write_options[] = {
@@ -132,9 +140,9 @@ static int cmd_multi_pack_index_write(int argc, const char **argv)
 
        if (isatty(2))
                opts.flags |= MIDX_PROGRESS;
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             options, builtin_multi_pack_index_write_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            0);
        if (argc)
                usage_with_options(builtin_multi_pack_index_write_usage,
                                   options);
@@ -160,7 +168,8 @@ static int cmd_multi_pack_index_write(int argc, const char **argv)
                               opts.refs_snapshot, opts.flags);
 }
 
-static int cmd_multi_pack_index_verify(int argc, const char **argv)
+static int cmd_multi_pack_index_verify(int argc, const char **argv,
+                                      const char *prefix)
 {
        struct option *options;
        static struct option builtin_multi_pack_index_verify_options[] = {
@@ -174,9 +183,9 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv)
 
        if (isatty(2))
                opts.flags |= MIDX_PROGRESS;
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             options, builtin_multi_pack_index_verify_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            0);
        if (argc)
                usage_with_options(builtin_multi_pack_index_verify_usage,
                                   options);
@@ -186,7 +195,8 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv)
        return verify_midx_file(the_repository, opts.object_dir, opts.flags);
 }
 
-static int cmd_multi_pack_index_expire(int argc, const char **argv)
+static int cmd_multi_pack_index_expire(int argc, const char **argv,
+                                      const char *prefix)
 {
        struct option *options;
        static struct option builtin_multi_pack_index_expire_options[] = {
@@ -200,9 +210,9 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv)
 
        if (isatty(2))
                opts.flags |= MIDX_PROGRESS;
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             options, builtin_multi_pack_index_expire_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            0);
        if (argc)
                usage_with_options(builtin_multi_pack_index_expire_usage,
                                   options);
@@ -212,7 +222,8 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv)
        return expire_midx_packs(the_repository, opts.object_dir, opts.flags);
 }
 
-static int cmd_multi_pack_index_repack(int argc, const char **argv)
+static int cmd_multi_pack_index_repack(int argc, const char **argv,
+                                      const char *prefix)
 {
        struct option *options;
        static struct option builtin_multi_pack_index_repack_options[] = {
@@ -229,10 +240,10 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
 
        if (isatty(2))
                opts.flags |= MIDX_PROGRESS;
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             options,
                             builtin_multi_pack_index_repack_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            0);
        if (argc)
                usage_with_options(builtin_multi_pack_index_repack_usage,
                                   options);
@@ -247,7 +258,15 @@ int cmd_multi_pack_index(int argc, const char **argv,
                         const char *prefix)
 {
        int res;
-       struct option *builtin_multi_pack_index_options = common_opts;
+       parse_opt_subcommand_fn *fn = NULL;
+       struct option builtin_multi_pack_index_options[] = {
+               OPT_SUBCOMMAND("repack", &fn, cmd_multi_pack_index_repack),
+               OPT_SUBCOMMAND("write", &fn, cmd_multi_pack_index_write),
+               OPT_SUBCOMMAND("verify", &fn, cmd_multi_pack_index_verify),
+               OPT_SUBCOMMAND("expire", &fn, cmd_multi_pack_index_expire),
+               OPT_END(),
+       };
+       struct option *options = parse_options_concat(builtin_multi_pack_index_options, common_opts);
 
        git_config(git_default_config, NULL);
 
@@ -256,31 +275,12 @@ int cmd_multi_pack_index(int argc, const char **argv,
            the_repository->objects->odb)
                opts.object_dir = xstrdup(the_repository->objects->odb->path);
 
-       argc = parse_options(argc, argv, prefix,
-                            builtin_multi_pack_index_options,
-                            builtin_multi_pack_index_usage,
-                            PARSE_OPT_STOP_AT_NON_OPTION);
-
-       if (!argc)
-               goto usage;
-
-       if (!strcmp(argv[0], "repack"))
-               res = cmd_multi_pack_index_repack(argc, argv);
-       else if (!strcmp(argv[0], "write"))
-               res =  cmd_multi_pack_index_write(argc, argv);
-       else if (!strcmp(argv[0], "verify"))
-               res =  cmd_multi_pack_index_verify(argc, argv);
-       else if (!strcmp(argv[0], "expire"))
-               res =  cmd_multi_pack_index_expire(argc, argv);
-       else {
-               error(_("unrecognized subcommand: %s"), argv[0]);
-               goto usage;
-       }
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_multi_pack_index_usage, 0);
+       FREE_AND_NULL(options);
+
+       res = fn(argc, argv, prefix);
 
        free(opts.object_dir);
        return res;
-
-usage:
-       usage_with_options(builtin_multi_pack_index_usage,
-                          builtin_multi_pack_index_options);
 }
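
builtin/multi-pack-index.c drops its hand-rolled strcmp() chain in favor of parse-options filling a subcommand function pointer that is then invoked once. Git's parse_options()/OPT_SUBCOMMAND machinery is internal to the project, so below is a standalone sketch of the same table-driven dispatch in plain C (the command names and handlers are made up for illustration):

    #include <stdio.h>
    #include <string.h>

    typedef int (*subcommand_fn)(int argc, char **argv);

    static int do_write(int argc, char **argv)
    {
            (void)argc; (void)argv;   /* same job as Git's UNUSED annotations above */
            puts("write");
            return 0;
    }

    static int do_verify(int argc, char **argv)
    {
            (void)argc; (void)argv;
            puts("verify");
            return 0;
    }

    static const struct { const char *name; subcommand_fn fn; } subcommands[] = {
            { "write",  do_write },
            { "verify", do_verify },
    };

    int main(int argc, char **argv)
    {
            subcommand_fn fn = NULL;
            size_t i;

            if (argc < 2) {
                    fprintf(stderr, "usage: demo <write|verify> [args]\n");
                    return 129;
            }
            for (i = 0; i < sizeof(subcommands) / sizeof(subcommands[0]); i++)
                    if (!strcmp(argv[1], subcommands[i].name))
                            fn = subcommands[i].fn;
            if (!fn) {
                    fprintf(stderr, "unrecognized subcommand: %s\n", argv[1]);
                    return 129;
            }
            return fn(argc - 1, argv + 1);   /* hand remaining arguments to the handler */
    }
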
diff --git a/builtin/mv.c b/builtin/mv.c
index 83a465ba831adf94561a13f3d358926f516fe9ac..4729bb1a1ab9d33829d8abb3328564faa8ed7ef6 100644 (file)
 #include "string-list.h"
 #include "parse-options.h"
 #include "submodule.h"
+#include "entry.h"
 
 static const char * const builtin_mv_usage[] = {
        N_("git mv [<options>] <source>... <destination>"),
        NULL
 };
 
+enum update_mode {
+       BOTH = 0,
+       WORKING_DIRECTORY = (1 << 1),
+       INDEX = (1 << 2),
+       SPARSE = (1 << 3),
+       SKIP_WORKTREE_DIR = (1 << 4),
+};
+
 #define DUP_BASENAME 1
 #define KEEP_TRAILING_SLASH 2
 
@@ -115,6 +124,36 @@ static int index_range_of_same_dir(const char *src, int length,
        return last - first;
 }
 
+/*
+ * Check if an out-of-cone directory should be in the index. Imagine the case
+ * where all the files under a directory are marked with the 'CE_SKIP_WORKTREE'
+ * bit and the directory is therefore sparsified.
+ *
+ * Return 0 if such a directory exists (i.e. if any of its contained files were
+ * not marked with CE_SKIP_WORKTREE, the directory would be present in the
+ * working tree). Return 1 otherwise.
+ */
+static int check_dir_in_index(const char *name)
+{
+       const char *with_slash = add_slash(name);
+       int length = strlen(with_slash);
+
+       int pos = cache_name_pos(with_slash, length);
+       const struct cache_entry *ce;
+
+       if (pos < 0) {
+               pos = -pos - 1;
+               if (pos >= the_index.cache_nr)
+                       return 1;
+               ce = active_cache[pos];
+               if (strncmp(with_slash, ce->name, length))
+                       return 1;
+               if (ce_skip_worktree(ce))
+                       return 0;
+       }
+       return 1;
+}
+
 int cmd_mv(int argc, const char **argv, const char *prefix)
 {
        int i, flags, gitmodules_modified = 0;
@@ -129,7 +168,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
                OPT_END(),
        };
        const char **source, **destination, **dest_path, **submodule_gitfile;
-       enum update_mode { BOTH = 0, WORKING_DIRECTORY, INDEX, SPARSE } *modes;
+       enum update_mode *modes;
        struct stat st;
        struct string_list src_for_dst = STRING_LIST_INIT_NODUP;
        struct lock_file lock_file = LOCK_INIT;
@@ -148,7 +187,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
                die(_("index file corrupt"));
 
        source = internal_prefix_pathspec(prefix, argv, argc, 0);
-       modes = xcalloc(argc, sizeof(enum update_mode));
+       CALLOC_ARRAY(modes, argc);
+
        /*
         * Keep trailing slash, needed to let
         * "git mv file no-such-dir/" error out, except in the case
@@ -176,7 +216,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
        /* Checking */
        for (i = 0; i < argc; i++) {
                const char *src = source[i], *dst = destination[i];
-               int length, src_is_dir;
+               int length;
                const char *bad = NULL;
                int skip_sparse = 0;
 
@@ -185,54 +225,103 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
 
                length = strlen(src);
                if (lstat(src, &st) < 0) {
-                       /* only error if existence is expected. */
-                       if (modes[i] != SPARSE)
+                       int pos;
+                       const struct cache_entry *ce;
+
+                       pos = cache_name_pos(src, length);
+                       if (pos < 0) {
+                               const char *src_w_slash = add_slash(src);
+                               if (!path_in_sparse_checkout(src_w_slash, &the_index) &&
+                                   !check_dir_in_index(src)) {
+                                       modes[i] |= SKIP_WORKTREE_DIR;
+                                       goto dir_check;
+                               }
+                               /* only error if existence is expected. */
+                               if (!(modes[i] & SPARSE))
+                                       bad = _("bad source");
+                               goto act_on_entry;
+                       }
+                       ce = active_cache[pos];
+                       if (!ce_skip_worktree(ce)) {
                                bad = _("bad source");
-               } else if (!strncmp(src, dst, length) &&
-                               (dst[length] == 0 || dst[length] == '/')) {
+                               goto act_on_entry;
+                       }
+                       if (!ignore_sparse) {
+                               string_list_append(&only_match_skip_worktree, src);
+                               goto act_on_entry;
+                       }
+                       /* Check if dst exists in index */
+                       if (cache_name_pos(dst, strlen(dst)) < 0) {
+                               modes[i] |= SPARSE;
+                               goto act_on_entry;
+                       }
+                       if (!force) {
+                               bad = _("destination exists");
+                               goto act_on_entry;
+                       }
+                       modes[i] |= SPARSE;
+                       goto act_on_entry;
+               }
+               if (!strncmp(src, dst, length) &&
+                   (dst[length] == 0 || dst[length] == '/')) {
                        bad = _("can not move directory into itself");
-               } else if ((src_is_dir = S_ISDIR(st.st_mode))
-                               && lstat(dst, &st) == 0)
+                       goto act_on_entry;
+               }
+               if (S_ISDIR(st.st_mode)
+                   && lstat(dst, &st) == 0) {
                        bad = _("cannot move directory over file");
-               else if (src_is_dir) {
+                       goto act_on_entry;
+               }
+
+dir_check:
+               if (S_ISDIR(st.st_mode)) {
+                       int j, dst_len, n;
                        int first = cache_name_pos(src, length), last;
 
-                       if (first >= 0)
+                       if (first >= 0) {
                                prepare_move_submodule(src, first,
                                                       submodule_gitfile + i);
-                       else if (index_range_of_same_dir(src, length,
-                                                        &first, &last) < 1)
+                               goto act_on_entry;
+                       } else if (index_range_of_same_dir(src, length,
+                                                          &first, &last) < 1) {
                                bad = _("source directory is empty");
-                       else { /* last - first >= 1 */
-                               int j, dst_len, n;
-
-                               modes[i] = WORKING_DIRECTORY;
-                               n = argc + last - first;
-                               REALLOC_ARRAY(source, n);
-                               REALLOC_ARRAY(destination, n);
-                               REALLOC_ARRAY(modes, n);
-                               REALLOC_ARRAY(submodule_gitfile, n);
-
-                               dst = add_slash(dst);
-                               dst_len = strlen(dst);
-
-                               for (j = 0; j < last - first; j++) {
-                                       const struct cache_entry *ce = active_cache[first + j];
-                                       const char *path = ce->name;
-                                       source[argc + j] = path;
-                                       destination[argc + j] =
-                                               prefix_path(dst, dst_len, path + length + 1);
-                                       modes[argc + j] = ce_skip_worktree(ce) ? SPARSE : INDEX;
-                                       submodule_gitfile[argc + j] = NULL;
-                               }
-                               argc += last - first;
+                               goto act_on_entry;
                        }
-               } else if (!(ce = cache_file_exists(src, length, 0))) {
+
+                       /* last - first >= 1 */
+                       modes[i] |= WORKING_DIRECTORY;
+                       n = argc + last - first;
+                       REALLOC_ARRAY(source, n);
+                       REALLOC_ARRAY(destination, n);
+                       REALLOC_ARRAY(modes, n);
+                       REALLOC_ARRAY(submodule_gitfile, n);
+
+                       dst = add_slash(dst);
+                       dst_len = strlen(dst);
+
+                       for (j = 0; j < last - first; j++) {
+                               const struct cache_entry *ce = active_cache[first + j];
+                               const char *path = ce->name;
+                               source[argc + j] = path;
+                               destination[argc + j] =
+                                       prefix_path(dst, dst_len, path + length + 1);
+                               memset(modes + argc + j, 0, sizeof(enum update_mode));
+                               modes[argc + j] |= ce_skip_worktree(ce) ? SPARSE : INDEX;
+                               submodule_gitfile[argc + j] = NULL;
+                       }
+                       argc += last - first;
+                       goto act_on_entry;
+               }
+               if (!(ce = cache_file_exists(src, length, 0))) {
                        bad = _("not under version control");
-               } else if (ce_stage(ce)) {
+                       goto act_on_entry;
+               }
+               if (ce_stage(ce)) {
                        bad = _("conflicted");
-               } else if (lstat(dst, &st) == 0 &&
-                        (!ignore_case || strcasecmp(src, dst))) {
+                       goto act_on_entry;
+               }
+               if (lstat(dst, &st) == 0 &&
+                   (!ignore_case || strcasecmp(src, dst))) {
                        bad = _("destination exists");
                        if (force) {
                                /*
@@ -246,34 +335,40 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
                                } else
                                        bad = _("Cannot overwrite");
                        }
-               } else if (string_list_has_string(&src_for_dst, dst))
+                       goto act_on_entry;
+               }
+               if (string_list_has_string(&src_for_dst, dst)) {
                        bad = _("multiple sources for the same target");
-               else if (is_dir_sep(dst[strlen(dst) - 1]))
+                       goto act_on_entry;
+               }
+               if (is_dir_sep(dst[strlen(dst) - 1])) {
                        bad = _("destination directory does not exist");
-               else {
-                       /*
-                        * We check if the paths are in the sparse-checkout
-                        * definition as a very final check, since that
-                        * allows us to point the user to the --sparse
-                        * option as a way to have a successful run.
-                        */
-                       if (!ignore_sparse &&
-                           !path_in_sparse_checkout(src, &the_index)) {
-                               string_list_append(&only_match_skip_worktree, src);
-                               skip_sparse = 1;
-                       }
-                       if (!ignore_sparse &&
-                           !path_in_sparse_checkout(dst, &the_index)) {
-                               string_list_append(&only_match_skip_worktree, dst);
-                               skip_sparse = 1;
-                       }
-
-                       if (skip_sparse)
-                               goto remove_entry;
+                       goto act_on_entry;
+               }
 
-                       string_list_insert(&src_for_dst, dst);
+               /*
+                * We check if the paths are in the sparse-checkout
+                * definition as a very final check, since that
+                * allows us to point the user to the --sparse
+                * option as a way to have a successful run.
+                */
+               if (!ignore_sparse &&
+                   !path_in_sparse_checkout(src, &the_index)) {
+                       string_list_append(&only_match_skip_worktree, src);
+                       skip_sparse = 1;
                }
+               if (!ignore_sparse &&
+                   !path_in_sparse_checkout(dst, &the_index)) {
+                       string_list_append(&only_match_skip_worktree, dst);
+                       skip_sparse = 1;
+               }
+
+               if (skip_sparse)
+                       goto remove_entry;
+
+               string_list_insert(&src_for_dst, dst);
 
+act_on_entry:
                if (!bad)
                        continue;
                if (!ignore_errors)
@@ -282,14 +377,11 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
 remove_entry:
                if (--argc > 0) {
                        int n = argc - i;
-                       memmove(source + i, source + i + 1,
-                               n * sizeof(char *));
-                       memmove(destination + i, destination + i + 1,
-                               n * sizeof(char *));
-                       memmove(modes + i, modes + i + 1,
-                               n * sizeof(enum update_mode));
-                       memmove(submodule_gitfile + i, submodule_gitfile + i + 1,
-                               n * sizeof(char *));
+                       MOVE_ARRAY(source + i, source + i + 1, n);
+                       MOVE_ARRAY(destination + i, destination + i + 1, n);
+                       MOVE_ARRAY(modes + i, modes + i + 1, n);
+                       MOVE_ARRAY(submodule_gitfile + i,
+                                  submodule_gitfile + i + 1, n);
                        i--;
                }
        }
@@ -304,11 +396,17 @@ remove_entry:
                const char *src = source[i], *dst = destination[i];
                enum update_mode mode = modes[i];
                int pos;
+               struct checkout state = CHECKOUT_INIT;
+               state.istate = &the_index;
+
+               if (force)
+                       state.force = 1;
                if (show_only || verbose)
                        printf(_("Renaming %s to %s\n"), src, dst);
                if (show_only)
                        continue;
-               if (mode != INDEX && mode != SPARSE && rename(src, dst) < 0) {
+               if (!(mode & (INDEX | SPARSE | SKIP_WORKTREE_DIR)) &&
+                   rename(src, dst) < 0) {
                        if (ignore_errors)
                                continue;
                        die_errno(_("renaming '%s' failed"), src);
@@ -322,12 +420,23 @@ remove_entry:
                                                              1);
                }
 
-               if (mode == WORKING_DIRECTORY)
+               if (mode & (WORKING_DIRECTORY | SKIP_WORKTREE_DIR))
                        continue;
 
                pos = cache_name_pos(src, strlen(src));
                assert(pos >= 0);
                rename_cache_entry_at(pos, dst);
+
+               if ((mode & SPARSE) &&
+                   (path_in_sparse_checkout(dst, &the_index))) {
+                       int dst_pos;
+
+                       dst_pos = cache_name_pos(dst, strlen(dst));
+                       active_cache[dst_pos]->ce_flags &= ~CE_SKIP_WORKTREE;
+
+                       if (checkout_entry(active_cache[dst_pos], &state, NULL, NULL))
+                               die(_("cannot checkout %s"), active_cache[dst_pos]->name);
+               }
        }
 
        if (gitmodules_modified)
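
The builtin/mv.c change turns enum update_mode into independent bit flags, so a single entry can be both SPARSE and SKIP_WORKTREE_DIR and the checks become mask tests. A small standalone sketch of that flag idiom, reusing only the enumerator values quoted in the hunk above (everything else is illustrative):

    #include <stdio.h>

    enum update_mode {
            BOTH = 0,
            WORKING_DIRECTORY = (1 << 1),
            INDEX = (1 << 2),
            SPARSE = (1 << 3),
            SKIP_WORKTREE_DIR = (1 << 4),
    };

    int main(void)
    {
            enum update_mode mode = BOTH;

            mode |= SPARSE;              /* one property ...                   */
            mode |= SKIP_WORKTREE_DIR;   /* ... and another, on the same entry */

            /* The "skip rename(2)" test from the hunk above becomes a mask check: */
            if (mode & (INDEX | SPARSE | SKIP_WORKTREE_DIR))
                    puts("index-only move; no filesystem rename needed");

            /* Likewise the "nothing to rename in the cache" test: */
            if (mode & (WORKING_DIRECTORY | SKIP_WORKTREE_DIR))
                    puts("skip the cache-entry rename for this entry");

            return 0;
    }
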
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index 580b1eb170ef5793e8f3f6a231291c87aec47fe1..15535e914a6939d661296798e0b325da9f1ac7b6 100644 (file)
@@ -344,7 +344,8 @@ static int cmp_by_tag_and_age(const void *a_, const void *b_)
        return a->taggerdate != b->taggerdate;
 }
 
-static int name_ref(const char *path, const struct object_id *oid, int flags, void *cb_data)
+static int name_ref(const char *path, const struct object_id *oid,
+                   int flags UNUSED, void *cb_data)
 {
        struct object *o = parse_object(the_repository, oid);
        struct name_ref_data *data = cb_data;
diff --git a/builtin/notes.c b/builtin/notes.c
index a3d0d15a227f2142f2d9d5fddbe0017d0e7a5c7f..be51f692257f67e2765feb6ff63e8037f3a4f00d 100644 (file)
@@ -994,17 +994,34 @@ static int get_ref(int argc, const char **argv, const char *prefix)
 
 int cmd_notes(int argc, const char **argv, const char *prefix)
 {
-       int result;
        const char *override_notes_ref = NULL;
+       parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
                OPT_STRING(0, "ref", &override_notes_ref, N_("notes-ref"),
                           N_("use notes from <notes-ref>")),
+               OPT_SUBCOMMAND("list", &fn, list),
+               OPT_SUBCOMMAND("add", &fn, add),
+               OPT_SUBCOMMAND("copy", &fn, copy),
+               OPT_SUBCOMMAND("append", &fn, append_edit),
+               OPT_SUBCOMMAND("edit", &fn, append_edit),
+               OPT_SUBCOMMAND("show", &fn, show),
+               OPT_SUBCOMMAND("merge", &fn, merge),
+               OPT_SUBCOMMAND("remove", &fn, remove_cmd),
+               OPT_SUBCOMMAND("prune", &fn, prune),
+               OPT_SUBCOMMAND("get-ref", &fn, get_ref),
                OPT_END()
        };
 
        git_config(git_default_config, NULL);
        argc = parse_options(argc, argv, prefix, options, git_notes_usage,
-                            PARSE_OPT_STOP_AT_NON_OPTION);
+                            PARSE_OPT_SUBCOMMAND_OPTIONAL);
+       if (!fn) {
+               if (argc) {
+                       error(_("unknown subcommand: `%s'"), argv[0]);
+                       usage_with_options(git_notes_usage, options);
+               }
+               fn = list;
+       }
 
        if (override_notes_ref) {
                struct strbuf sb = STRBUF_INIT;
@@ -1014,28 +1031,5 @@ int cmd_notes(int argc, const char **argv, const char *prefix)
                strbuf_release(&sb);
        }
 
-       if (argc < 1 || !strcmp(argv[0], "list"))
-               result = list(argc, argv, prefix);
-       else if (!strcmp(argv[0], "add"))
-               result = add(argc, argv, prefix);
-       else if (!strcmp(argv[0], "copy"))
-               result = copy(argc, argv, prefix);
-       else if (!strcmp(argv[0], "append") || !strcmp(argv[0], "edit"))
-               result = append_edit(argc, argv, prefix);
-       else if (!strcmp(argv[0], "show"))
-               result = show(argc, argv, prefix);
-       else if (!strcmp(argv[0], "merge"))
-               result = merge(argc, argv, prefix);
-       else if (!strcmp(argv[0], "remove"))
-               result = remove_cmd(argc, argv, prefix);
-       else if (!strcmp(argv[0], "prune"))
-               result = prune(argc, argv, prefix);
-       else if (!strcmp(argv[0], "get-ref"))
-               result = get_ref(argc, argv, prefix);
-       else {
-               result = error(_("unknown subcommand: %s"), argv[0]);
-               usage_with_options(git_notes_usage, options);
-       }
-
-       return result ? 1 : 0;
+       return !!fn(argc, argv, prefix);
 }
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 39e28cfcafc3d361dbea42d4c4f7f4c1ca09728b..3658c05cafce7a1735ece0510a9aaacadb46fae9 100644 (file)
@@ -759,8 +759,8 @@ static enum write_one_status write_one(struct hashfile *f,
        return WRITE_ONE_WRITTEN;
 }
 
-static int mark_tagged(const char *path, const struct object_id *oid, int flag,
-                      void *cb_data)
+static int mark_tagged(const char *path UNUSED, const struct object_id *oid,
+                      int flag UNUSED, void *cb_data UNUSED)
 {
        struct object_id peeled;
        struct object_entry *entry = packlist_find(&to_pack, oid);
@@ -3035,7 +3035,8 @@ static void add_tag_chain(const struct object_id *oid)
        }
 }
 
-static int add_ref_tag(const char *tag, const struct object_id *oid, int flag, void *cb_data)
+static int add_ref_tag(const char *tag UNUSED, const struct object_id *oid,
+                      int flag UNUSED, void *cb_data UNUSED)
 {
        struct object_id peeled;
 
@@ -3148,6 +3149,14 @@ static int git_pack_config(const char *k, const char *v, void *cb)
                else
                        write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
        }
+
+       if (!strcmp(k, "pack.writebitmaplookuptable")) {
+               if (git_config_bool(k, v))
+                       write_bitmap_options |= BITMAP_OPT_LOOKUP_TABLE;
+               else
+                       write_bitmap_options &= ~BITMAP_OPT_LOOKUP_TABLE;
+       }
+
        if (!strcmp(k, "pack.usebitmaps")) {
                use_bitmap_index_default = git_config_bool(k, v);
                return 0;
@@ -3950,8 +3959,9 @@ static void record_recent_commit(struct commit *commit, void *data)
 }
 
 static int mark_bitmap_preferred_tip(const char *refname,
-                                    const struct object_id *oid, int flags,
-                                    void *_data)
+                                    const struct object_id *oid,
+                                    int flags UNUSED,
+                                    void *data UNUSED)
 {
        struct object_id peeled;
        struct object *object;
diff --git a/builtin/pull.c b/builtin/pull.c
index 01155ba67b20d6593e09b239d8a5bbb3eb195535..403a24d7ca670f9a5c56ebfb6d8429b5e5817b54 100644 (file)
@@ -990,6 +990,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
        int rebase_unspecified = 0;
        int can_ff;
        int divergent;
+       int ret;
 
        if (!getenv("GIT_REFLOG_ACTION"))
                set_reflog_message(argc, argv);
@@ -1100,7 +1101,8 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
        if (is_null_oid(&orig_head)) {
                if (merge_heads.nr > 1)
                        die(_("Cannot merge multiple branches into empty head."));
-               return pull_into_void(merge_heads.oid, &curr_head);
+               ret = pull_into_void(merge_heads.oid, &curr_head);
+               goto cleanup;
        }
        if (merge_heads.nr > 1) {
                if (opt_rebase)
@@ -1125,8 +1127,6 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
        }
 
        if (opt_rebase) {
-               int ret = 0;
-
                struct object_id newbase;
                struct object_id upstream;
                get_rebase_newbase_and_upstream(&newbase, &upstream, &curr_head,
@@ -1149,12 +1149,16 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
                             recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
                        ret = rebase_submodules();
 
-               return ret;
+               goto cleanup;
        } else {
-               int ret = run_merge();
+               ret = run_merge();
                if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON ||
                             recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
                        ret = update_submodules();
-               return ret;
+               goto cleanup;
        }
+
+cleanup:
+       oid_array_clear(&merge_heads);
+       return ret;
 }
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index 50318849d657ea298ae8984decdc2ae870a62741..e2a74efb42a795a5139932bd052999d050cab420 100644 (file)
@@ -38,8 +38,10 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
                OPT_END()
        };
        struct option *options;
-       int res = 0;
+       int i, dash_dash = -1, res = 0;
        struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT;
+       struct object_id oid;
+       const char *three_dots = NULL;
 
        git_config(git_diff_ui_config, NULL);
 
@@ -47,7 +49,7 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
 
        options = parse_options_concat(range_diff_options, diffopt.parseopts);
        argc = parse_options(argc, argv, prefix, options,
-                            builtin_range_diff_usage, 0);
+                            builtin_range_diff_usage, PARSE_OPT_KEEP_DASHDASH);
 
        diff_setup_done(&diffopt);
 
@@ -55,40 +57,91 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
        if (!simple_color)
                diffopt.use_color = 1;
 
-       if (argc == 2) {
-               if (!is_range_diff_range(argv[0]))
-                       die(_("not a commit range: '%s'"), argv[0]);
-               strbuf_addstr(&range1, argv[0]);
+       for (i = 0; i < argc; i++)
+               if (!strcmp(argv[i], "--")) {
+                       dash_dash = i;
+                       break;
+               }
+
+       if (dash_dash == 3 ||
+           (dash_dash < 0 && argc > 2 &&
+            !get_oid_committish(argv[0], &oid) &&
+            !get_oid_committish(argv[1], &oid) &&
+            !get_oid_committish(argv[2], &oid))) {
+               if (dash_dash < 0)
+                       ; /* already validated arguments */
+               else if (get_oid_committish(argv[0], &oid))
+                       usage_msg_optf(_("not a revision: '%s'"),
+                                      builtin_range_diff_usage, options,
+                                      argv[0]);
+               else if (get_oid_committish(argv[1], &oid))
+                       usage_msg_optf(_("not a revision: '%s'"),
+                                      builtin_range_diff_usage, options,
+                                      argv[1]);
+               else if (get_oid_committish(argv[2], &oid))
+                       usage_msg_optf(_("not a revision: '%s'"),
+                                      builtin_range_diff_usage, options,
+                                      argv[2]);
 
-               if (!is_range_diff_range(argv[1]))
-                       die(_("not a commit range: '%s'"), argv[1]);
-               strbuf_addstr(&range2, argv[1]);
-       } else if (argc == 3) {
                strbuf_addf(&range1, "%s..%s", argv[0], argv[1]);
                strbuf_addf(&range2, "%s..%s", argv[0], argv[2]);
-       } else if (argc == 1) {
-               const char *b = strstr(argv[0], "..."), *a = argv[0];
+
+               strvec_pushv(&other_arg, argv +
+                            (dash_dash < 0 ? 3 : dash_dash));
+       } else if (dash_dash == 2 ||
+                  (dash_dash < 0 && argc > 1 &&
+                   is_range_diff_range(argv[0]) &&
+                   is_range_diff_range(argv[1]))) {
+               if (dash_dash < 0)
+                       ; /* already validated arguments */
+               else if (!is_range_diff_range(argv[0]))
+                       usage_msg_optf(_("not a commit range: '%s'"),
+                                      builtin_range_diff_usage, options,
+                                      argv[0]);
+               else if (!is_range_diff_range(argv[1]))
+                       usage_msg_optf(_("not a commit range: '%s'"),
+                                      builtin_range_diff_usage, options,
+                                      argv[1]);
+
+               strbuf_addstr(&range1, argv[0]);
+               strbuf_addstr(&range2, argv[1]);
+
+               strvec_pushv(&other_arg, argv +
+                            (dash_dash < 0 ? 2 : dash_dash));
+       } else if (dash_dash == 1 ||
+                  (dash_dash < 0 && argc > 0 &&
+                   (three_dots = strstr(argv[0], "...")))) {
+               const char *a, *b;
                int a_len;
 
-               if (!b) {
-                       error(_("single arg format must be symmetric range"));
-                       usage_with_options(builtin_range_diff_usage, options);
-               }
+               if (dash_dash < 0)
+                       ; /* already validated arguments */
+               else if (!(three_dots = strstr(argv[0], "...")))
+                       usage_msg_optf(_("not a symmetric range: '%s'"),
+                                        builtin_range_diff_usage, options,
+                                        argv[0]);
 
-               a_len = (int)(b - a);
-               if (!a_len) {
+               if (three_dots == argv[0]) {
                        a = "HEAD";
                        a_len = strlen(a);
+               } else {
+                       a = argv[0];
+                       a_len = (int)(three_dots - a);
                }
-               b += 3;
-               if (!*b)
+
+               if (three_dots[3])
+                       b = three_dots + 3;
+               else
                        b = "HEAD";
+
                strbuf_addf(&range1, "%s..%.*s", b, a_len, a);
                strbuf_addf(&range2, "%.*s..%s", a_len, a, b);
-       } else {
-               error(_("need two commit ranges"));
-               usage_with_options(builtin_range_diff_usage, options);
-       }
+
+               strvec_pushv(&other_arg, argv +
+                            (dash_dash < 0 ? 1 : dash_dash));
+       } else
+               usage_msg_opt(_("need two commit ranges"),
+                             builtin_range_diff_usage, options);
        FREE_AND_NULL(options);
 
        range_diff_opts.dual_color = simple_color < 1;
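
The new single-argument form of range-diff splits a symmetric range A...B into the two ranges B..A and A..B, substituting HEAD for whichever side is empty. A standalone sketch of that string handling in plain C, without Git's strbuf (split_symmetric() and the sample revision names are invented):

    #include <stdio.h>
    #include <string.h>

    /* Split "A...B" into range1 = "B..A" and range2 = "A..B"; empty sides default to HEAD. */
    static int split_symmetric(const char *arg, char *range1, char *range2, size_t n)
    {
            const char *three_dots = strstr(arg, "...");
            const char *a, *b;
            int a_len;

            if (!three_dots)
                    return -1;   /* not a symmetric range */

            if (three_dots == arg) {
                    a = "HEAD";
                    a_len = (int)strlen(a);
            } else {
                    a = arg;
                    a_len = (int)(three_dots - arg);
            }
            b = three_dots[3] ? three_dots + 3 : "HEAD";

            snprintf(range1, n, "%s..%.*s", b, a_len, a);
            snprintf(range2, n, "%.*s..%s", a_len, a, b);
            return 0;
    }

    int main(void)
    {
            char r1[128], r2[128];

            if (!split_symmetric("topic...seen", r1, r2, sizeof(r1)))
                    printf("%s and %s\n", r1, r2);   /* seen..topic and topic..seen */
            return 0;
    }
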
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 70aa7c842f34b2a8cfbdf2c33d3b699bbda0404c..56e4214b44104a445f8b68d185c0aadd3442247d 100644 (file)
@@ -102,6 +102,7 @@ struct rebase_options {
        int reschedule_failed_exec;
        int reapply_cherry_picks;
        int fork_point;
+       int update_refs;
 };
 
 #define REBASE_OPTIONS_INIT {                          \
@@ -298,6 +299,7 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
                ret = complete_action(the_repository, &replay, flags,
                        shortrevisions, opts->onto_name, opts->onto,
                        &opts->orig_head, &commands, opts->autosquash,
+                       opts->update_refs,
                        &todo_list);
        }
 
@@ -800,6 +802,11 @@ static int rebase_config(const char *var, const char *value, void *data)
                return 0;
        }
 
+       if (!strcmp(var, "rebase.updaterefs")) {
+               opts->update_refs = git_config_bool(var, value);
+               return 0;
+       }
+
        if (!strcmp(var, "rebase.reschedulefailedexec")) {
                opts->reschedule_failed_exec = git_config_bool(var, value);
                return 0;
@@ -1124,6 +1131,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
                OPT_BOOL(0, "autosquash", &options.autosquash,
                         N_("move commits that begin with "
                            "squash!/fixup! under -i")),
+               OPT_BOOL(0, "update-refs", &options.update_refs,
+                        N_("update branches that point to commits "
+                           "that are being rebased")),
                { OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"),
                        N_("GPG-sign commits"),
                        PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 31b48e728bedef2fae4c9024726a1222dcc81d7d..44bcea3a5b3add614c72531d112b7b4a9777751a 100644 (file)
@@ -291,7 +291,7 @@ static void show_ref(const char *path, const struct object_id *oid)
 }
 
 static int show_ref_cb(const char *path_full, const struct object_id *oid,
-                      int flag, void *data)
+                      int flag UNUSED, void *data)
 {
        struct oidset *seen = data;
        const char *path = strip_namespace(path_full);
@@ -465,7 +465,7 @@ static void rp_error(const char *err, ...)
        va_end(params);
 }
 
-static int copy_to_sideband(int in, int out, void *arg)
+static int copy_to_sideband(int in, int out UNUSED, void *arg UNUSED)
 {
        char data[128];
        int keepalive_active = 0;
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 4dd297dce86e5212087ee1202d5b9dd1f754f9d1..57c5c0d061c449a0cf782c2cc41688cc042e3c47 100644 (file)
@@ -56,7 +56,8 @@ struct worktree_reflogs {
        struct string_list reflogs;
 };
 
-static int collect_reflog(const char *ref, const struct object_id *oid, int unused, void *cb_data)
+static int collect_reflog(const char *ref, const struct object_id *oid UNUSED,
+                         int flags UNUSED, void *cb_data)
 {
        struct worktree_reflogs *cb = cb_data;
        struct worktree *worktree = cb->worktree;
@@ -193,6 +194,8 @@ static int expire_unreachable_callback(const struct option *opt,
 {
        struct cmd_reflog_expire_cb *cmd = opt->value;
 
+       BUG_ON_OPT_NEG(unset);
+
        if (parse_expiry_date(arg, &cmd->expire_unreachable))
                die(_("invalid timestamp '%s' given to '--%s'"),
                    arg, opt->long_name);
@@ -207,6 +210,8 @@ static int expire_total_callback(const struct option *opt,
 {
        struct cmd_reflog_expire_cb *cmd = opt->value;
 
+       BUG_ON_OPT_NEG(unset);
+
        if (parse_expiry_date(arg, &cmd->expire_total))
                die(_("invalid timestamp '%s' given to '--%s'"),
                    arg, opt->long_name);
@@ -223,7 +228,7 @@ static int cmd_reflog_show(int argc, const char **argv, const char *prefix)
 
        parse_options(argc, argv, prefix, options, reflog_show_usage,
                      PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
-                     PARSE_OPT_KEEP_UNKNOWN);
+                     PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        return cmd_log_reflog(argc, argv, prefix);
 }
@@ -404,40 +409,21 @@ static int cmd_reflog_exists(int argc, const char **argv, const char *prefix)
 
 int cmd_reflog(int argc, const char **argv, const char *prefix)
 {
+       parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
+               OPT_SUBCOMMAND("show", &fn, cmd_reflog_show),
+               OPT_SUBCOMMAND("expire", &fn, cmd_reflog_expire),
+               OPT_SUBCOMMAND("delete", &fn, cmd_reflog_delete),
+               OPT_SUBCOMMAND("exists", &fn, cmd_reflog_exists),
                OPT_END()
        };
 
        argc = parse_options(argc, argv, prefix, options, reflog_usage,
+                            PARSE_OPT_SUBCOMMAND_OPTIONAL |
                             PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
-                            PARSE_OPT_KEEP_UNKNOWN |
-                            PARSE_OPT_NO_INTERNAL_HELP);
-
-       /*
-        * With "git reflog" we default to showing it. !argc is
-        * impossible with PARSE_OPT_KEEP_ARGV0.
-        */
-       if (argc == 1)
-               goto log_reflog;
-
-       if (!strcmp(argv[1], "-h"))
-               usage_with_options(reflog_usage, options);
-       else if (*argv[1] == '-')
-               goto log_reflog;
-
-       if (!strcmp(argv[1], "show"))
-               return cmd_reflog_show(argc - 1, argv + 1, prefix);
-       else if (!strcmp(argv[1], "expire"))
-               return cmd_reflog_expire(argc - 1, argv + 1, prefix);
-       else if (!strcmp(argv[1], "delete"))
-               return cmd_reflog_delete(argc - 1, argv + 1, prefix);
-       else if (!strcmp(argv[1], "exists"))
-               return cmd_reflog_exists(argc - 1, argv + 1, prefix);
-
-       /*
-        * Fall-through for e.g. "git reflog -1", "git reflog master",
-        * as well as the plain "git reflog" above goto above.
-        */
-log_reflog:
-       return cmd_log_reflog(argc, argv, prefix);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
+       if (fn)
+               return fn(argc - 1, argv + 1, prefix);
+       else
+               return cmd_log_reflog(argc, argv, prefix);
 }
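
The reflog.c rewrite above is the first of several in this batch (remote, sparse-checkout and stash follow the same shape): the hand-rolled strcmp() dispatch is replaced by OPT_SUBCOMMAND entries, and PARSE_OPT_SUBCOMMAND_OPTIONAL keeps a default action for the bare command. A stripped-down hedged sketch of the pattern, with the command and subcommand names invented:

    static const char * const example_usage[] = {
            N_("git example [frob | defrag] [<options>]"),
            NULL
    };

    static int cmd_example_frob(int argc, const char **argv, const char *prefix)
    {
            return 0;       /* a real subcommand would parse its own options here */
    }

    static int cmd_example_defrag(int argc, const char **argv, const char *prefix)
    {
            return 0;
    }

    int cmd_example(int argc, const char **argv, const char *prefix)
    {
            parse_opt_subcommand_fn *fn = NULL;
            struct option options[] = {
                    OPT_SUBCOMMAND("frob", &fn, cmd_example_frob),
                    OPT_SUBCOMMAND("defrag", &fn, cmd_example_defrag),
                    OPT_END()
            };

            argc = parse_options(argc, argv, prefix, options, example_usage,
                                 PARSE_OPT_SUBCOMMAND_OPTIONAL);
            if (fn)
                    return fn(argc, argv, prefix);
            /* no subcommand given: fall back to a default, as reflog and stash do */
            return cmd_example_frob(argc, argv, prefix);
    }

Note that cmd_reflog passes argc - 1, argv + 1 to the handler only because it also sets PARSE_OPT_KEEP_ARGV0; without that flag the arguments can be forwarded as returned.
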
index a3a0c27d7a09da9f5df1f258d3c21fe426690ea0..985b845a18bae82bfa90d63da78fca8445563d51 100644 (file)
@@ -150,7 +150,7 @@ static int parse_mirror_opt(const struct option *opt, const char *arg, int not)
        return 0;
 }
 
-static int add(int argc, const char **argv)
+static int add(int argc, const char **argv, const char *prefix)
 {
        int fetch = 0, fetch_tags = TAGS_DEFAULT;
        unsigned mirror = MIRROR_NONE;
@@ -177,8 +177,8 @@ static int add(int argc, const char **argv)
                OPT_END()
        };
 
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_add_usage,
-                            0);
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_add_usage, 0);
 
        if (argc != 2)
                usage_with_options(builtin_remote_add_usage, options);
@@ -264,7 +264,8 @@ static const char *abbrev_ref(const char *name, const char *prefix)
 }
 #define abbrev_branch(name) abbrev_ref((name), "refs/heads/")
 
-static int config_read_branches(const char *key, const char *value, void *cb)
+static int config_read_branches(const char *key, const char *value,
+                               void *data UNUSED)
 {
        const char *orig_key = key;
        char *name;
@@ -344,12 +345,13 @@ static void read_branches(void)
 
 struct ref_states {
        struct remote *remote;
-       struct string_list new_refs, stale, tracked, heads, push;
+       struct string_list new_refs, skipped, stale, tracked, heads, push;
        int queried;
 };
 
 #define REF_STATES_INIT { \
        .new_refs = STRING_LIST_INIT_DUP, \
+       .skipped = STRING_LIST_INIT_DUP, \
        .stale = STRING_LIST_INIT_DUP, \
        .tracked = STRING_LIST_INIT_DUP, \
        .heads = STRING_LIST_INIT_DUP, \
@@ -368,7 +370,9 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
                                states->remote->fetch.raw[i]);
 
        for (ref = fetch_map; ref; ref = ref->next) {
-               if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
+               if (omit_name_by_refspec(ref->name, &states->remote->fetch))
+                       string_list_append(&states->skipped, abbrev_branch(ref->name));
+               else if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
                        string_list_append(&states->new_refs, abbrev_branch(ref->name));
                else
                        string_list_append(&states->tracked, abbrev_branch(ref->name));
@@ -383,6 +387,7 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
        free_refs(fetch_map);
 
        string_list_sort(&states->new_refs);
+       string_list_sort(&states->skipped);
        string_list_sort(&states->tracked);
        string_list_sort(&states->stale);
 
@@ -534,7 +539,8 @@ struct branches_for_remote {
 };
 
 static int add_branch_for_removal(const char *refname,
-       const struct object_id *oid, int flags, void *cb_data)
+                                 const struct object_id *oid UNUSED,
+                                 int flags UNUSED, void *cb_data)
 {
        struct branches_for_remote *branches = cb_data;
        struct refspec_item refspec;
@@ -576,7 +582,8 @@ struct rename_info {
 };
 
 static int read_remote_branches(const char *refname,
-       const struct object_id *oid, int flags, void *cb_data)
+                               const struct object_id *oid UNUSED,
+                               int flags UNUSED, void *cb_data)
 {
        struct rename_info *rename = cb_data;
        struct strbuf buf = STRBUF_INIT;
@@ -676,7 +683,7 @@ static void handle_push_default(const char* old_name, const char* new_name)
 }
 
 
-static int mv(int argc, const char **argv)
+static int mv(int argc, const char **argv, const char *prefix)
 {
        int show_progress = isatty(2);
        struct option options[] = {
@@ -691,7 +698,7 @@ static int mv(int argc, const char **argv)
        int i, refs_renamed_nr = 0, refspec_updated = 0;
        struct progress *progress = NULL;
 
-       argc = parse_options(argc, argv, NULL, options,
+       argc = parse_options(argc, argv, prefix, options,
                             builtin_remote_rename_usage, 0);
 
        if (argc != 2)
@@ -840,7 +847,7 @@ static int mv(int argc, const char **argv)
        return 0;
 }
 
-static int rm(int argc, const char **argv)
+static int rm(int argc, const char **argv, const char *prefix)
 {
        struct option options[] = {
                OPT_END()
@@ -858,12 +865,14 @@ static int rm(int argc, const char **argv)
        cb_data.skipped = &skipped;
        cb_data.keep = &known_remotes;
 
-       if (argc != 2)
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_rm_usage, 0);
+       if (argc != 1)
                usage_with_options(builtin_remote_rm_usage, options);
 
-       remote = remote_get(argv[1]);
+       remote = remote_get(argv[0]);
        if (!remote_is_configured(remote, 1)) {
-               error(_("No such remote: '%s'"), argv[1]);
+               error(_("No such remote: '%s'"), argv[0]);
                exit(2);
        }
 
@@ -941,6 +950,7 @@ static void clear_push_info(void *util, const char *string)
 static void free_remote_ref_states(struct ref_states *states)
 {
        string_list_clear(&states->new_refs, 0);
+       string_list_clear(&states->skipped, 0);
        string_list_clear(&states->stale, 1);
        string_list_clear(&states->tracked, 0);
        string_list_clear(&states->heads, 0);
@@ -948,7 +958,8 @@ static void free_remote_ref_states(struct ref_states *states)
 }
 
 static int append_ref_to_tracked_list(const char *refname,
-       const struct object_id *oid, int flags, void *cb_data)
+                                     const struct object_id *oid UNUSED,
+                                     int flags, void *cb_data)
 {
        struct ref_states *states = cb_data;
        struct refspec_item refspec;
@@ -1035,6 +1046,8 @@ static int show_remote_info_item(struct string_list_item *item, void *cb_data)
                        arg = states->remote->name;
                } else if (string_list_has_string(&states->tracked, name))
                        arg = _(" tracked");
+               else if (string_list_has_string(&states->skipped, name))
+                       arg = _(" skipped");
                else if (string_list_has_string(&states->stale, name))
                        arg = _(" stale (use 'git remote prune' to remove)");
                else
@@ -1247,7 +1260,7 @@ static int show_all(void)
        return result;
 }
 
-static int show(int argc, const char **argv)
+static int show(int argc, const char **argv, const char *prefix)
 {
        int no_query = 0, result = 0, query_flag = 0;
        struct option options[] = {
@@ -1256,7 +1269,8 @@ static int show(int argc, const char **argv)
        };
        struct show_info info = SHOW_INFO_INIT;
 
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_show_usage,
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_show_usage,
                             0);
 
        if (argc < 1)
@@ -1307,6 +1321,7 @@ static int show(int argc, const char **argv)
                /* remote branch info */
                info.width = 0;
                for_each_string_list(&info.states.new_refs, add_remote_to_show_info, &info);
+               for_each_string_list(&info.states.skipped, add_remote_to_show_info, &info);
                for_each_string_list(&info.states.tracked, add_remote_to_show_info, &info);
                for_each_string_list(&info.states.stale, add_remote_to_show_info, &info);
                if (info.list.nr)
@@ -1349,7 +1364,7 @@ static int show(int argc, const char **argv)
        return result;
 }
 
-static int set_head(int argc, const char **argv)
+static int set_head(int argc, const char **argv, const char *prefix)
 {
        int i, opt_a = 0, opt_d = 0, result = 0;
        struct strbuf buf = STRBUF_INIT, buf2 = STRBUF_INIT;
@@ -1362,8 +1377,8 @@ static int set_head(int argc, const char **argv)
                         N_("delete refs/remotes/<name>/HEAD")),
                OPT_END()
        };
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_sethead_usage,
-                            0);
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_sethead_usage, 0);
        if (argc)
                strbuf_addf(&buf, "refs/remotes/%s/HEAD", argv[0]);
 
@@ -1454,7 +1469,7 @@ static int prune_remote(const char *remote, int dry_run)
        return result;
 }
 
-static int prune(int argc, const char **argv)
+static int prune(int argc, const char **argv, const char *prefix)
 {
        int dry_run = 0, result = 0;
        struct option options[] = {
@@ -1462,8 +1477,8 @@ static int prune(int argc, const char **argv)
                OPT_END()
        };
 
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_prune_usage,
-                            0);
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_prune_usage, 0);
 
        if (argc < 1)
                usage_with_options(builtin_remote_prune_usage, options);
@@ -1474,7 +1489,7 @@ static int prune(int argc, const char **argv)
        return result;
 }
 
-static int get_remote_default(const char *key, const char *value, void *priv)
+static int get_remote_default(const char *key, const char *value UNUSED, void *priv)
 {
        if (strcmp(key, "remotes.default") == 0) {
                int *found = priv;
@@ -1483,7 +1498,7 @@ static int get_remote_default(const char *key, const char *value, void *priv)
        return 0;
 }
 
-static int update(int argc, const char **argv)
+static int update(int argc, const char **argv, const char *prefix)
 {
        int i, prune = -1;
        struct option options[] = {
@@ -1495,7 +1510,8 @@ static int update(int argc, const char **argv)
        int default_defined = 0;
        int retval;
 
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_update_usage,
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_update_usage,
                             PARSE_OPT_KEEP_ARGV0);
 
        strvec_push(&fetch_argv, "fetch");
@@ -1566,7 +1582,7 @@ static int set_remote_branches(const char *remotename, const char **branches,
        return 0;
 }
 
-static int set_branches(int argc, const char **argv)
+static int set_branches(int argc, const char **argv, const char *prefix)
 {
        int add_mode = 0;
        struct option options[] = {
@@ -1574,7 +1590,7 @@ static int set_branches(int argc, const char **argv)
                OPT_END()
        };
 
-       argc = parse_options(argc, argv, NULL, options,
+       argc = parse_options(argc, argv, prefix, options,
                             builtin_remote_setbranches_usage, 0);
        if (argc == 0) {
                error(_("no remote specified"));
@@ -1585,7 +1601,7 @@ static int set_branches(int argc, const char **argv)
        return set_remote_branches(argv[0], argv + 1, add_mode);
 }
 
-static int get_url(int argc, const char **argv)
+static int get_url(int argc, const char **argv, const char *prefix)
 {
        int i, push_mode = 0, all_mode = 0;
        const char *remotename = NULL;
@@ -1599,7 +1615,8 @@ static int get_url(int argc, const char **argv)
                         N_("return all URLs")),
                OPT_END()
        };
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_geturl_usage, 0);
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_geturl_usage, 0);
 
        if (argc != 1)
                usage_with_options(builtin_remote_geturl_usage, options);
@@ -1638,7 +1655,7 @@ static int get_url(int argc, const char **argv)
        return 0;
 }
 
-static int set_url(int argc, const char **argv)
+static int set_url(int argc, const char **argv, const char *prefix)
 {
        int i, push_mode = 0, add_mode = 0, delete_mode = 0;
        int matches = 0, negative_matches = 0;
@@ -1659,7 +1676,8 @@ static int set_url(int argc, const char **argv)
                            N_("delete URLs")),
                OPT_END()
        };
-       argc = parse_options(argc, argv, NULL, options, builtin_remote_seturl_usage,
+       argc = parse_options(argc, argv, prefix, options,
+                            builtin_remote_seturl_usage,
                             PARSE_OPT_KEEP_ARGV0);
 
        if (add_mode && delete_mode)
@@ -1730,41 +1748,33 @@ out:
 
 int cmd_remote(int argc, const char **argv, const char *prefix)
 {
+       parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
                OPT__VERBOSE(&verbose, N_("be verbose; must be placed before a subcommand")),
+               OPT_SUBCOMMAND("add", &fn, add),
+               OPT_SUBCOMMAND("rename", &fn, mv),
+               OPT_SUBCOMMAND_F("rm", &fn, rm, PARSE_OPT_NOCOMPLETE),
+               OPT_SUBCOMMAND("remove", &fn, rm),
+               OPT_SUBCOMMAND("set-head", &fn, set_head),
+               OPT_SUBCOMMAND("set-branches", &fn, set_branches),
+               OPT_SUBCOMMAND("get-url", &fn, get_url),
+               OPT_SUBCOMMAND("set-url", &fn, set_url),
+               OPT_SUBCOMMAND("show", &fn, show),
+               OPT_SUBCOMMAND("prune", &fn, prune),
+               OPT_SUBCOMMAND("update", &fn, update),
                OPT_END()
        };
-       int result;
 
        argc = parse_options(argc, argv, prefix, options, builtin_remote_usage,
-               PARSE_OPT_STOP_AT_NON_OPTION);
+                            PARSE_OPT_SUBCOMMAND_OPTIONAL);
 
-       if (argc < 1)
-               result = show_all();
-       else if (!strcmp(argv[0], "add"))
-               result = add(argc, argv);
-       else if (!strcmp(argv[0], "rename"))
-               result = mv(argc, argv);
-       else if (!strcmp(argv[0], "rm") || !strcmp(argv[0], "remove"))
-               result = rm(argc, argv);
-       else if (!strcmp(argv[0], "set-head"))
-               result = set_head(argc, argv);
-       else if (!strcmp(argv[0], "set-branches"))
-               result = set_branches(argc, argv);
-       else if (!strcmp(argv[0], "get-url"))
-               result = get_url(argc, argv);
-       else if (!strcmp(argv[0], "set-url"))
-               result = set_url(argc, argv);
-       else if (!strcmp(argv[0], "show"))
-               result = show(argc, argv);
-       else if (!strcmp(argv[0], "prune"))
-               result = prune(argc, argv);
-       else if (!strcmp(argv[0], "update"))
-               result = update(argc, argv);
-       else {
-               error(_("Unknown subcommand: %s"), argv[0]);
-               usage_with_options(builtin_remote_usage, options);
+       if (fn) {
+               return !!fn(argc, argv, prefix);
+       } else {
+               if (argc) {
+                       error(_("unknown subcommand: `%s'"), argv[0]);
+                       usage_with_options(builtin_remote_usage, options);
+               }
+               return !!show_all();
        }
-
-       return result ? 1 : 0;
 }
index 4a7ae4cf489a4c34aaabeca064535f07c2df55f7..a5bacc7797435696cd6e23e73f847fc00d39bb7f 100644 (file)
@@ -514,9 +514,9 @@ struct midx_snapshot_ref_data {
        int preferred;
 };
 
-static int midx_snapshot_ref_one(const char *refname,
+static int midx_snapshot_ref_one(const char *refname UNUSED,
                                 const struct object_id *oid,
-                                int flag, void *_data)
+                                int flag UNUSED, void *_data)
 {
        struct midx_snapshot_ref_data *data = _data;
        struct object_id peeled;
@@ -727,7 +727,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        struct child_process cmd = CHILD_PROCESS_INIT;
        struct string_list_item *item;
        struct string_list names = STRING_LIST_INIT_DUP;
-       struct string_list rollback = STRING_LIST_INIT_NODUP;
        struct string_list existing_nonkept_packs = STRING_LIST_INIT_DUP;
        struct string_list existing_kept_packs = STRING_LIST_INIT_DUP;
        struct pack_geometry *geometry = NULL;
@@ -1117,7 +1116,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        }
 
        string_list_clear(&names, 0);
-       string_list_clear(&rollback, 0);
        string_list_clear(&existing_nonkept_packs, 0);
        string_list_clear(&existing_kept_packs, 0);
        clear_pack_geometry(geometry);
index 583702a0980e8ced3f91e1b88a9a1698be16fc6e..a29e911d3099be9d327ecc030d68c56fe046a04a 100644 (file)
@@ -106,6 +106,7 @@ static int for_each_replace_name(const char **argv, each_replace_name_fn fn)
        size_t base_len;
        int had_error = 0;
        struct object_id oid;
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
 
        strbuf_addstr(&ref, git_replace_ref_base);
        base_len = ref.len;
@@ -147,6 +148,8 @@ static int check_ref_valid(struct object_id *object,
                            struct strbuf *ref,
                            int force)
 {
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
+
        strbuf_reset(ref);
        strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object));
        if (check_refname_format(ref->buf, 0))
index 344fff8f3a9322a7895b607a8236c5461625407d..fdce6f8c85670c8b2b0e20304336debba3190408 100644 (file)
@@ -174,88 +174,6 @@ static void update_index_from_diff(struct diff_queue_struct *q,
        }
 }
 
-static int pathspec_needs_expanded_index(const struct pathspec *pathspec)
-{
-       unsigned int i, pos;
-       int res = 0;
-       char *skip_worktree_seen = NULL;
-
-       /*
-        * When using a magic pathspec, assume for the sake of simplicity that
-        * the index needs to be expanded to match all matchable files.
-        */
-       if (pathspec->magic)
-               return 1;
-
-       for (i = 0; i < pathspec->nr; i++) {
-               struct pathspec_item item = pathspec->items[i];
-
-               /*
-                * If the pathspec item has a wildcard, the index should be expanded
-                * if the pathspec has the possibility of matching a subset of entries inside
-                * of a sparse directory (but not the entire directory).
-                *
-                * If the pathspec item is a literal path, the index only needs to be expanded
-                * if a) the pathspec isn't in the sparse checkout cone (to make sure we don't
-                * expand for in-cone files) and b) it doesn't match any sparse directories
-                * (since we can reset whole sparse directories without expanding them).
-                */
-               if (item.nowildcard_len < item.len) {
-                       /*
-                        * Special case: if the pattern is a path inside the cone
-                        * followed by only wildcards, the pattern cannot match
-                        * partial sparse directories, so we know we don't need to
-                        * expand the index.
-                        *
-                        * Examples:
-                        * - in-cone/foo***: doesn't need expanded index
-                        * - not-in-cone/bar*: may need expanded index
-                        * - **.c: may need expanded index
-                        */
-                       if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len &&
-                           path_in_cone_mode_sparse_checkout(item.original, &the_index))
-                               continue;
-
-                       for (pos = 0; pos < active_nr; pos++) {
-                               struct cache_entry *ce = active_cache[pos];
-
-                               if (!S_ISSPARSEDIR(ce->ce_mode))
-                                       continue;
-
-                               /*
-                                * If the pre-wildcard length is longer than the sparse
-                                * directory name and the sparse directory is the first
-                                * component of the pathspec, need to expand the index.
-                                */
-                               if (item.nowildcard_len > ce_namelen(ce) &&
-                                   !strncmp(item.original, ce->name, ce_namelen(ce))) {
-                                       res = 1;
-                                       break;
-                               }
-
-                               /*
-                                * If the pre-wildcard length is shorter than the sparse
-                                * directory and the pathspec does not match the whole
-                                * directory, need to expand the index.
-                                */
-                               if (!strncmp(item.original, ce->name, item.nowildcard_len) &&
-                                   wildmatch(item.original, ce->name, 0)) {
-                                       res = 1;
-                                       break;
-                               }
-                       }
-               } else if (!path_in_cone_mode_sparse_checkout(item.original, &the_index) &&
-                          !matches_skip_worktree(pathspec, i, &skip_worktree_seen))
-                       res = 1;
-
-               if (res > 0)
-                       break;
-       }
-
-       free(skip_worktree_seen);
-       return res;
-}
-
 static int read_from_tree(const struct pathspec *pathspec,
                          struct object_id *tree_oid,
                          int intent_to_add)
@@ -273,7 +191,7 @@ static int read_from_tree(const struct pathspec *pathspec,
        opt.change = diff_change;
        opt.add_remove = diff_addremove;
 
-       if (pathspec->nr && the_index.sparse_index && pathspec_needs_expanded_index(pathspec))
+       if (pathspec->nr && pathspec_needs_expanded_index(&the_index, pathspec))
                ensure_full_index(&the_index);
 
        if (do_diff_cache(tree_oid, &opt))
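
With the file-local copy of pathspec_needs_expanded_index() deleted above, reset.c now relies on the shared helper, and the sparse-index guard in a builtin that walks the index reduces to the same two-line shape (a restatement of the guard from the hunk above, nothing new):

    /* Expand a sparse index only when the pathspec might match entries
     * hidden inside a sparse directory; otherwise keep it sparse.
     */
    if (pathspec->nr &&
        pathspec_needs_expanded_index(&the_index, pathspec))
            ensure_full_index(&the_index);
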
index 30fd8e83eaf2ca767802343d2dcff1825f6b1b50..fba6f5d51f32d1217b89298e1361169b43feb38b 100644 (file)
@@ -46,6 +46,7 @@ static const char rev_list_usage[] =
 "    --parents\n"
 "    --children\n"
 "    --objects | --objects-edge\n"
+"    --disk-usage[=human]\n"
 "    --unpacked\n"
 "    --header | --pretty\n"
 "    --[no-]object-names\n"
@@ -81,6 +82,7 @@ static int arg_show_object_names = 1;
 
 static int show_disk_usage;
 static off_t total_disk_usage;
+static int human_readable;
 
 static off_t get_object_disk_usage(struct object *obj)
 {
@@ -368,6 +370,17 @@ static int show_object_fast(
        return 1;
 }
 
+static void print_disk_usage(off_t size)
+{
+       struct strbuf sb = STRBUF_INIT;
+       if (human_readable)
+               strbuf_humanise_bytes(&sb, size);
+       else
+               strbuf_addf(&sb, "%"PRIuMAX, (uintmax_t)size);
+       puts(sb.buf);
+       strbuf_release(&sb);
+}
+
 static inline int parse_missing_action_value(const char *value)
 {
        if (!strcmp(value, "error")) {
@@ -473,6 +486,7 @@ static int try_bitmap_disk_usage(struct rev_info *revs,
                                 int filter_provided_objects)
 {
        struct bitmap_index *bitmap_git;
+       off_t size_from_bitmap;
 
        if (!show_disk_usage)
                return -1;
@@ -481,8 +495,8 @@ static int try_bitmap_disk_usage(struct rev_info *revs,
        if (!bitmap_git)
                return -1;
 
-       printf("%"PRIuMAX"\n",
-              (uintmax_t)get_disk_usage_from_bitmap(bitmap_git, revs));
+       size_from_bitmap = get_disk_usage_from_bitmap(bitmap_git, revs);
+       print_disk_usage(size_from_bitmap);
        return 0;
 }
 
@@ -624,7 +638,21 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
                        continue;
                }
 
-               if (!strcmp(arg, "--disk-usage")) {
+               if (skip_prefix(arg, "--disk-usage", &arg)) {
+                       if (*arg == '=') {
+                               if (!strcmp(++arg, "human")) {
+                                       human_readable = 1;
+                               } else
+                                       die(_("invalid value for '%s': '%s', the only allowed format is '%s'"),
+                                           "--disk-usage=<format>", arg, "human");
+                       } else if (*arg) {
+                               /*
+                                * Arguably should goto a label to continue chain of ifs?
+                                * Doesn't matter unless we try to add --disk-usage-foo
+                                * afterwards.
+                                */
+                               usage(rev_list_usage);
+                       }
                        show_disk_usage = 1;
                        info.flags |= REV_LIST_QUIET;
                        continue;
@@ -753,7 +781,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
        }
 
        if (show_disk_usage)
-               printf("%"PRIuMAX"\n", (uintmax_t)total_disk_usage);
+               print_disk_usage(total_disk_usage);
 
 cleanup:
        release_revisions(&revs);
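
Both the bitmap and the non-bitmap code paths above now report through print_disk_usage(), so `--disk-usage=human` switches the output from raw bytes to strbuf_humanise_bytes(). A hedged sketch of the two output styles; the byte count is invented for the example and the fragment assumes Git's strbuf.h is in scope:

    struct strbuf sb = STRBUF_INIT;
    off_t size = 3246327;                            /* example byte count */

    strbuf_humanise_bytes(&sb, size);                /* something like "3.09 MiB" */
    puts(sb.buf);

    strbuf_reset(&sb);
    strbuf_addf(&sb, "%"PRIuMAX, (uintmax_t)size);   /* plain "3246327" */
    puts(sb.buf);

    strbuf_release(&sb);
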
index b259d8990a681d1804d434cea2f1e28193e6693f..8f61050bde884303fe1ac0fcb8162a99224bf5fd 100644 (file)
@@ -195,7 +195,8 @@ static int show_default(void)
        return 0;
 }
 
-static int show_reference(const char *refname, const struct object_id *oid, int flag, void *cb_data)
+static int show_reference(const char *refname, const struct object_id *oid,
+                         int flag UNUSED, void *cb_data UNUSED)
 {
        if (ref_excluded(ref_excludes, refname))
                return 0;
@@ -203,7 +204,8 @@ static int show_reference(const char *refname, const struct object_id *oid, int
        return 0;
 }
 
-static int anti_reference(const char *refname, const struct object_id *oid, int flag, void *cb_data)
+static int anti_reference(const char *refname, const struct object_id *oid,
+                         int flag UNUSED, void *cb_data UNUSED)
 {
        show_rev(REVERSED, oid, refname);
        return 0;
@@ -479,6 +481,9 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
                if (!s)
                        s = help;
 
+               if (s == sb.buf)
+                       die(_("missing opt-spec before option flags"));
+
                if (s - sb.buf == 1) /* short option only */
                        o->short_name = *sb.buf;
                else if (sb.buf[1] != ',') /* long option only */
index f84c253f4c6f65ba273263d92658c74378b0de74..ee2a0807f011e817801f1dc45e858b99ced613a3 100644 (file)
@@ -141,7 +141,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
 
        argc = parse_options(argc, argv, NULL, options, usage_str,
                        PARSE_OPT_KEEP_ARGV0 |
-                       PARSE_OPT_KEEP_UNKNOWN);
+                       PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        prepare_repo_settings(the_repository);
        the_repository->settings.command_requires_full_index = 0;
@@ -246,6 +246,9 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
        res = run_sequencer(argc, argv, &opts);
        if (res < 0)
                die(_("revert failed"));
+       if (opts.revs)
+               release_revisions(opts.revs);
+       free(opts.revs);
        return res;
 }
 
index 84a935a16e8be447d0bc95ad2a1e5d133452e635..b6ba859fe42571fd868697db1da8dac6ffd2c32e 100644 (file)
@@ -287,6 +287,8 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
        if (!index_only)
                setup_work_tree();
 
+       prepare_repo_settings(the_repository);
+       the_repository->settings.command_requires_full_index = 0;
        hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
 
        if (read_cache() < 0)
@@ -296,8 +298,9 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
 
        seen = xcalloc(pathspec.nr, 1);
 
-       /* TODO: audit for interaction with sparse-index. */
-       ensure_full_index(&the_index);
+       if (pathspec_needs_expanded_index(&the_index, &pathspec))
+               ensure_full_index(&the_index);
+
        for (i = 0; i < active_nr; i++) {
                const struct cache_entry *ce = active_cache[i];
 
index 086dfee45aa8be8cc316132ce29bfb93c2ef295a..7a1e1fe7c0ed6d97789c352d37e4870d6f2e2b04 100644 (file)
@@ -381,6 +381,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
                        break;
                case PARSE_OPT_HELP:
                case PARSE_OPT_ERROR:
+               case PARSE_OPT_SUBCOMMAND:
                        exit(129);
                case PARSE_OPT_COMPLETE:
                        exit(0);
index 64c649c6a238605cb847bbf515cbdca58cde990c..d3f5715e3e3af468fa7b1b444454ef30f6313f0c 100644 (file)
@@ -404,7 +404,7 @@ static int append_ref(const char *refname, const struct object_id *oid,
 }
 
 static int append_head_ref(const char *refname, const struct object_id *oid,
-                          int flag, void *cb_data)
+                          int flag UNUSED, void *cb_data UNUSED)
 {
        struct object_id tmp;
        int ofs = 11;
@@ -419,7 +419,7 @@ static int append_head_ref(const char *refname, const struct object_id *oid,
 }
 
 static int append_remote_ref(const char *refname, const struct object_id *oid,
-                            int flag, void *cb_data)
+                            int flag UNUSED, void *cb_data UNUSED)
 {
        struct object_id tmp;
        int ofs = 13;
@@ -434,7 +434,7 @@ static int append_remote_ref(const char *refname, const struct object_id *oid,
 }
 
 static int append_tag_ref(const char *refname, const struct object_id *oid,
-                         int flag, void *cb_data)
+                         int flag UNUSED, void *cb_data UNUSED)
 {
        if (!starts_with(refname, "refs/tags/"))
                return 0;
index 5fa207a044e0691b9f82627b074645595603c2dd..48569061087416ee0cc78f777094e8b1ed0660db 100644 (file)
@@ -47,7 +47,7 @@ static void show_one(const char *refname, const struct object_id *oid)
 }
 
 static int show_ref(const char *refname, const struct object_id *oid,
-                   int flag, void *cbdata)
+                   int flag UNUSED, void *cbdata UNUSED)
 {
        if (show_head && !strcmp(refname, "HEAD"))
                goto match;
@@ -77,8 +77,9 @@ match:
        return 0;
 }
 
-static int add_existing(const char *refname, const struct object_id *oid,
-                       int flag, void *cbdata)
+static int add_existing(const char *refname,
+                       const struct object_id *oid UNUSED,
+                       int flag UNUSED, void *cbdata)
 {
        struct string_list *list = (struct string_list *)cbdata;
        string_list_insert(list, refname);
index f91e29b56a97eea0f8814a0bfcca45c512a3ab06..287716db68e4198bdec97517ff2cd9c053c9f0b8 100644 (file)
@@ -48,7 +48,7 @@ static char const * const builtin_sparse_checkout_list_usage[] = {
        NULL
 };
 
-static int sparse_checkout_list(int argc, const char **argv)
+static int sparse_checkout_list(int argc, const char **argv, const char *prefix)
 {
        static struct option builtin_sparse_checkout_list_options[] = {
                OPT_END(),
@@ -60,7 +60,7 @@ static int sparse_checkout_list(int argc, const char **argv)
        if (!core_apply_sparse_checkout)
                die(_("this worktree is not sparse"));
 
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_list_options,
                             builtin_sparse_checkout_list_usage, 0);
 
@@ -431,7 +431,7 @@ static struct sparse_checkout_init_opts {
        int sparse_index;
 } init_opts;
 
-static int sparse_checkout_init(int argc, const char **argv)
+static int sparse_checkout_init(int argc, const char **argv, const char *prefix)
 {
        struct pattern_list pl;
        char *sparse_filename;
@@ -452,7 +452,7 @@ static int sparse_checkout_init(int argc, const char **argv)
        init_opts.cone_mode = -1;
        init_opts.sparse_index = -1;
 
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_init_options,
                             builtin_sparse_checkout_init_usage, 0);
 
@@ -767,7 +767,7 @@ static int sparse_checkout_add(int argc, const char **argv, const char *prefix)
        argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_add_options,
                             builtin_sparse_checkout_add_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        sanitize_paths(argc, argv, prefix, add_opts.skip_checks);
 
@@ -813,7 +813,7 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix)
        argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_set_options,
                             builtin_sparse_checkout_set_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        if (update_modes(&set_opts.cone_mode, &set_opts.sparse_index))
                return 1;
@@ -843,7 +843,8 @@ static struct sparse_checkout_reapply_opts {
        int sparse_index;
 } reapply_opts;
 
-static int sparse_checkout_reapply(int argc, const char **argv)
+static int sparse_checkout_reapply(int argc, const char **argv,
+                                  const char *prefix)
 {
        static struct option builtin_sparse_checkout_reapply_options[] = {
                OPT_BOOL(0, "cone", &reapply_opts.cone_mode,
@@ -859,7 +860,7 @@ static int sparse_checkout_reapply(int argc, const char **argv)
        reapply_opts.cone_mode = -1;
        reapply_opts.sparse_index = -1;
 
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_reapply_options,
                             builtin_sparse_checkout_reapply_usage, 0);
 
@@ -876,7 +877,8 @@ static char const * const builtin_sparse_checkout_disable_usage[] = {
        NULL
 };
 
-static int sparse_checkout_disable(int argc, const char **argv)
+static int sparse_checkout_disable(int argc, const char **argv,
+                                  const char *prefix)
 {
        static struct option builtin_sparse_checkout_disable_options[] = {
                OPT_END(),
@@ -895,7 +897,7 @@ static int sparse_checkout_disable(int argc, const char **argv)
         * forcibly return to a dense checkout regardless of initial state.
         */
 
-       argc = parse_options(argc, argv, NULL,
+       argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_disable_options,
                             builtin_sparse_checkout_disable_usage, 0);
 
@@ -922,39 +924,25 @@ static int sparse_checkout_disable(int argc, const char **argv)
 
 int cmd_sparse_checkout(int argc, const char **argv, const char *prefix)
 {
-       static struct option builtin_sparse_checkout_options[] = {
+       parse_opt_subcommand_fn *fn = NULL;
+       struct option builtin_sparse_checkout_options[] = {
+               OPT_SUBCOMMAND("list", &fn, sparse_checkout_list),
+               OPT_SUBCOMMAND("init", &fn, sparse_checkout_init),
+               OPT_SUBCOMMAND("set", &fn, sparse_checkout_set),
+               OPT_SUBCOMMAND("add", &fn, sparse_checkout_add),
+               OPT_SUBCOMMAND("reapply", &fn, sparse_checkout_reapply),
+               OPT_SUBCOMMAND("disable", &fn, sparse_checkout_disable),
                OPT_END(),
        };
 
-       if (argc == 2 && !strcmp(argv[1], "-h"))
-               usage_with_options(builtin_sparse_checkout_usage,
-                                  builtin_sparse_checkout_options);
-
        argc = parse_options(argc, argv, prefix,
                             builtin_sparse_checkout_options,
-                            builtin_sparse_checkout_usage,
-                            PARSE_OPT_STOP_AT_NON_OPTION);
+                            builtin_sparse_checkout_usage, 0);
 
        git_config(git_default_config, NULL);
 
        prepare_repo_settings(the_repository);
        the_repository->settings.command_requires_full_index = 0;
 
-       if (argc > 0) {
-               if (!strcmp(argv[0], "list"))
-                       return sparse_checkout_list(argc, argv);
-               if (!strcmp(argv[0], "init"))
-                       return sparse_checkout_init(argc, argv);
-               if (!strcmp(argv[0], "set"))
-                       return sparse_checkout_set(argc, argv, prefix);
-               if (!strcmp(argv[0], "add"))
-                       return sparse_checkout_add(argc, argv, prefix);
-               if (!strcmp(argv[0], "reapply"))
-                       return sparse_checkout_reapply(argc, argv);
-               if (!strcmp(argv[0], "disable"))
-                       return sparse_checkout_disable(argc, argv);
-       }
-
-       usage_with_options(builtin_sparse_checkout_usage,
-                          builtin_sparse_checkout_options);
+       return fn(argc, argv, prefix);
 }
index 30fa1014605425b3731d5f65c16338a7164f5140..2274aae2556cc1837a3c7036fb52be2e6653a0e7 100644 (file)
@@ -638,9 +638,12 @@ cleanup:
        return ret;
 }
 
-static int reject_reflog_ent(struct object_id *ooid, struct object_id *noid,
-                            const char *email, timestamp_t timestamp, int tz,
-                            const char *message, void *cb_data)
+static int reject_reflog_ent(struct object_id *ooid UNUSED,
+                            struct object_id *noid UNUSED,
+                            const char *email UNUSED,
+                            timestamp_t timestamp UNUSED,
+                            int tz UNUSED, const char *message UNUSED,
+                            void *cb_data UNUSED)
 {
        return 1;
 }
@@ -782,7 +785,7 @@ static int list_stash(int argc, const char **argv, const char *prefix)
 
        argc = parse_options(argc, argv, prefix, options,
                             git_stash_list_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        if (!ref_exists(ref_stash))
                return 0;
@@ -873,7 +876,7 @@ static int show_stash(int argc, const char **argv, const char *prefix)
        init_revisions(&rev, prefix);
 
        argc = parse_options(argc, argv, prefix, options, git_stash_show_usage,
-                            PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN |
+                            PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
                             PARSE_OPT_KEEP_DASHDASH);
 
        strvec_push(&revision_args, argv[0]);
@@ -979,7 +982,7 @@ static int store_stash(int argc, const char **argv, const char *prefix)
 
        argc = parse_options(argc, argv, prefix, options,
                             git_stash_store_usage,
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        if (argc != 1) {
                if (!quiet)
@@ -1739,6 +1742,11 @@ static int push_stash(int argc, const char **argv, const char *prefix,
                             include_untracked, only_staged);
 }
 
+static int push_stash_unassumed(int argc, const char **argv, const char *prefix)
+{
+       return push_stash(argc, argv, prefix, 0);
+}
+
 static int save_stash(int argc, const char **argv, const char *prefix)
 {
        int keep_index = -1;
@@ -1787,15 +1795,28 @@ int cmd_stash(int argc, const char **argv, const char *prefix)
        pid_t pid = getpid();
        const char *index_file;
        struct strvec args = STRVEC_INIT;
-
+       parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
+               OPT_SUBCOMMAND("apply", &fn, apply_stash),
+               OPT_SUBCOMMAND("clear", &fn, clear_stash),
+               OPT_SUBCOMMAND("drop", &fn, drop_stash),
+               OPT_SUBCOMMAND("pop", &fn, pop_stash),
+               OPT_SUBCOMMAND("branch", &fn, branch_stash),
+               OPT_SUBCOMMAND("list", &fn, list_stash),
+               OPT_SUBCOMMAND("show", &fn, show_stash),
+               OPT_SUBCOMMAND("store", &fn, store_stash),
+               OPT_SUBCOMMAND("create", &fn, create_stash),
+               OPT_SUBCOMMAND("push", &fn, push_stash_unassumed),
+               OPT_SUBCOMMAND_F("save", &fn, save_stash, PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
 
        git_config(git_stash_config, NULL);
 
        argc = parse_options(argc, argv, prefix, options, git_stash_usage,
-                            PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH);
+                            PARSE_OPT_SUBCOMMAND_OPTIONAL |
+                            PARSE_OPT_KEEP_UNKNOWN_OPT |
+                            PARSE_OPT_KEEP_DASHDASH);
 
        prepare_repo_settings(the_repository);
        the_repository->settings.command_requires_full_index = 0;
@@ -1804,33 +1825,10 @@ int cmd_stash(int argc, const char **argv, const char *prefix)
        strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file,
                    (uintmax_t)pid);
 
-       if (!argc)
-               return !!push_stash(0, NULL, prefix, 0);
-       else if (!strcmp(argv[0], "apply"))
-               return !!apply_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "clear"))
-               return !!clear_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "drop"))
-               return !!drop_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "pop"))
-               return !!pop_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "branch"))
-               return !!branch_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "list"))
-               return !!list_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "show"))
-               return !!show_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "store"))
-               return !!store_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "create"))
-               return !!create_stash(argc, argv, prefix);
-       else if (!strcmp(argv[0], "push"))
-               return !!push_stash(argc, argv, prefix, 0);
-       else if (!strcmp(argv[0], "save"))
-               return !!save_stash(argc, argv, prefix);
-       else if (*argv[0] != '-')
-               usage_msg_optf(_("unknown subcommand: %s"),
-                              git_stash_usage, options, argv[0]);
+       if (fn)
+               return !!fn(argc, argv, prefix);
+       else if (!argc)
+               return !!push_stash_unassumed(0, NULL, prefix);
 
        /* Assume 'stash push' */
        strvec_push(&args, "push");
index c597df7528ee56cd1e35c65f409e73a99769a5ee..ad834f9ed5852c0068434eca8f94c4304ab384d1 100644 (file)
 typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
                                  void *cb_data);
 
-static char *repo_get_default_remote(struct repository *repo)
+static int repo_get_default_remote(struct repository *repo, char **default_remote)
 {
-       char *dest = NULL, *ret;
+       char *dest = NULL;
        struct strbuf sb = STRBUF_INIT;
        struct ref_store *store = get_main_ref_store(repo);
        const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL,
                                                      NULL);
 
        if (!refname)
-               die(_("No such ref: %s"), "HEAD");
+               return die_message(_("No such ref: %s"), "HEAD");
 
        /* detached HEAD */
-       if (!strcmp(refname, "HEAD"))
-               return xstrdup("origin");
+       if (!strcmp(refname, "HEAD")) {
+               *default_remote = xstrdup("origin");
+               return 0;
+       }
 
        if (!skip_prefix(refname, "refs/heads/", &refname))
-               die(_("Expecting a full ref name, got %s"), refname);
+               return die_message(_("Expecting a full ref name, got %s"),
+                                  refname);
 
        strbuf_addf(&sb, "branch.%s.remote", refname);
        if (repo_config_get_string(repo, sb.buf, &dest))
-               ret = xstrdup("origin");
+               *default_remote = xstrdup("origin");
        else
-               ret = dest;
+               *default_remote = dest;
 
        strbuf_release(&sb);
-       return ret;
+       return 0;
 }
 
-static char *get_default_remote_submodule(const char *module_path)
+static int get_default_remote_submodule(const char *module_path, char **default_remote)
 {
        struct repository subrepo;
+       int ret;
 
-       repo_submodule_init(&subrepo, the_repository, module_path, null_oid());
-       return repo_get_default_remote(&subrepo);
+       if (repo_submodule_init(&subrepo, the_repository, module_path,
+                               null_oid()) < 0)
+               return die_message(_("could not get a repository handle for submodule '%s'"),
+                                  module_path);
+       ret = repo_get_default_remote(&subrepo, default_remote);
+       repo_clear(&subrepo);
+
+       return ret;
 }
 
 static char *get_default_remote(void)
 {
-       return repo_get_default_remote(the_repository);
+       char *default_remote;
+       int code = repo_get_default_remote(the_repository, &default_remote);
+
+       if (code)
+               exit(code);
+
+       return default_remote;
 }
 
 static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet)
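
The hunk above turns repo_get_default_remote() from a function that die()s into one that returns the code from die_message(), so callers such as get_default_remote_submodule() can clean up (repo_clear()) before deciding whether to exit. Distilled into a hedged sketch; compute_thing() and its caller are invented names:

    static int compute_thing(struct repository *repo, char **out)
    {
            if (!repo)
                    /* prints "fatal: ..." and returns the exit code die() would use */
                    return die_message(_("no repository"));
            *out = xstrdup("value");
            return 0;
    }

    static void caller(void)
    {
            char *thing;
            int code = compute_thing(the_repository, &thing);

            if (code)
                    exit(code);
            /* ... use thing, then free it ... */
            free(thing);
    }
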
@@ -96,32 +112,11 @@ static char *resolve_relative_url(const char *rel_url, const char *up_path, int
        return resolved_url;
 }
 
-static int resolve_relative_url_test(int argc, const char **argv, const char *prefix)
+/* the result should be freed by the caller. */
+static char *get_submodule_displaypath(const char *path, const char *prefix)
 {
-       char *remoteurl, *res;
-       const char *up_path, *url;
-
-       if (argc != 4)
-               die("resolve-relative-url-test only accepts three arguments: <up_path> <remoteurl> <url>");
-
-       up_path = argv[1];
-       remoteurl = xstrdup(argv[2]);
-       url = argv[3];
-
-       if (!strcmp(up_path, "(null)"))
-               up_path = NULL;
-
-       res = relative_url(remoteurl, url, up_path);
-       puts(res);
-       free(res);
-       free(remoteurl);
-       return 0;
-}
+       const char *super_prefix = get_super_prefix();
 
-static char *do_get_submodule_displaypath(const char *path,
-                                         const char *prefix,
-                                         const char *super_prefix)
-{
        if (prefix && super_prefix) {
                BUG("cannot have prefix '%s' and superprefix '%s'",
                    prefix, super_prefix);
@@ -137,13 +132,6 @@ static char *do_get_submodule_displaypath(const char *path,
        }
 }
 
-/* the result should be freed by the caller. */
-static char *get_submodule_displaypath(const char *path, const char *prefix)
-{
-       const char *super_prefix = get_super_prefix();
-       return do_get_submodule_displaypath(path, prefix, super_prefix);
-}
-
 static char *compute_rev_name(const char *sub_path, const char* object_id)
 {
        struct strbuf sb = STRBUF_INIT;
@@ -188,6 +176,11 @@ struct module_list {
 };
 #define MODULE_LIST_INIT { 0 }
 
+static void module_list_release(struct module_list *ml)
+{
+       free(ml->entries);
+}
+
 static int module_list_compute(int argc, const char **argv,
                               const char *prefix,
                               struct pathspec *pathspec,
@@ -195,6 +188,7 @@ static int module_list_compute(int argc, const char **argv,
 {
        int i, result = 0;
        char *ps_matched = NULL;
+
        parse_pathspec(pathspec, 0,
                       PATHSPEC_PREFER_FULL,
                       prefix, argv);
@@ -249,7 +243,7 @@ static void module_list_active(struct module_list *list)
                active_modules.entries[active_modules.nr++] = ce;
        }
 
-       free(list->entries);
+       module_list_release(list);
        *list = active_modules;
 }
 
@@ -272,49 +266,11 @@ static char *get_up_path(const char *path)
        return strbuf_detach(&sb, NULL);
 }
 
-static int module_list(int argc, const char **argv, const char *prefix)
-{
-       int i;
-       struct pathspec pathspec;
-       struct module_list list = MODULE_LIST_INIT;
-
-       struct option module_list_options[] = {
-               OPT_STRING(0, "prefix", &prefix,
-                          N_("path"),
-                          N_("alternative anchor for relative paths")),
-               OPT_END()
-       };
-
-       const char *const git_submodule_helper_usage[] = {
-               N_("git submodule--helper list [--prefix=<path>] [<path>...]"),
-               NULL
-       };
-
-       argc = parse_options(argc, argv, prefix, module_list_options,
-                            git_submodule_helper_usage, 0);
-
-       if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               return 1;
-
-       for (i = 0; i < list.nr; i++) {
-               const struct cache_entry *ce = list.entries[i];
-
-               if (ce_stage(ce))
-                       printf("%06o %s U\t", ce->ce_mode,
-                              oid_to_hex(null_oid()));
-               else
-                       printf("%06o %s %d\t", ce->ce_mode,
-                              oid_to_hex(&ce->oid), ce_stage(ce));
-
-               fprintf(stdout, "%s\n", ce->name);
-       }
-       return 0;
-}
-
 static void for_each_listed_submodule(const struct module_list *list,
                                      each_submodule_fn fn, void *cb_data)
 {
        int i;
+
        for (i = 0; i < list->nr; i++)
                fn(list->entries[i], cb_data);
 }
@@ -334,7 +290,6 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
        struct foreach_cb *info = cb_data;
        const char *path = list_item->name;
        const struct object_id *ce_oid = &list_item->oid;
-
        const struct submodule *sub;
        struct child_process cp = CHILD_PROCESS_INIT;
        char *displaypath;
@@ -433,26 +388,25 @@ cleanup:
 static int module_foreach(int argc, const char **argv, const char *prefix)
 {
        struct foreach_cb info = FOREACH_CB_INIT;
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
        struct module_list list = MODULE_LIST_INIT;
-
        struct option module_foreach_options[] = {
                OPT__QUIET(&info.quiet, N_("suppress output of entering each submodule command")),
                OPT_BOOL(0, "recursive", &info.recursive,
                         N_("recurse into nested submodules")),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
-               N_("git submodule--helper foreach [--quiet] [--recursive] [--] <command>"),
+               N_("git submodule foreach [--quiet] [--recursive] [--] <command>"),
                NULL
        };
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, module_foreach_options,
                             git_submodule_helper_usage, 0);
 
        if (module_list_compute(0, NULL, prefix, &pathspec, &list) < 0)
-               return 1;
+               goto cleanup;
 
        info.argc = argc;
        info.argv = argv;
@@ -460,7 +414,11 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
 
        for_each_listed_submodule(&list, runcommand_in_submodule_cb, &info);
 
-       return 0;
+       ret = 0;
+cleanup:
+       module_list_release(&list);
+       clear_pathspec(&pathspec);
+       return ret;
 }
 
 static int starts_with_dot_slash(const char *const path)
@@ -477,22 +435,19 @@ static int starts_with_dot_dot_slash(const char *const path)
 
 struct init_cb {
        const char *prefix;
-       const char *superprefix;
        unsigned int flags;
 };
 #define INIT_CB_INIT { 0 }
 
 static void init_submodule(const char *path, const char *prefix,
-                          const char *superprefix, unsigned int flags)
+                          unsigned int flags)
 {
        const struct submodule *sub;
        struct strbuf sb = STRBUF_INIT;
-       char *upd = NULL, *url = NULL, *displaypath;
+       const char *upd;
+       char *url = NULL, *displaypath;
 
-       /* try superprefix from the environment, if it is not passed explicitly */
-       if (!superprefix)
-               superprefix = get_super_prefix();
-       displaypath = do_get_submodule_displaypath(path, prefix, superprefix);
+       displaypath = get_submodule_displaypath(path, prefix);
 
        sub = submodule_from_path(the_repository, null_oid(), path);
 
@@ -529,6 +484,7 @@ static void init_submodule(const char *path, const char *prefix,
                if (starts_with_dot_dot_slash(url) ||
                    starts_with_dot_slash(url)) {
                        char *oldurl = url;
+
                        url = resolve_relative_url(oldurl, NULL, 0);
                        free(oldurl);
                }
@@ -545,14 +501,15 @@ static void init_submodule(const char *path, const char *prefix,
 
        /* Copy "update" setting when it is not set yet */
        strbuf_addf(&sb, "submodule.%s.update", sub->name);
-       if (git_config_get_string(sb.buf, &upd) &&
+       if (git_config_get_string_tmp(sb.buf, &upd) &&
            sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
                if (sub->update_strategy.type == SM_UPDATE_COMMAND) {
                        fprintf(stderr, _("warning: command update mode suggested for submodule '%s'\n"),
                                sub->name);
-                       upd = xstrdup("none");
-               } else
-                       upd = xstrdup(submodule_strategy_to_string(&sub->update_strategy));
+                       upd = "none";
+               } else {
+                       upd = submodule_update_type_to_string(sub->update_strategy.type);
+               }
 
                if (git_config_set_gently(sb.buf, upd))
                        die(_("Failed to register update mode for submodule path '%s'"), displaypath);
@@ -560,37 +517,36 @@ static void init_submodule(const char *path, const char *prefix,
        strbuf_release(&sb);
        free(displaypath);
        free(url);
-       free(upd);
 }
 
 static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data)
 {
        struct init_cb *info = cb_data;
-       init_submodule(list_item->name, info->prefix, info->superprefix, info->flags);
+
+       init_submodule(list_item->name, info->prefix, info->flags);
 }
 
 static int module_init(int argc, const char **argv, const char *prefix)
 {
        struct init_cb info = INIT_CB_INIT;
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
        struct module_list list = MODULE_LIST_INIT;
        int quiet = 0;
-
        struct option module_init_options[] = {
                OPT__QUIET(&quiet, N_("suppress output for initializing a submodule")),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
-               N_("git submodule--helper init [<options>] [<path>]"),
+               N_("git submodule init [<options>] [<path>]"),
                NULL
        };
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, module_init_options,
                             git_submodule_helper_usage, 0);
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               return 1;
+               goto cleanup;
 
        /*
         * If there are no path args and submodule.active is set then,
@@ -605,7 +561,11 @@ static int module_init(int argc, const char **argv, const char *prefix)
 
        for_each_listed_submodule(&list, init_submodule_cb, &info);
 
-       return 0;
+       ret = 0;
+cleanup:
+       module_list_release(&list);
+       clear_pathspec(&pathspec);
+       return ret;
 }
 
 struct status_cb {
@@ -623,20 +583,23 @@ static void print_status(unsigned int flags, char state, const char *path,
        printf("%c%s %s", state, oid_to_hex(oid), displaypath);
 
        if (state == ' ' || state == '+') {
-               const char *name = compute_rev_name(path, oid_to_hex(oid));
+               char *name = compute_rev_name(path, oid_to_hex(oid));
 
                if (name)
                        printf(" (%s)", name);
+               free(name);
        }
 
        printf("\n");
 }
 
-static int handle_submodule_head_ref(const char *refname,
-                                    const struct object_id *oid, int flags,
+static int handle_submodule_head_ref(const char *refname UNUSED,
+                                    const struct object_id *oid,
+                                    int flags UNUSED,
                                     void *cb_data)
 {
        struct object_id *output = cb_data;
+
        if (oid)
                oidcpy(output, oid);
 
@@ -743,6 +706,7 @@ static void status_submodule_cb(const struct cache_entry *list_item,
                                void *cb_data)
 {
        struct status_cb *info = cb_data;
+
        status_submodule(list_item->name, &list_item->oid, list_item->ce_flags,
                         info->prefix, info->flags);
 }
@@ -750,27 +714,26 @@ static void status_submodule_cb(const struct cache_entry *list_item,
 static int module_status(int argc, const char **argv, const char *prefix)
 {
        struct status_cb info = STATUS_CB_INIT;
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
        struct module_list list = MODULE_LIST_INIT;
        int quiet = 0;
-
        struct option module_status_options[] = {
                OPT__QUIET(&quiet, N_("suppress submodule status output")),
                OPT_BIT(0, "cached", &info.flags, N_("use commit stored in the index instead of the one stored in the submodule HEAD"), OPT_CACHED),
                OPT_BIT(0, "recursive", &info.flags, N_("recurse into nested submodules"), OPT_RECURSIVE),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
                N_("git submodule status [--quiet] [--cached] [--recursive] [<path>...]"),
                NULL
        };
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, module_status_options,
                             git_submodule_helper_usage, 0);
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               return 1;
+               goto cleanup;
 
        info.prefix = prefix;
        if (quiet)
@@ -778,25 +741,11 @@ static int module_status(int argc, const char **argv, const char *prefix)
 
        for_each_listed_submodule(&list, status_submodule_cb, &info);
 
-       return 0;
-}
-
-static int module_name(int argc, const char **argv, const char *prefix)
-{
-       const struct submodule *sub;
-
-       if (argc != 2)
-               usage(_("git submodule--helper name <path>"));
-
-       sub = submodule_from_path(the_repository, null_oid(), argv[1]);
-
-       if (!sub)
-               die(_("no submodule mapping found in .gitmodules for path '%s'"),
-                   argv[1]);
-
-       printf("%s\n", sub->name);
-
-       return 0;
+       ret = 0;
+cleanup:
+       module_list_release(&list);
+       clear_pathspec(&pathspec);
+       return ret;
 }
 
 struct module_cb {
@@ -805,16 +754,34 @@ struct module_cb {
        struct object_id oid_src;
        struct object_id oid_dst;
        char status;
-       const char *sm_path;
+       char *sm_path;
 };
 #define MODULE_CB_INIT { 0 }
 
+static void module_cb_release(struct module_cb *mcb)
+{
+       free(mcb->sm_path);
+}
+
 struct module_cb_list {
        struct module_cb **entries;
        int alloc, nr;
 };
 #define MODULE_CB_LIST_INIT { 0 }
 
+static void module_cb_list_release(struct module_cb_list *mcbl)
+{
+       int i;
+
+       for (i = 0; i < mcbl->nr; i++) {
+               struct module_cb *mcb = mcbl->entries[i];
+
+               module_cb_release(mcb);
+               free(mcb);
+       }
+       free(mcbl->entries);
+}
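
The new module_cb_list_release() follows the usual order for an owning list: free what each entry owns, then the entry itself, then the backing array. The same shape in a self-contained sketch (struct item and item_list are stand-ins, and allocation checks are left out for brevity):

    #include <stdlib.h>
    #include <string.h>

    struct item {
            char *path;                     /* owned by the item */
    };

    struct item_list {
            struct item **entries;          /* owned array of owned items */
            int nr;
    };

    static void item_list_release(struct item_list *list)
    {
            int i;

            for (i = 0; i < list->nr; i++) {
                    free(list->entries[i]->path);   /* what the entry owns ... */
                    free(list->entries[i]);         /* ... then the entry ... */
            }
            free(list->entries);                    /* ... then the array */
    }

    int main(void)
    {
            struct item_list list = { 0 };

            /* allocation checks omitted for brevity */
            list.entries = malloc(sizeof(*list.entries));
            list.entries[0] = malloc(sizeof(*list.entries[0]));
            list.entries[0]->path = strdup("sub/dir");
            list.nr = 1;

            item_list_release(&list);
            return 0;
    }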
+
 struct summary_cb {
        int argc;
        const char **argv;
@@ -851,7 +818,7 @@ static char *verify_submodule_committish(const char *sm_path,
        return strbuf_detach(&result, NULL);
 }
 
-static void print_submodule_summary(struct summary_cb *info, char *errmsg,
+static void print_submodule_summary(struct summary_cb *info, const char *errmsg,
                                    int total_commits, const char *displaypath,
                                    const char *src_abbrev, const char *dst_abbrev,
                                    struct module_cb *p)
@@ -909,12 +876,13 @@ static void generate_submodule_summary(struct summary_cb *info,
 {
        char *displaypath, *src_abbrev = NULL, *dst_abbrev;
        int missing_src = 0, missing_dst = 0;
-       char *errmsg = NULL;
+       struct strbuf errmsg = STRBUF_INIT;
        int total_commits = -1;
 
        if (!info->cached && oideq(&p->oid_dst, null_oid())) {
                if (S_ISGITLINK(p->mod_dst)) {
                        struct ref_store *refs = get_submodule_ref_store(p->sm_path);
+
                        if (refs)
                                refs_head_ref(refs, handle_submodule_head_ref, &p->oid_dst);
                } else if (S_ISLNK(p->mod_dst) || S_ISREG(p->mod_dst)) {
@@ -1009,28 +977,27 @@ static void generate_submodule_summary(struct summary_cb *info,
                 * submodule, i.e., deleted or changed to blob
                 */
                if (S_ISGITLINK(p->mod_dst)) {
-                       struct strbuf errmsg_str = STRBUF_INIT;
                        if (missing_src && missing_dst) {
-                               strbuf_addf(&errmsg_str, "  Warn: %s doesn't contain commits %s and %s\n",
+                               strbuf_addf(&errmsg, "  Warn: %s doesn't contain commits %s and %s\n",
                                            displaypath, oid_to_hex(&p->oid_src),
                                            oid_to_hex(&p->oid_dst));
                        } else {
-                               strbuf_addf(&errmsg_str, "  Warn: %s doesn't contain commit %s\n",
+                               strbuf_addf(&errmsg, "  Warn: %s doesn't contain commit %s\n",
                                            displaypath, missing_src ?
                                            oid_to_hex(&p->oid_src) :
                                            oid_to_hex(&p->oid_dst));
                        }
-                       errmsg = strbuf_detach(&errmsg_str, NULL);
                }
        }
 
-       print_submodule_summary(info, errmsg, total_commits,
-                               displaypath, src_abbrev,
+       print_submodule_summary(info, errmsg.len ? errmsg.buf : NULL,
+                               total_commits, displaypath, src_abbrev,
                                dst_abbrev, p);
 
        free(displaypath);
        free(src_abbrev);
        free(dst_abbrev);
+       strbuf_release(&errmsg);
 }
 
 static void prepare_submodule_summary(struct summary_cb *info,
@@ -1114,6 +1081,9 @@ static int compute_summary_module_list(struct object_id *head_oid,
 {
        struct strvec diff_args = STRVEC_INIT;
        struct rev_info rev;
+       struct setup_revision_opt opt = {
+               .free_removed_argv_elements = 1,
+       };
        struct module_cb_list list = MODULE_CB_LIST_INIT;
        int ret = 0;
 
@@ -1131,7 +1101,7 @@ static int compute_summary_module_list(struct object_id *head_oid,
        init_revisions(&rev, info->prefix);
        rev.abbrev = 0;
        precompose_argv_prefix(diff_args.nr, diff_args.v, NULL);
-       setup_revisions(diff_args.nr, diff_args.v, &rev, NULL);
+       setup_revisions(diff_args.nr, diff_args.v, &rev, &opt);
        rev.diffopt.output_format = DIFF_FORMAT_NO_OUTPUT | DIFF_FORMAT_CALLBACK;
        rev.diffopt.format_callback = submodule_summary_callback;
        rev.diffopt.format_callback_data = &list;
@@ -1158,6 +1128,7 @@ static int compute_summary_module_list(struct object_id *head_oid,
 cleanup:
        strvec_clear(&diff_args);
        release_revisions(&rev);
+       module_cb_list_release(&list);
        return ret;
 }
 
@@ -1171,7 +1142,6 @@ static int module_summary(int argc, const char **argv, const char *prefix)
        enum diff_cmd diff_cmd = DIFF_INDEX;
        struct object_id head_oid;
        int ret;
-
        struct option module_summary_options[] = {
                OPT_BOOL(0, "cached", &cached,
                         N_("use the commit stored in the index instead of the submodule HEAD")),
@@ -1183,9 +1153,8 @@ static int module_summary(int argc, const char **argv, const char *prefix)
                             N_("limit the summary size")),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
-               N_("git submodule--helper summary [<options>] [<commit>] [--] [<path>]"),
+               N_("git submodule summary [<options>] [<commit>] [--] [<path>]"),
                NULL
        };
 
@@ -1245,6 +1214,7 @@ static void sync_submodule(const char *path, const char *prefix,
        char *sub_origin_url, *super_config_url, *displaypath, *default_remote;
        struct strbuf sb = STRBUF_INIT;
        char *sub_config_path = NULL;
+       int code;
 
        if (!is_submodule_active(the_repository, path))
                return;
@@ -1255,6 +1225,7 @@ static void sync_submodule(const char *path, const char *prefix,
                if (starts_with_dot_dot_slash(sub->url) ||
                    starts_with_dot_slash(sub->url)) {
                        char *up_path = get_up_path(path);
+
                        sub_origin_url = resolve_relative_url(sub->url, up_path, 1);
                        super_config_url = resolve_relative_url(sub->url, NULL, 1);
                        free(up_path);
@@ -1283,10 +1254,9 @@ static void sync_submodule(const char *path, const char *prefix,
                goto cleanup;
 
        strbuf_reset(&sb);
-       default_remote = get_default_remote_submodule(path);
-       if (!default_remote)
-               die(_("failed to get the default remote for submodule '%s'"),
-                     path);
+       code = get_default_remote_submodule(path, &default_remote);
+       if (code)
+               exit(code);
 
        remote_key = xstrfmt("remote.%s.url", default_remote);
        free(default_remote);
@@ -1330,34 +1300,34 @@ cleanup:
 static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data)
 {
        struct sync_cb *info = cb_data;
+
        sync_submodule(list_item->name, info->prefix, info->flags);
 }
 
 static int module_sync(int argc, const char **argv, const char *prefix)
 {
        struct sync_cb info = SYNC_CB_INIT;
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
        struct module_list list = MODULE_LIST_INIT;
        int quiet = 0;
        int recursive = 0;
-
        struct option module_sync_options[] = {
                OPT__QUIET(&quiet, N_("suppress output of synchronizing submodule url")),
                OPT_BOOL(0, "recursive", &recursive,
                        N_("recurse into nested submodules")),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
-               N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"),
+               N_("git submodule sync [--quiet] [--recursive] [<path>]"),
                NULL
        };
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, module_sync_options,
                             git_submodule_helper_usage, 0);
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               return 1;
+               goto cleanup;
 
        info.prefix = prefix;
        if (quiet)
@@ -1367,7 +1337,11 @@ static int module_sync(int argc, const char **argv, const char *prefix)
 
        for_each_listed_submodule(&list, sync_submodule_cb, &info);
 
-       return 0;
+       ret = 0;
+cleanup:
+       module_list_release(&list);
+       clear_pathspec(&pathspec);
+       return ret;
 }
 
 struct deinit_cb {
@@ -1411,6 +1385,7 @@ static void deinit_submodule(const char *path, const char *prefix,
 
                if (!(flags & OPT_FORCE)) {
                        struct child_process cp_rm = CHILD_PROCESS_INIT;
+
                        cp_rm.git_cmd = 1;
                        strvec_pushl(&cp_rm.args, "rm", "-qn",
                                     path, NULL);
@@ -1447,6 +1422,7 @@ static void deinit_submodule(const char *path, const char *prefix,
        /* remove the .git/config entries (unless the user already did it) */
        if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) {
                char *sub_key = xstrfmt("submodule.%s", sub->name);
+
                /*
                 * remove the whole section so we have a clean state when
                 * the user later decides to init this submodule again
@@ -1474,23 +1450,22 @@ static void deinit_submodule_cb(const struct cache_entry *list_item,
 static int module_deinit(int argc, const char **argv, const char *prefix)
 {
        struct deinit_cb info = DEINIT_CB_INIT;
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
        struct module_list list = MODULE_LIST_INIT;
        int quiet = 0;
        int force = 0;
        int all = 0;
-
        struct option module_deinit_options[] = {
                OPT__QUIET(&quiet, N_("suppress submodule status output")),
                OPT__FORCE(&force, N_("remove submodule working trees even if they contain local changes"), 0),
                OPT_BOOL(0, "all", &all, N_("unregister all submodules")),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
                N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"),
                NULL
        };
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, module_deinit_options,
                             git_submodule_helper_usage, 0);
@@ -1505,7 +1480,7 @@ static int module_deinit(int argc, const char **argv, const char *prefix)
                die(_("Use '--all' if you really want to deinitialize all submodules"));
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               return 1;
+               goto cleanup;
 
        info.prefix = prefix;
        if (quiet)
@@ -1515,7 +1490,11 @@ static int module_deinit(int argc, const char **argv, const char *prefix)
 
        for_each_listed_submodule(&list, deinit_submodule_cb, &info);
 
-       return 0;
+       ret = 0;
+cleanup:
+       module_list_release(&list);
+       clear_pathspec(&pathspec);
+       return ret;
 }
 
 struct module_clone_data {
@@ -1525,7 +1504,6 @@ struct module_clone_data {
        const char *url;
        const char *depth;
        struct list_objects_filter_options *filter_options;
-       struct string_list reference;
        unsigned int quiet: 1;
        unsigned int progress: 1;
        unsigned int dissociate: 1;
@@ -1533,7 +1511,6 @@ struct module_clone_data {
        int single_branch;
 };
 #define MODULE_CLONE_DATA_INIT { \
-       .reference = STRING_LIST_INIT_NODUP, \
        .single_branch = -1, \
 }
 
@@ -1574,7 +1551,9 @@ static int add_possible_reference_from_superproject(
                struct strbuf err = STRBUF_INIT;
                strbuf_add(&sb, odb->path, len);
 
-               repo_init(&alternate, sb.buf, NULL);
+               if (repo_init(&alternate, sb.buf, NULL) < 0)
+                       die(_("could not get a repository handle for gitdir '%s'"),
+                           sb.buf);
 
                /*
                 * We need to end the new path with '/' to mark it as a dir,
@@ -1589,7 +1568,9 @@ static int add_possible_reference_from_superproject(
 
                sm_alternate = compute_alternate_path(sb.buf, &err);
                if (sm_alternate) {
-                       string_list_append(sas->reference, xstrdup(sb.buf));
+                       char *p = strbuf_detach(&sb, NULL);
+
+                       string_list_append(sas->reference, p)->util = p;
                        free(sm_alternate);
                } else {
                        switch (sas->error_mode) {
@@ -1648,23 +1629,31 @@ static void prepare_possible_alternates(const char *sm_name,
        free(error_strategy);
 }
 
-static int clone_submodule(struct module_clone_data *clone_data)
+static char *clone_submodule_sm_gitdir(const char *name)
 {
-       char *p, *sm_gitdir;
-       char *sm_alternate = NULL, *error_strategy = NULL;
        struct strbuf sb = STRBUF_INIT;
-       struct child_process cp = CHILD_PROCESS_INIT;
+       char *sm_gitdir;
 
-       submodule_name_to_gitdir(&sb, the_repository, clone_data->name);
+       submodule_name_to_gitdir(&sb, the_repository, name);
        sm_gitdir = absolute_pathdup(sb.buf);
-       strbuf_reset(&sb);
+       strbuf_release(&sb);
 
-       if (!is_absolute_path(clone_data->path)) {
-               strbuf_addf(&sb, "%s/%s", get_git_work_tree(), clone_data->path);
-               clone_data->path = strbuf_detach(&sb, NULL);
-       } else {
-               clone_data->path = xstrdup(clone_data->path);
-       }
+       return sm_gitdir;
+}
+
+static int clone_submodule(const struct module_clone_data *clone_data,
+                          struct string_list *reference)
+{
+       char *p;
+       char *sm_gitdir = clone_submodule_sm_gitdir(clone_data->name);
+       char *sm_alternate = NULL, *error_strategy = NULL;
+       struct child_process cp = CHILD_PROCESS_INIT;
+       const char *clone_data_path = clone_data->path;
+       char *to_free = NULL;
+
+       if (!is_absolute_path(clone_data->path))
+               clone_data_path = to_free = xstrfmt("%s/%s", get_git_work_tree(),
+                                                   clone_data->path);
 
        if (validate_submodule_git_dir(sm_gitdir, clone_data->name) < 0)
                die(_("refusing to create/use '%s' in another submodule's "
@@ -1674,7 +1663,7 @@ static int clone_submodule(struct module_clone_data *clone_data)
                if (safe_create_leading_directories_const(sm_gitdir) < 0)
                        die(_("could not create directory '%s'"), sm_gitdir);
 
-               prepare_possible_alternates(clone_data->name, &clone_data->reference);
+               prepare_possible_alternates(clone_data->name, reference);
 
                strvec_push(&cp.args, "clone");
                strvec_push(&cp.args, "--no-checkout");
@@ -1684,9 +1673,10 @@ static int clone_submodule(struct module_clone_data *clone_data)
                        strvec_push(&cp.args, "--progress");
                if (clone_data->depth && *(clone_data->depth))
                        strvec_pushl(&cp.args, "--depth", clone_data->depth, NULL);
-               if (clone_data->reference.nr) {
+               if (reference->nr) {
                        struct string_list_item *item;
-                       for_each_string_list_item(item, &clone_data->reference)
+
+                       for_each_string_list_item(item, reference)
                                strvec_pushl(&cp.args, "--reference",
                                             item->string, NULL);
                }
@@ -1705,7 +1695,7 @@ static int clone_submodule(struct module_clone_data *clone_data)
 
                strvec_push(&cp.args, "--");
                strvec_push(&cp.args, clone_data->url);
-               strvec_push(&cp.args, clone_data->path);
+               strvec_push(&cp.args, clone_data_path);
 
                cp.git_cmd = 1;
                prepare_submodule_repo_env(&cp.env);
@@ -1713,23 +1703,25 @@ static int clone_submodule(struct module_clone_data *clone_data)
 
                if(run_command(&cp))
                        die(_("clone of '%s' into submodule path '%s' failed"),
-                           clone_data->url, clone_data->path);
+                           clone_data->url, clone_data_path);
        } else {
-               if (clone_data->require_init && !access(clone_data->path, X_OK) &&
-                   !is_empty_dir(clone_data->path))
-                       die(_("directory not empty: '%s'"), clone_data->path);
-               if (safe_create_leading_directories_const(clone_data->path) < 0)
-                       die(_("could not create directory '%s'"), clone_data->path);
-               strbuf_addf(&sb, "%s/index", sm_gitdir);
-               unlink_or_warn(sb.buf);
-               strbuf_reset(&sb);
+               char *path;
+
+               if (clone_data->require_init && !access(clone_data_path, X_OK) &&
+                   !is_empty_dir(clone_data_path))
+                       die(_("directory not empty: '%s'"), clone_data_path);
+               if (safe_create_leading_directories_const(clone_data_path) < 0)
+                       die(_("could not create directory '%s'"), clone_data_path);
+               path = xstrfmt("%s/index", sm_gitdir);
+               unlink_or_warn(path);
+               free(path);
        }
 
-       connect_work_tree_and_git_dir(clone_data->path, sm_gitdir, 0);
+       connect_work_tree_and_git_dir(clone_data_path, sm_gitdir, 0);
 
-       p = git_pathdup_submodule(clone_data->path, "config");
+       p = git_pathdup_submodule(clone_data_path, "config");
        if (!p)
-               die(_("could not get submodule directory for '%s'"), clone_data->path);
+               die(_("could not get submodule directory for '%s'"), clone_data_path);
 
        /* setup alternateLocation and alternateErrorStrategy in the cloned submodule if needed */
        git_config_get_string("submodule.alternateLocation", &sm_alternate);
@@ -1744,9 +1736,9 @@ static int clone_submodule(struct module_clone_data *clone_data)
        free(sm_alternate);
        free(error_strategy);
 
-       strbuf_release(&sb);
        free(sm_gitdir);
        free(p);
+       free(to_free);
        return 0;
 }
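
Because clone_data is const now, the function no longer rewrites clone_data->path in place; it derives a local clone_data_path and parks any allocation in to_free so a single free() at the end covers both the absolute and the relative case. A standalone sketch of that idiom (report_clone_path() is an illustrative name):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Rather than overwriting a field in a const struct, derive a local
     * path and remember the possible allocation for one free() at the end. */
    static void report_clone_path(const char *configured_path, const char *worktree)
    {
            const char *path = configured_path;
            char *to_free = NULL;

            if (configured_path[0] != '/') {        /* relative: prefix the worktree */
                    size_t n = strlen(worktree) + strlen(configured_path) + 2;

                    to_free = malloc(n);
                    if (!to_free)
                            exit(1);
                    snprintf(to_free, n, "%s/%s", worktree, configured_path);
                    path = to_free;
            }

            printf("cloning into '%s'\n", path);
            free(to_free);                          /* free(NULL) is a no-op */
    }

    int main(void)
    {
            report_clone_path("sub/dir", "/work/tree");
            report_clone_path("/abs/path", "/work/tree");
            return 0;
    }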
 
@@ -1754,8 +1746,8 @@ static int module_clone(int argc, const char **argv, const char *prefix)
 {
        int dissociate = 0, quiet = 0, progress = 0, require_init = 0;
        struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT;
-       struct list_objects_filter_options filter_options;
-
+       struct list_objects_filter_options filter_options = { 0 };
+       struct string_list reference = STRING_LIST_INIT_NODUP;
        struct option module_clone_options[] = {
                OPT_STRING(0, "prefix", &clone_data.prefix,
                           N_("path"),
@@ -1769,7 +1761,7 @@ static int module_clone(int argc, const char **argv, const char *prefix)
                OPT_STRING(0, "url", &clone_data.url,
                           N_("string"),
                           N_("url where to clone the submodule from")),
-               OPT_STRING_LIST(0, "reference", &clone_data.reference,
+               OPT_STRING_LIST(0, "reference", &reference,
                           N_("repo"),
                           N_("reference repository")),
                OPT_BOOL(0, "dissociate", &dissociate,
@@ -1787,7 +1779,6 @@ static int module_clone(int argc, const char **argv, const char *prefix)
                OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
                N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
                   "[--reference <repository>] [--name <name>] [--depth <depth>] "
@@ -1796,7 +1787,6 @@ static int module_clone(int argc, const char **argv, const char *prefix)
                NULL
        };
 
-       memset(&filter_options, 0, sizeof(filter_options));
        argc = parse_options(argc, argv, prefix, module_clone_options,
                             git_submodule_helper_usage, 0);
 
@@ -1810,31 +1800,33 @@ static int module_clone(int argc, const char **argv, const char *prefix)
                usage_with_options(git_submodule_helper_usage,
                                   module_clone_options);
 
-       clone_submodule(&clone_data);
+       clone_submodule(&clone_data, &reference);
        list_objects_filter_release(&filter_options);
+       string_list_clear(&reference, 1);
        return 0;
 }
 
-static void determine_submodule_update_strategy(struct repository *r,
-                                               int just_cloned,
-                                               const char *path,
-                                               const char *update,
-                                               struct submodule_update_strategy *out)
+static int determine_submodule_update_strategy(struct repository *r,
+                                              int just_cloned,
+                                              const char *path,
+                                              enum submodule_update_type update,
+                                              struct submodule_update_strategy *out)
 {
        const struct submodule *sub = submodule_from_path(r, null_oid(), path);
        char *key;
        const char *val;
+       int ret;
 
        key = xstrfmt("submodule.%s.update", sub->name);
 
        if (update) {
-               if (parse_submodule_update_strategy(update, out) < 0)
-                       die(_("Invalid update mode '%s' for submodule path '%s'"),
-                               update, path);
+               out->type = update;
        } else if (!repo_config_get_string_tmp(r, key, &val)) {
-               if (parse_submodule_update_strategy(val, out) < 0)
-                       die(_("Invalid update mode '%s' configured for submodule path '%s'"),
-                               val, path);
+               if (parse_submodule_update_strategy(val, out) < 0) {
+                       ret = die_message(_("Invalid update mode '%s' configured for submodule path '%s'"),
+                                         val, path);
+                       goto cleanup;
+               }
        } else if (sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
                if (sub->update_strategy.type == SM_UPDATE_COMMAND)
                        BUG("how did we read update = !command from .gitmodules?");
@@ -1849,7 +1841,10 @@ static void determine_submodule_update_strategy(struct repository *r,
             out->type == SM_UPDATE_NONE))
                out->type = SM_UPDATE_CHECKOUT;
 
+       ret = 0;
+cleanup:
        free(key);
+       return ret;
 }
 
 struct update_clone_data {
@@ -1863,7 +1858,7 @@ struct submodule_update_clone {
        int current;
 
        /* configuration parameters which are passed on to the children */
-       struct update_data *update_data;
+       const struct update_data *update_data;
 
        /* to be consumed by update_submodule() */
        struct update_clone_data *update_clone;
@@ -1878,11 +1873,16 @@ struct submodule_update_clone {
 };
 #define SUBMODULE_UPDATE_CLONE_INIT { 0 }
 
+static void submodule_update_clone_release(struct submodule_update_clone *suc)
+{
+       free(suc->update_clone);
+       free(suc->failed_clones);
+}
+
 struct update_data {
        const char *prefix;
-       const char *recursive_prefix;
-       const char *displaypath;
-       const char *update_default;
+       char *displaypath;
+       enum submodule_update_type update_default;
        struct object_id suboid;
        struct string_list references;
        struct submodule_update_strategy update_strategy;
@@ -1917,6 +1917,12 @@ struct update_data {
        .max_jobs = 1, \
 }
 
+static void update_data_release(struct update_data *ud)
+{
+       free(ud->displaypath);
+       module_list_release(&ud->list);
+}
+
 static void next_submodule_warn_missing(struct submodule_update_clone *suc,
                struct strbuf *out, const char *displaypath)
 {
@@ -1949,30 +1955,20 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
        const char *update_string;
        enum submodule_update_type update_type;
        char *key;
-       struct strbuf displaypath_sb = STRBUF_INIT;
+       const struct update_data *ud = suc->update_data;
+       char *displaypath = get_submodule_displaypath(ce->name, ud->prefix);
        struct strbuf sb = STRBUF_INIT;
-       const char *displaypath = NULL;
        int needs_cloning = 0;
        int need_free_url = 0;
 
        if (ce_stage(ce)) {
-               if (suc->update_data->recursive_prefix)
-                       strbuf_addf(&sb, "%s/%s", suc->update_data->recursive_prefix, ce->name);
-               else
-                       strbuf_addstr(&sb, ce->name);
-               strbuf_addf(out, _("Skipping unmerged submodule %s"), sb.buf);
+               strbuf_addf(out, _("Skipping unmerged submodule %s"), displaypath);
                strbuf_addch(out, '\n');
                goto cleanup;
        }
 
        sub = submodule_from_path(the_repository, null_oid(), ce->name);
 
-       if (suc->update_data->recursive_prefix)
-               displaypath = relative_path(suc->update_data->recursive_prefix,
-                                           ce->name, &displaypath_sb);
-       else
-               displaypath = ce->name;
-
        if (!sub) {
                next_submodule_warn_missing(suc, out, displaypath);
                goto cleanup;
@@ -2051,6 +2047,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
        strvec_pushl(&child->args, "--url", url, NULL);
        if (suc->update_data->references.nr) {
                struct string_list_item *item;
+
                for_each_string_list_item(item, &suc->update_data->references)
                        strvec_pushl(&child->args, "--reference", item->string, NULL);
        }
@@ -2062,7 +2059,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
                                              "--no-single-branch");
 
 cleanup:
-       strbuf_release(&displaypath_sb);
+       free(displaypath);
        strbuf_release(&sb);
        if (need_free_url)
                free((void*)url);
@@ -2083,6 +2080,7 @@ static int update_clone_get_next_task(struct child_process *child,
                ce = suc->update_data->list.entries[suc->current];
                if (prepare_to_clone_next_submodule(ce, child, suc, err)) {
                        int *p = xmalloc(sizeof(*p));
+
                        *p = suc->current;
                        *idx_task_cb = p;
                        suc->current++;
@@ -2098,6 +2096,7 @@ static int update_clone_get_next_task(struct child_process *child,
        index = suc->current - suc->update_data->list.nr;
        if (index < suc->failed_clones_nr) {
                int *p;
+
                ce = suc->failed_clones[index];
                if (!prepare_to_clone_next_submodule(ce, child, suc, err)) {
                        suc->current ++;
@@ -2121,6 +2120,7 @@ static int update_clone_start_failure(struct strbuf *err,
                                      void *idx_task_cb)
 {
        struct submodule_update_clone *suc = suc_cb;
+
        suc->quickstop = 1;
        return 1;
 }
@@ -2132,9 +2132,9 @@ static int update_clone_task_finished(int result,
 {
        const struct cache_entry *ce;
        struct submodule_update_clone *suc = suc_cb;
-
        int *idxP = idx_task_cb;
        int idx = *idxP;
+
        free(idxP);
 
        if (!result)
@@ -2167,19 +2167,20 @@ static int git_update_clone_config(const char *var, const char *value,
                                   void *cb)
 {
        int *max_jobs = cb;
+
        if (!strcmp(var, "submodule.fetchjobs"))
                *max_jobs = parse_submodule_fetchjobs(var, value);
        return 0;
 }
 
-static int is_tip_reachable(const char *path, struct object_id *oid)
+static int is_tip_reachable(const char *path, const struct object_id *oid)
 {
        struct child_process cp = CHILD_PROCESS_INIT;
        struct strbuf rev = STRBUF_INIT;
        char *hex = oid_to_hex(oid);
 
        cp.git_cmd = 1;
-       cp.dir = xstrdup(path);
+       cp.dir = path;
        cp.no_stderr = 1;
        strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL);
 
@@ -2191,13 +2192,14 @@ static int is_tip_reachable(const char *path, struct object_id *oid)
        return 1;
 }
 
-static int fetch_in_submodule(const char *module_path, int depth, int quiet, struct object_id *oid)
+static int fetch_in_submodule(const char *module_path, int depth, int quiet,
+                             const struct object_id *oid)
 {
        struct child_process cp = CHILD_PROCESS_INIT;
 
        prepare_submodule_repo_env(&cp.env);
        cp.git_cmd = 1;
-       cp.dir = xstrdup(module_path);
+       cp.dir = module_path;
 
        strvec_push(&cp.args, "fetch");
        if (quiet)
@@ -2207,6 +2209,7 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
        if (oid) {
                char *hex = oid_to_hex(oid);
                char *remote = get_default_remote();
+
                strvec_pushl(&cp.args, remote, hex, NULL);
                free(remote);
        }
@@ -2214,11 +2217,11 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
        return run_command(&cp);
 }
 
-static int run_update_command(struct update_data *ud, int subforce)
+static int run_update_command(const struct update_data *ud, int subforce)
 {
        struct child_process cp = CHILD_PROCESS_INIT;
        char *oid = oid_to_hex(&ud->oid);
-       int must_die_on_failure = 0;
+       int ret;
 
        switch (ud->update_strategy.type) {
        case SM_UPDATE_CHECKOUT:
@@ -2232,55 +2235,50 @@ static int run_update_command(struct update_data *ud, int subforce)
                strvec_push(&cp.args, "rebase");
                if (ud->quiet)
                        strvec_push(&cp.args, "--quiet");
-               must_die_on_failure = 1;
                break;
        case SM_UPDATE_MERGE:
                cp.git_cmd = 1;
                strvec_push(&cp.args, "merge");
                if (ud->quiet)
                        strvec_push(&cp.args, "--quiet");
-               must_die_on_failure = 1;
                break;
        case SM_UPDATE_COMMAND:
                cp.use_shell = 1;
                strvec_push(&cp.args, ud->update_strategy.command);
-               must_die_on_failure = 1;
                break;
        default:
-               BUG("unexpected update strategy type: %s",
-                   submodule_strategy_to_string(&ud->update_strategy));
+               BUG("unexpected update strategy type: %d",
+                   ud->update_strategy.type);
        }
        strvec_push(&cp.args, oid);
 
-       cp.dir = xstrdup(ud->sm_path);
+       cp.dir = ud->sm_path;
        prepare_submodule_repo_env(&cp.env);
-       if (run_command(&cp)) {
+       if ((ret = run_command(&cp))) {
                switch (ud->update_strategy.type) {
                case SM_UPDATE_CHECKOUT:
                        die_message(_("Unable to checkout '%s' in submodule path '%s'"),
                                    oid, ud->displaypath);
+                       /* No "ret" assignment, use "git checkout"'s */
                        break;
                case SM_UPDATE_REBASE:
-                       die_message(_("Unable to rebase '%s' in submodule path '%s'"),
-                           oid, ud->displaypath);
+                       ret = die_message(_("Unable to rebase '%s' in submodule path '%s'"),
+                                         oid, ud->displaypath);
                        break;
                case SM_UPDATE_MERGE:
-                       die_message(_("Unable to merge '%s' in submodule path '%s'"),
-                           oid, ud->displaypath);
+                       ret = die_message(_("Unable to merge '%s' in submodule path '%s'"),
+                                         oid, ud->displaypath);
                        break;
                case SM_UPDATE_COMMAND:
-                       die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
-                           ud->update_strategy.command, oid, ud->displaypath);
+                       ret = die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
+                                         ud->update_strategy.command, oid, ud->displaypath);
                        break;
                default:
-                       BUG("unexpected update strategy type: %s",
-                           submodule_strategy_to_string(&ud->update_strategy));
+                       BUG("unexpected update strategy type: %d",
+                           ud->update_strategy.type);
                }
-               if (must_die_on_failure)
-                       exit(128);
 
-               /* the command failed, but update must continue */
-               return 1;
+               return ret;
        }
 
        if (ud->quiet)
@@ -2304,14 +2302,14 @@ static int run_update_command(struct update_data *ud, int subforce)
                       ud->displaypath, ud->update_strategy.command, oid);
                break;
        default:
-               BUG("unexpected update strategy type: %s",
-                   submodule_strategy_to_string(&ud->update_strategy));
+               BUG("unexpected update strategy type: %d",
+                   ud->update_strategy.type);
        }
 
        return 0;
 }
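
Dropping must_die_on_failure and the exit(128) call depends on reporting helpers that print the fatal message but return a code for the caller to propagate, which is what die_message() is used for here. A self-contained sketch of that convention; report_fatal() is a stand-in, not Git's API:

    #include <stdarg.h>
    #include <stdio.h>

    /* Print a fatal-style message, but hand the conventional code back so
     * the caller decides whether and when to exit. */
    static int report_fatal(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            fputs("fatal: ", stderr);
            vfprintf(stderr, fmt, ap);
            fputc('\n', stderr);
            va_end(ap);

            return 128;
    }

    static int update_one(const char *path, int child_status)
    {
            if (child_status)
                    return report_fatal("Unable to update submodule path '%s'", path);
            return 0;                       /* success */
    }

    int main(void)
    {
            return update_one("sub/dir", 1);
    }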
 
-static int run_update_procedure(struct update_data *ud)
+static int run_update_procedure(const struct update_data *ud)
 {
        int subforce = is_null_oid(&ud->suboid) || ud->force;
 
@@ -2333,59 +2331,67 @@ static int run_update_procedure(struct update_data *ud)
                 */
                if (!is_tip_reachable(ud->sm_path, &ud->oid) &&
                    fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, &ud->oid))
-                       die(_("Fetched in submodule path '%s', but it did not "
-                             "contain %s. Direct fetching of that commit failed."),
-                           ud->displaypath, oid_to_hex(&ud->oid));
+                       return die_message(_("Fetched in submodule path '%s', but it did not "
+                                            "contain %s. Direct fetching of that commit failed."),
+                                          ud->displaypath, oid_to_hex(&ud->oid));
        }
 
        return run_update_command(ud, subforce);
 }
 
-static const char *remote_submodule_branch(const char *path)
+static int remote_submodule_branch(const char *path, const char **branch)
 {
        const struct submodule *sub;
-       const char *branch = NULL;
        char *key;
+       *branch = NULL;
 
        sub = submodule_from_path(the_repository, null_oid(), path);
        if (!sub)
-               return NULL;
+               return die_message(_("could not initialize submodule at path '%s'"),
+                                  path);
 
        key = xstrfmt("submodule.%s.branch", sub->name);
-       if (repo_config_get_string_tmp(the_repository, key, &branch))
-               branch = sub->branch;
+       if (repo_config_get_string_tmp(the_repository, key, branch))
+               *branch = sub->branch;
        free(key);
 
-       if (!branch)
-               return "HEAD";
+       if (!*branch) {
+               *branch = "HEAD";
+               return 0;
+       }
 
-       if (!strcmp(branch, ".")) {
+       if (!strcmp(*branch, ".")) {
                const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
 
                if (!refname)
-                       die(_("No such ref: %s"), "HEAD");
+                       return die_message(_("No such ref: %s"), "HEAD");
 
                /* detached HEAD */
                if (!strcmp(refname, "HEAD"))
-                       die(_("Submodule (%s) branch configured to inherit "
-                             "branch from superproject, but the superproject "
-                             "is not on any branch"), sub->name);
+                       return die_message(_("Submodule (%s) branch configured to inherit "
+                                            "branch from superproject, but the superproject "
+                                            "is not on any branch"), sub->name);
 
                if (!skip_prefix(refname, "refs/heads/", &refname))
-                       die(_("Expecting a full ref name, got %s"), refname);
-               return refname;
+                       return die_message(_("Expecting a full ref name, got %s"),
+                                          refname);
+
+               *branch = refname;
+               return 0;
        }
 
-       return branch;
+       /* Our "branch" is coming from repo_config_get_string_tmp() */
+       return 0;
 }
 
-static void ensure_core_worktree(const char *path)
+static int ensure_core_worktree(const char *path)
 {
        const char *cw;
        struct repository subrepo;
 
        if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
-               die(_("could not get a repository handle for submodule '%s'"), path);
+               return die_message(_("could not get a repository handle for submodule '%s'"),
+                                  path);
 
        if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
                char *cfg_file, *abs_path;
@@ -2403,15 +2409,39 @@ static void ensure_core_worktree(const char *path)
                free(abs_path);
                strbuf_release(&sb);
        }
+
+       repo_clear(&subrepo);
+       return 0;
+}
+
+static const char *submodule_update_type_to_label(enum submodule_update_type type)
+{
+       switch (type) {
+       case SM_UPDATE_CHECKOUT:
+               return "checkout";
+       case SM_UPDATE_MERGE:
+               return "merge";
+       case SM_UPDATE_REBASE:
+               return "rebase";
+       case SM_UPDATE_UNSPECIFIED:
+       case SM_UPDATE_NONE:
+       case SM_UPDATE_COMMAND:
+               break;
+       }
+       BUG("unreachable with type %d", type);
 }
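
submodule_update_type_to_label() deliberately has no "default:" arm, so the compiler (with -Wswitch) points out any enum value the mapping forgets, and the unreachable tail is flagged as a BUG(). Roughly the same shape as a standalone program:

    #include <stdio.h>
    #include <stdlib.h>

    enum update_type {
            UPDATE_UNSPECIFIED = 0,
            UPDATE_CHECKOUT,
            UPDATE_MERGE,
            UPDATE_REBASE,
    };

    /* No "default:" arm on purpose: a new enum value makes the compiler
     * warn that this switch no longer covers every case. */
    static const char *update_type_label(enum update_type type)
    {
            switch (type) {
            case UPDATE_CHECKOUT:
                    return "checkout";
            case UPDATE_MERGE:
                    return "merge";
            case UPDATE_REBASE:
                    return "rebase";
            case UPDATE_UNSPECIFIED:
                    break;                  /* has no command-line flag */
            }
            fprintf(stderr, "BUG: unreachable with type %d\n", (int)type);
            abort();
    }

    int main(void)
    {
            printf("--%s\n", update_type_label(UPDATE_REBASE));
            return 0;
    }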
 
-static void update_data_to_args(struct update_data *update_data, struct strvec *args)
+static void update_data_to_args(const struct update_data *update_data,
+                               struct strvec *args)
 {
+       enum submodule_update_type update_type = update_data->update_default;
+
+       if (update_data->displaypath) {
+               strvec_push(args, "--super-prefix");
+               strvec_pushf(args, "%s/", update_data->displaypath);
+       }
        strvec_pushl(args, "submodule--helper", "update", "--recursive", NULL);
        strvec_pushf(args, "--jobs=%d", update_data->max_jobs);
-       if (update_data->recursive_prefix)
-               strvec_pushl(args, "--recursive-prefix",
-                            update_data->recursive_prefix, NULL);
        if (update_data->quiet)
                strvec_push(args, "--quiet");
        if (update_data->force)
@@ -2430,10 +2460,13 @@ static void update_data_to_args(struct update_data *update_data, struct strvec *
                strvec_push(args, "--require-init");
        if (update_data->depth)
                strvec_pushf(args, "--depth=%d", update_data->depth);
-       if (update_data->update_default)
-               strvec_pushl(args, "--update", update_data->update_default, NULL);
+       if (update_type != SM_UPDATE_UNSPECIFIED)
+               strvec_pushf(args, "--%s",
+                            submodule_update_type_to_label(update_type));
+
        if (update_data->references.nr) {
                struct string_list_item *item;
+
                for_each_string_list_item(item, &update_data->references)
                        strvec_pushl(args, "--reference", item->string, NULL);
        }
@@ -2453,66 +2486,62 @@ static void update_data_to_args(struct update_data *update_data, struct strvec *
 
 static int update_submodule(struct update_data *update_data)
 {
-       char *prefixed_path;
-
-       ensure_core_worktree(update_data->sm_path);
-
-       if (update_data->recursive_prefix)
-               prefixed_path = xstrfmt("%s%s", update_data->recursive_prefix,
-                                       update_data->sm_path);
-       else
-               prefixed_path = xstrdup(update_data->sm_path);
-
-       update_data->displaypath = get_submodule_displaypath(prefixed_path,
-                                                            update_data->prefix);
-       free(prefixed_path);
+       int ret;
 
-       determine_submodule_update_strategy(the_repository, update_data->just_cloned,
-                                           update_data->sm_path, update_data->update_default,
-                                           &update_data->update_strategy);
+       ret = determine_submodule_update_strategy(the_repository,
+                                                 update_data->just_cloned,
+                                                 update_data->sm_path,
+                                                 update_data->update_default,
+                                                 &update_data->update_strategy);
+       if (ret)
+               return ret;
 
        if (update_data->just_cloned)
                oidcpy(&update_data->suboid, null_oid());
        else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
-               die(_("Unable to find current revision in submodule path '%s'"),
-                       update_data->displaypath);
+               return die_message(_("Unable to find current revision in submodule path '%s'"),
+                                  update_data->displaypath);
 
        if (update_data->remote) {
-               char *remote_name = get_default_remote_submodule(update_data->sm_path);
-               const char *branch = remote_submodule_branch(update_data->sm_path);
-               char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
+               char *remote_name;
+               const char *branch;
+               char *remote_ref;
+               int code;
+
+               code = get_default_remote_submodule(update_data->sm_path, &remote_name);
+               if (code)
+                       return code;
+               code = remote_submodule_branch(update_data->sm_path, &branch);
+               if (code)
+                       return code;
+               remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
+
+               free(remote_name);
 
                if (!update_data->nofetch) {
                        if (fetch_in_submodule(update_data->sm_path, update_data->depth,
                                              0, NULL))
-                               die(_("Unable to fetch in submodule path '%s'"),
-                                   update_data->sm_path);
+                               return die_message(_("Unable to fetch in submodule path '%s'"),
+                                                  update_data->sm_path);
                }
 
                if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
-                       die(_("Unable to find %s revision in submodule path '%s'"),
-                           remote_ref, update_data->sm_path);
+                       return die_message(_("Unable to find %s revision in submodule path '%s'"),
+                                          remote_ref, update_data->sm_path);
 
                free(remote_ref);
        }
 
-       if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force)
-               if (run_update_procedure(update_data))
-                       return 1;
+       if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) {
+               ret = run_update_procedure(update_data);
+               if (ret)
+                       return ret;
+       }
 
        if (update_data->recursive) {
                struct child_process cp = CHILD_PROCESS_INIT;
                struct update_data next = *update_data;
-               int res;
 
-               if (update_data->recursive_prefix)
-                       prefixed_path = xstrfmt("%s%s/", update_data->recursive_prefix,
-                                               update_data->sm_path);
-               else
-                       prefixed_path = xstrfmt("%s/", update_data->sm_path);
-
-               next.recursive_prefix = get_submodule_displaypath(prefixed_path,
-                                                                 update_data->prefix);
                next.prefix = NULL;
                oidcpy(&next.oid, null_oid());
                oidcpy(&next.suboid, null_oid());
@@ -2522,16 +2551,11 @@ static int update_submodule(struct update_data *update_data)
                prepare_submodule_repo_env(&cp.env);
                update_data_to_args(&next, &cp.args);
 
-               /* die() if child process die()'d */
-               res = run_command(&cp);
-               if (!res)
-                       return 0;
-               die_message(_("Failed to recurse into submodule path '%s'"),
-                           update_data->displaypath);
-               if (res == 128)
-                       exit(res);
-               else if (res)
-                       return 1;
+               ret = run_command(&cp);
+               if (ret)
+                       die_message(_("Failed to recurse into submodule path '%s'"),
+                                   update_data->displaypath);
+               return ret;
        }
 
        return 0;
@@ -2539,7 +2563,7 @@ static int update_submodule(struct update_data *update_data)
 
 static int update_submodules(struct update_data *update_data)
 {
-       int i, res = 0;
+       int i, ret = 0;
        struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
 
        suc.update_data = update_data;
@@ -2557,33 +2581,47 @@ static int update_submodules(struct update_data *update_data)
         * - the listener can avoid doing any work if fetching failed.
         */
        if (suc.quickstop) {
-               res = 1;
+               ret = 1;
                goto cleanup;
        }
 
        for (i = 0; i < suc.update_clone_nr; i++) {
                struct update_clone_data ucd = suc.update_clone[i];
+               int code;
 
                oidcpy(&update_data->oid, &ucd.oid);
                update_data->just_cloned = ucd.just_cloned;
                update_data->sm_path = ucd.sub->path;
 
-               if (update_submodule(update_data))
-                       res = 1;
+               code = ensure_core_worktree(update_data->sm_path);
+               if (code)
+                       goto fail;
+
+               update_data->displaypath = get_submodule_displaypath(
+                       update_data->sm_path, update_data->prefix);
+               code = update_submodule(update_data);
+               FREE_AND_NULL(update_data->displaypath);
+fail:
+               if (!code)
+                       continue;
+               ret = code;
+               if (ret == 128)
+                       goto cleanup;
        }
 
 cleanup:
+       submodule_update_clone_release(&suc);
        string_list_clear(&update_data->references, 0);
-       return res;
+       return ret;
 }
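
update_submodules() now records each submodule's failure code and keeps iterating, cutting the loop short only when a code of 128 signals a die() further down. A minimal sketch of that control flow with a fake per-submodule callback:

    #include <stdio.h>

    /* Keep iterating past ordinary failures, remember the latest nonzero
     * code, but bail out as soon as the conventional fatal code 128 shows up. */
    static int update_all(int (*update_one)(int idx), int n)
    {
            int i, ret = 0;

            for (i = 0; i < n; i++) {
                    int code = update_one(i);

                    if (!code)
                            continue;
                    ret = code;
                    if (ret == 128)
                            break;          /* fatal in a child: stop now */
            }
            return ret;
    }

    static int fake_update(int idx)
    {
            return idx == 1 ? 1 : 0;        /* pretend submodule 1 fails */
    }

    int main(void)
    {
            printf("exit status: %d\n", update_all(fake_update, 3));
            return 0;
    }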
 
 static int module_update(int argc, const char **argv, const char *prefix)
 {
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
+       struct pathspec pathspec2 = { 0 };
        struct update_data opt = UPDATE_DATA_INIT;
-       struct list_objects_filter_options filter_options;
+       struct list_objects_filter_options filter_options = { 0 };
        int ret;
-
        struct option module_update_options[] = {
                OPT__FORCE(&opt.force, N_("force checkout updates"), 0),
                OPT_BOOL(0, "init", &opt.init,
@@ -2597,13 +2635,15 @@ static int module_update(int argc, const char **argv, const char *prefix)
                OPT_STRING(0, "prefix", &opt.prefix,
                           N_("path"),
                           N_("path into the working tree")),
-               OPT_STRING(0, "recursive-prefix", &opt.recursive_prefix,
-                          N_("path"),
-                          N_("path into the working tree, across nested "
-                             "submodule boundaries")),
-               OPT_STRING(0, "update", &opt.update_default,
-                          N_("string"),
-                          N_("rebase, merge, checkout or none")),
+               OPT_SET_INT(0, "checkout", &opt.update_default,
+                       N_("use the 'checkout' update strategy (default)"),
+                       SM_UPDATE_CHECKOUT),
+               OPT_SET_INT('m', "merge", &opt.update_default,
+                       N_("use the 'merge' update strategy"),
+                       SM_UPDATE_MERGE),
+               OPT_SET_INT('r', "rebase", &opt.update_default,
+                       N_("use the 'rebase' update strategy"),
+                       SM_UPDATE_REBASE),
                OPT_STRING_LIST(0, "reference", &opt.references, N_("repo"),
                           N_("reference repository")),
                OPT_BOOL(0, "dissociate", &opt.dissociate,
@@ -2619,13 +2659,12 @@ static int module_update(int argc, const char **argv, const char *prefix)
                OPT_BOOL(0, "progress", &opt.progress,
                            N_("force cloning progress")),
                OPT_BOOL(0, "require-init", &opt.require_init,
-                          N_("disallow cloning into non-empty directory")),
+                          N_("disallow cloning into non-empty directory, implies --init")),
                OPT_BOOL(0, "single-branch", &opt.single_branch,
                         N_("clone only one branch, HEAD or --branch")),
                OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
                N_("git submodule [--quiet] update"
                " [--init [--filter=<filter-spec>]] [--remote]"
@@ -2639,10 +2678,12 @@ static int module_update(int argc, const char **argv, const char *prefix)
        update_clone_config_from_gitmodules(&opt.max_jobs);
        git_config(git_update_clone_config, &opt.max_jobs);
 
-       memset(&filter_options, 0, sizeof(filter_options));
        argc = parse_options(argc, argv, prefix, module_update_options,
                             git_submodule_helper_usage, 0);
 
+       if (opt.require_init)
+               opt.init = 1;
+
        if (filter_options.choice && !opt.init) {
                usage_with_options(git_submodule_helper_usage,
                                   module_update_options);
@@ -2651,13 +2692,11 @@ static int module_update(int argc, const char **argv, const char *prefix)
        opt.filter_options = &filter_options;
 
        if (opt.update_default)
-               if (parse_submodule_update_strategy(opt.update_default,
-                                                   &opt.update_strategy) < 0)
-                       die(_("bad value for update parameter"));
+               opt.update_strategy.type = opt.update_default;
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &opt.list) < 0) {
-               list_objects_filter_release(&filter_options);
-               return 1;
+               ret = 1;
+               goto cleanup;
        }
 
        if (pathspec.nr)
@@ -2668,8 +2707,11 @@ static int module_update(int argc, const char **argv, const char *prefix)
                struct init_cb info = INIT_CB_INIT;
 
                if (module_list_compute(argc, argv, opt.prefix,
-                                       &pathspec, &list) < 0)
-                       return 1;
+                                       &pathspec2, &list) < 0) {
+                       module_list_release(&list);
+                       ret = 1;
+                       goto cleanup;
+               }
 
                /*
                 * If there are no path args and submodule.active is set then,
@@ -2679,15 +2721,19 @@ static int module_update(int argc, const char **argv, const char *prefix)
                        module_list_active(&list);
 
                info.prefix = opt.prefix;
-               info.superprefix = opt.recursive_prefix;
                if (opt.quiet)
                        info.flags |= OPT_QUIET;
 
                for_each_listed_submodule(&list, init_submodule_cb, &info);
+               module_list_release(&list);
        }
 
        ret = update_submodules(&opt);
+cleanup:
+       update_data_release(&opt);
        list_objects_filter_release(&filter_options);
+       clear_pathspec(&pathspec);
+       clear_pathspec(&pathspec2);
        return ret;
 }
 
@@ -2771,10 +2817,9 @@ static int push_check(int argc, const char **argv, const char *prefix)
 static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
 {
        int i;
-       struct pathspec pathspec;
+       struct pathspec pathspec = { 0 };
        struct module_list list = MODULE_LIST_INIT;
        unsigned flags = ABSORB_GITDIR_RECURSE_SUBMODULES;
-
        struct option embed_gitdir_options[] = {
                OPT_STRING(0, "prefix", &prefix,
                           N_("path"),
@@ -2783,53 +2828,26 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
                        ABSORB_GITDIR_RECURSE_SUBMODULES),
                OPT_END()
        };
-
        const char *const git_submodule_helper_usage[] = {
-               N_("git submodule--helper absorb-git-dirs [<options>] [<path>...]"),
+               N_("git submodule absorbgitdirs [<options>] [<path>...]"),
                NULL
        };
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, embed_gitdir_options,
                             git_submodule_helper_usage, 0);
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               return 1;
+               goto cleanup;
 
        for (i = 0; i < list.nr; i++)
                absorb_git_dir_into_superproject(list.entries[i]->name, flags);
 
-       return 0;
-}
-
-static int is_active(int argc, const char **argv, const char *prefix)
-{
-       if (argc != 2)
-               die("submodule--helper is-active takes exactly 1 argument");
-
-       return !is_submodule_active(the_repository, argv[1]);
-}
-
-/*
- * Exit non-zero if any of the submodule names given on the command line is
- * invalid. If no names are given, filter stdin to print only valid names
- * (which is primarily intended for testing).
- */
-static int check_name(int argc, const char **argv, const char *prefix)
-{
-       if (argc > 1) {
-               while (*++argv) {
-                       if (check_submodule_name(*argv) < 0)
-                               return 1;
-               }
-       } else {
-               struct strbuf buf = STRBUF_INIT;
-               while (strbuf_getline(&buf, stdin) != EOF) {
-                       if (!check_submodule_name(buf.buf))
-                               printf("%s\n", buf.buf);
-               }
-               strbuf_release(&buf);
-       }
-       return 0;
+       ret = 0;
+cleanup:
+       clear_pathspec(&pathspec);
+       module_list_release(&list);
+       return ret;
 }
 
 static int module_config(int argc, const char **argv, const char *prefix)
@@ -2838,7 +2856,6 @@ static int module_config(int argc, const char **argv, const char *prefix)
                CHECK_WRITEABLE = 1,
                DO_UNSET = 2
        } command = 0;
-
        struct option module_config_options[] = {
                OPT_CMDMODE(0, "check-writeable", &command,
                            N_("check if it is safe to write to the .gitmodules file"),
@@ -2884,13 +2901,12 @@ static int module_set_url(int argc, const char **argv, const char *prefix)
        const char *newurl;
        const char *path;
        char *config_name;
-
        struct option options[] = {
                OPT__QUIET(&quiet, N_("suppress output for setting url of a submodule")),
                OPT_END()
        };
        const char *const usage[] = {
-               N_("git submodule--helper set-url [--quiet] <path> <newurl>"),
+               N_("git submodule set-url [--quiet] <path> <newurl>"),
                NULL
        };
 
@@ -2915,13 +2931,13 @@ static int module_set_branch(int argc, const char **argv, const char *prefix)
        const char *opt_branch = NULL;
        const char *path;
        char *config_name;
-
-       /*
-        * We accept the `quiet` option for uniformity across subcommands,
-        * though there is nothing to make less verbose in this subcommand.
-        */
        struct option options[] = {
+               /*
+                * We accept the `quiet` option for uniformity across subcommands,
+                * though there is nothing to make less verbose in this subcommand.
+                */
                OPT_NOOP_NOARG('q', "quiet"),
+
                OPT_BOOL('d', "default", &opt_default,
                        N_("set the default tracking branch to master")),
                OPT_STRING('b', "branch", &opt_branch, N_("branch"),
@@ -2929,8 +2945,8 @@ static int module_set_branch(int argc, const char **argv, const char *prefix)
                OPT_END()
        };
        const char *const usage[] = {
-               N_("git submodule--helper set-branch [-q|--quiet] (-d|--default) <path>"),
-               N_("git submodule--helper set-branch [-q|--quiet] (-b|--branch) <branch> <path>"),
+               N_("git submodule set-branch [-q|--quiet] (-d|--default) <path>"),
+               N_("git submodule set-branch [-q|--quiet] (-b|--branch) <branch> <path>"),
                NULL
        };
 
@@ -2956,7 +2972,6 @@ static int module_create_branch(int argc, const char **argv, const char *prefix)
 {
        enum branch_track track;
        int quiet = 0, force = 0, reflog = 0, dry_run = 0;
-
        struct option options[] = {
                OPT__QUIET(&quiet, N_("print only error messages")),
                OPT__FORCE(&force, N_("force creation"), 0),
@@ -3019,8 +3034,10 @@ static void append_fetch_remotes(struct strbuf *msg, const char *git_dir_path)
        if (!capture_command(&cp_remote, &sb_remote_out, 0)) {
                char *next_line;
                char *line = sb_remote_out.buf;
+
                while ((next_line = strchr(line, '\n')) != NULL) {
                        size_t len = next_line - line;
+
                        if (strip_suffix_mem(line, &len, " (fetch)"))
                                strbuf_addf(msg, "  %.*s\n", (int)len, line);
                        line = next_line + 1;
@@ -3034,6 +3051,8 @@ static int add_submodule(const struct add_data *add_data)
 {
        char *submod_gitdir_path;
        struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT;
+       struct string_list reference = STRING_LIST_INIT_NODUP;
+       int ret = -1;
 
        /* perhaps the path already exists and is already a git repo, else clone it */
        if (is_directory(add_data->sm_path)) {
@@ -3050,6 +3069,7 @@ static int add_submodule(const struct add_data *add_data)
                free(submod_gitdir_path);
        } else {
                struct child_process cp = CHILD_PROCESS_INIT;
+
                submod_gitdir_path = xstrfmt(".git/modules/%s", add_data->sm_name);
 
                if (is_directory(submod_gitdir_path)) {
@@ -3088,15 +3108,17 @@ static int add_submodule(const struct add_data *add_data)
                clone_data.url = add_data->realrepo;
                clone_data.quiet = add_data->quiet;
                clone_data.progress = add_data->progress;
-               if (add_data->reference_path)
-                       string_list_append(&clone_data.reference,
-                                          xstrdup(add_data->reference_path));
+               if (add_data->reference_path) {
+                       char *p = xstrdup(add_data->reference_path);
+
+                       string_list_append(&reference, p)->util = p;
+               }
                clone_data.dissociate = add_data->dissociate;
                if (add_data->depth >= 0)
                        clone_data.depth = xstrfmt("%d", add_data->depth);
 
-               if (clone_submodule(&clone_data))
-                       return -1;
+               if (clone_submodule(&clone_data, &reference))
+                       goto cleanup;
 
                prepare_submodule_repo_env(&cp.env);
                cp.git_cmd = 1;
@@ -3115,7 +3137,10 @@ static int add_submodule(const struct add_data *add_data)
                if (run_command(&cp))
                        die(_("unable to checkout submodule '%s'"), add_data->sm_path);
        }
-       return 0;
+       ret = 0;
+cleanup:
+       string_list_clear(&reference, 1);
+       return ret;
 }
 
 static int config_submodule_in_gitmodules(const char *name, const char *var, const char *value)
@@ -3136,7 +3161,7 @@ static int config_submodule_in_gitmodules(const char *name, const char *var, con
 static void configure_added_submodule(struct add_data *add_data)
 {
        char *key;
-       char *val = NULL;
+       const char *val;
        struct child_process add_submod = CHILD_PROCESS_INIT;
        struct child_process add_gitmodules = CHILD_PROCESS_INIT;
 
@@ -3181,7 +3206,7 @@ static void configure_added_submodule(struct add_data *add_data)
         * is_submodule_active(), since that function needs to find
         * out the value of "submodule.active" again anyway.
         */
-       if (!git_config_get_string("submodule.active", &val) && val) {
+       if (!git_config_get_string_tmp("submodule.active", &val)) {
                /*
                 * If the submodule being added isn't already covered by the
                 * current configured pathspec, set the submodule's active flag
@@ -3255,7 +3280,6 @@ static int module_add(int argc, const char **argv, const char *prefix)
        int force = 0, quiet = 0, progress = 0, dissociate = 0;
        struct add_data add_data = ADD_DATA_INIT;
        char *to_free = NULL;
-
        struct option options[] = {
                OPT_STRING('b', "branch", &add_data.branch, N_("branch"),
                           N_("branch of repository to add as submodule")),
@@ -3272,11 +3296,12 @@ static int module_add(int argc, const char **argv, const char *prefix)
                OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")),
                OPT_END()
        };
-
        const char *const usage[] = {
-               N_("git submodule--helper add [<options>] [--] <repository> [<path>]"),
+               N_("git submodule add [<options>] [--] <repository> [<path>]"),
                NULL
        };
+       struct strbuf sb = STRBUF_INIT;
+       int ret = 1;
 
        argc = parse_options(argc, argv, prefix, options, usage, 0);
 
@@ -3296,8 +3321,12 @@ static int module_add(int argc, const char **argv, const char *prefix)
        else
                add_data.sm_path = xstrdup(argv[1]);
 
-       if (prefix && *prefix && !is_absolute_path(add_data.sm_path))
-               add_data.sm_path = xstrfmt("%s%s", prefix, add_data.sm_path);
+       if (prefix && *prefix && !is_absolute_path(add_data.sm_path)) {
+               char *sm_path = add_data.sm_path;
+
+               add_data.sm_path = xstrfmt("%s%s", prefix, sm_path);
+               free(sm_path);
+       }
 
        if (starts_with_dot_dot_slash(add_data.repo) ||
            starts_with_dot_slash(add_data.repo)) {
@@ -3326,20 +3355,17 @@ static int module_add(int argc, const char **argv, const char *prefix)
        die_on_repo_without_commits(add_data.sm_path);
 
        if (!force) {
-               int exit_code = -1;
-               struct strbuf sb = STRBUF_INIT;
                struct child_process cp = CHILD_PROCESS_INIT;
+
                cp.git_cmd = 1;
                cp.no_stdout = 1;
                strvec_pushl(&cp.args, "add", "--dry-run", "--ignore-missing",
                             "--no-warn-embedded-repo", add_data.sm_path, NULL);
-               if ((exit_code = pipe_command(&cp, NULL, 0, NULL, 0, &sb, 0))) {
+               if ((ret = pipe_command(&cp, NULL, 0, NULL, 0, &sb, 0))) {
                        strbuf_complete_line(&sb);
                        fputs(sb.buf, stderr);
-                       free(add_data.sm_path);
-                       return exit_code;
+                       goto cleanup;
                }
-               strbuf_release(&sb);
        }
 
        if(!add_data.sm_name)
@@ -3354,15 +3380,17 @@ static int module_add(int argc, const char **argv, const char *prefix)
        add_data.progress = !!progress;
        add_data.dissociate = !!dissociate;
 
-       if (add_submodule(&add_data)) {
-               free(add_data.sm_path);
-               return 1;
-       }
+       if (add_submodule(&add_data))
+               goto cleanup;
        configure_added_submodule(&add_data);
+
+       ret = 0;
+cleanup:
        free(add_data.sm_path);
        free(to_free);
+       strbuf_release(&sb);
 
-       return 0;
+       return ret;
 }
 
 #define SUPPORT_SUPER_PREFIX (1<<0)
@@ -3374,22 +3402,17 @@ struct cmd_struct {
 };
 
 static struct cmd_struct commands[] = {
-       {"list", module_list, 0},
-       {"name", module_name, 0},
-       {"clone", module_clone, 0},
-       {"add", module_add, SUPPORT_SUPER_PREFIX},
-       {"update", module_update, 0},
-       {"resolve-relative-url-test", resolve_relative_url_test, 0},
+       {"clone", module_clone, SUPPORT_SUPER_PREFIX},
+       {"add", module_add, 0},
+       {"update", module_update, SUPPORT_SUPER_PREFIX},
        {"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
-       {"init", module_init, SUPPORT_SUPER_PREFIX},
+       {"init", module_init, 0},
        {"status", module_status, SUPPORT_SUPER_PREFIX},
        {"sync", module_sync, SUPPORT_SUPER_PREFIX},
        {"deinit", module_deinit, 0},
-       {"summary", module_summary, SUPPORT_SUPER_PREFIX},
+       {"summary", module_summary, 0},
        {"push-check", push_check, 0},
-       {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
-       {"is-active", is_active, 0},
-       {"check-name", check_name, 0},
+       {"absorbgitdirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
        {"config", module_config, 0},
        {"set-url", module_set_url, 0},
        {"set-branch", module_set_branch, 0},
index e547a08d6c7ce517a5a26598ade75356da93cdab..1b0f10225f0c2630fab0f67534e7135b30571c66 100644 (file)
@@ -71,6 +71,8 @@ int cmd_symbolic_ref(int argc, const char **argv, const char *prefix)
                if (!strcmp(argv[0], "HEAD") &&
                    !starts_with(argv[1], "refs/"))
                        die("Refusing to point HEAD outside of refs/");
+               if (check_refname_format(argv[1], REFNAME_ALLOW_ONELEVEL) < 0)
+                       die("Refusing to set '%s' to invalid ref '%s'", argv[0], argv[1]);
                ret = !!create_symref(argv[0], argv[1], msg);
                break;
        default:
index 56d05e2725db45f1fc2ffba48fc4930dddd13ed7..43789b8ef294d8aa3fc9517f65712ac90e1fa60c 100644 (file)
@@ -97,15 +97,27 @@ static void use(int bytes)
        display_throughput(progress, consumed_bytes);
 }
 
+/*
+ * Decompress a zstream from the standard input into a newly
+ * allocated buffer of the specified size and return that buffer.
+ * The caller is responsible for freeing the returned buffer.
+ *
+ * In dry_run mode, however, "get_data()" is only used to check the
+ * integrity of the data, and the returned buffer is not used at all.
+ * Therefore, in dry_run mode, "get_data()" releases the small
+ * allocated buffer that is reused to hold temporary zstream output
+ * and returns NULL instead of garbage data.
+ */
 static void *get_data(unsigned long size)
 {
        git_zstream stream;
-       void *buf = xmallocz(size);
+       unsigned long bufsize = dry_run && size > 8192 ? 8192 : size;
+       void *buf = xmallocz(bufsize);
 
        memset(&stream, 0, sizeof(stream));
 
        stream.next_out = buf;
-       stream.avail_out = size;
+       stream.avail_out = bufsize;
        stream.next_in = fill(1);
        stream.avail_in = len;
        git_inflate_init(&stream);
@@ -125,8 +137,17 @@ static void *get_data(unsigned long size)
                }
                stream.next_in = fill(1);
                stream.avail_in = len;
+               if (dry_run) {
+                       /* reuse the buffer in dry_run mode */
+                       stream.next_out = buf;
+                       stream.avail_out = bufsize > size - stream.total_out ?
+                                                  size - stream.total_out :
+                                                  bufsize;
+               }
        }
        git_inflate_end(&stream);
+       if (dry_run)
+               FREE_AND_NULL(buf);
        return buf;
 }
 
@@ -326,10 +347,70 @@ static void unpack_non_delta_entry(enum object_type type, unsigned long size,
 {
        void *buf = get_data(size);
 
-       if (!dry_run && buf)
+       if (buf)
                write_object(nr, type, buf, size);
-       else
-               free(buf);
+}
+
+struct input_zstream_data {
+       git_zstream *zstream;
+       unsigned char buf[8192];
+       int status;
+};
+
+static const void *feed_input_zstream(struct input_stream *in_stream,
+                                     unsigned long *readlen)
+{
+       struct input_zstream_data *data = in_stream->data;
+       git_zstream *zstream = data->zstream;
+       void *in = fill(1);
+
+       if (in_stream->is_finished) {
+               *readlen = 0;
+               return NULL;
+       }
+
+       zstream->next_out = data->buf;
+       zstream->avail_out = sizeof(data->buf);
+       zstream->next_in = in;
+       zstream->avail_in = len;
+
+       data->status = git_inflate(zstream, 0);
+
+       in_stream->is_finished = data->status != Z_OK;
+       use(len - zstream->avail_in);
+       *readlen = sizeof(data->buf) - zstream->avail_out;
+
+       return data->buf;
+}
+
+static void stream_blob(unsigned long size, unsigned nr)
+{
+       git_zstream zstream = { 0 };
+       struct input_zstream_data data = { 0 };
+       struct input_stream in_stream = {
+               .read = feed_input_zstream,
+               .data = &data,
+       };
+       struct obj_info *info = &obj_list[nr];
+
+       data.zstream = &zstream;
+       git_inflate_init(&zstream);
+
+       if (stream_loose_object(&in_stream, size, &info->oid))
+               die(_("failed to write object in stream"));
+
+       if (data.status != Z_STREAM_END)
+               die(_("inflate returned (%d)"), data.status);
+       git_inflate_end(&zstream);
+
+       if (strict) {
+               struct blob *blob = lookup_blob(the_repository, &info->oid);
+
+               if (!blob)
+                       die(_("invalid blob object from stream"));
+               blob->object.flags |= FLAG_WRITTEN;
+       }
+       info->obj = NULL;
 }
 
 static int resolve_against_held(unsigned nr, const struct object_id *base,
@@ -359,10 +440,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
                oidread(&base_oid, fill(the_hash_algo->rawsz));
                use(the_hash_algo->rawsz);
                delta_data = get_data(delta_size);
-               if (dry_run || !delta_data) {
-                       free(delta_data);
+               if (!delta_data)
                        return;
-               }
                if (has_object_file(&base_oid))
                        ; /* Ok we have this one */
                else if (resolve_against_held(nr, &base_oid,
@@ -398,10 +477,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
                        die("offset value out of bound for delta base object");
 
                delta_data = get_data(delta_size);
-               if (dry_run || !delta_data) {
-                       free(delta_data);
+               if (!delta_data)
                        return;
-               }
                lo = 0;
                hi = nr;
                while (lo < hi) {
@@ -468,9 +545,14 @@ static void unpack_one(unsigned nr)
        }
 
        switch (type) {
+       case OBJ_BLOB:
+               if (!dry_run && size > big_file_threshold) {
+                       stream_blob(size, nr);
+                       return;
+               }
+               /* fallthrough */
        case OBJ_COMMIT:
        case OBJ_TREE:
-       case OBJ_BLOB:
        case OBJ_TAG:
                unpack_non_delta_entry(type, size, nr);
                return;
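
The new stream_blob() path above shows the shape of the streaming API: a
"struct input_stream" is just a read callback plus opaque data, and
stream_loose_object() keeps pulling from it until is_finished is set. A
minimal sketch of a different producer, feeding a single in-memory buffer,
could look like the following; the struct and function names are
hypothetical, and the object-store.h declarations of "struct input_stream"
and stream_loose_object() are assumed to match the usage above.

#include "cache.h"
#include "object-store.h"

/* hypothetical one-shot source for stream_loose_object() */
struct memory_source {
        const char *buf;
        unsigned long len;
};

static const void *feed_from_memory(struct input_stream *in_stream,
                                    unsigned long *readlen)
{
        struct memory_source *src = in_stream->data;

        if (in_stream->is_finished) {
                *readlen = 0;
                return NULL;
        }
        in_stream->is_finished = 1;
        *readlen = src->len;
        return src->buf;
}

static int write_blob_from_memory(const char *buf, unsigned long len,
                                  struct object_id *oid)
{
        struct memory_source src = { .buf = buf, .len = len };
        struct input_stream in_stream = {
                .read = feed_from_memory,
                .data = &src,
        };

        /* non-zero on failure, mirroring the check in stream_blob() above */
        return stream_loose_object(&in_stream, len, oid);
}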
index cd62eef240ee40262fe3308fb05ee57ca3fdc765..c6710b2552006df0a3bc70fcddcde81fa2476783 100644 (file)
@@ -1112,31 +1112,24 @@ static int repair(int ac, const char **av, const char *prefix)
 
 int cmd_worktree(int ac, const char **av, const char *prefix)
 {
+       parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
+               OPT_SUBCOMMAND("add", &fn, add),
+               OPT_SUBCOMMAND("prune", &fn, prune),
+               OPT_SUBCOMMAND("list", &fn, list),
+               OPT_SUBCOMMAND("lock", &fn, lock_worktree),
+               OPT_SUBCOMMAND("unlock", &fn, unlock_worktree),
+               OPT_SUBCOMMAND("move", &fn, move_worktree),
+               OPT_SUBCOMMAND("remove", &fn, remove_worktree),
+               OPT_SUBCOMMAND("repair", &fn, repair),
                OPT_END()
        };
 
        git_config(git_worktree_config, NULL);
 
-       if (ac < 2)
-               usage_with_options(worktree_usage, options);
        if (!prefix)
                prefix = "";
-       if (!strcmp(av[1], "add"))
-               return add(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "prune"))
-               return prune(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "list"))
-               return list(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "lock"))
-               return lock_worktree(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "unlock"))
-               return unlock_worktree(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "move"))
-               return move_worktree(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "remove"))
-               return remove_worktree(ac - 1, av + 1, prefix);
-       if (!strcmp(av[1], "repair"))
-               return repair(ac - 1, av + 1, prefix);
-       usage_with_options(worktree_usage, options);
+
+       ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+       return fn(ac, av, prefix);
 }
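
The cmd_worktree() conversion above is the whole recipe for the
OPT_SUBCOMMAND API: declare a parse_opt_subcommand_fn pointer, list the
subcommands as options, and let parse_options() pick the handler and strip
the subcommand from argv. A hedged sketch of the same pattern for a
hypothetical "git foo" builtin (every name below is made up for
illustration):

#include "builtin.h"
#include "parse-options.h"

static int foo_start(int argc, const char **argv, const char *prefix)
{
        return 0;       /* hypothetical subcommand body */
}

static int foo_stop(int argc, const char **argv, const char *prefix)
{
        return 0;       /* hypothetical subcommand body */
}

static const char * const foo_usage[] = {
        N_("git foo (start | stop)"),
        NULL
};

int cmd_foo(int argc, const char **argv, const char *prefix)
{
        parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
                OPT_SUBCOMMAND("start", &fn, foo_start),
                OPT_SUBCOMMAND("stop", &fn, foo_stop),
                OPT_END()
        };

        /* parse_options() rejects unknown subcommands and fills in "fn" */
        argc = parse_options(argc, argv, prefix, options, foo_usage, 0);
        return fn(argc, argv, prefix);
}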
index 98ec8938424406ef5973df797b8eca37fda71041..855b68ec23bdb1bdc85c1184a9c144c226f0245c 100644 (file)
@@ -340,6 +340,8 @@ void fsync_loose_object_bulk_checkin(int fd, const char *filename)
         */
        if (!bulk_fsync_objdir ||
            git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
+               if (errno == ENOSYS)
+                       warning(_("core.fsyncMethod = batch is unsupported on this platform"));
                fsync_or_die(fd, filename);
        }
 }
diff --git a/bundle-uri.c b/bundle-uri.c
new file mode 100644 (file)
index 0000000..4a8cc74
--- /dev/null
@@ -0,0 +1,168 @@
+#include "cache.h"
+#include "bundle-uri.h"
+#include "bundle.h"
+#include "object-store.h"
+#include "refs.h"
+#include "run-command.h"
+
+static int find_temp_filename(struct strbuf *name)
+{
+       int fd;
+       /*
+        * Find a temporary filename that is available. This is briefly
+        * racy, but unlikely to collide.
+        */
+       fd = odb_mkstemp(name, "bundles/tmp_uri_XXXXXX");
+       if (fd < 0) {
+               warning(_("failed to create temporary file"));
+               return -1;
+       }
+
+       close(fd);
+       unlink(name->buf);
+       return 0;
+}
+
+static int download_https_uri_to_file(const char *file, const char *uri)
+{
+       int result = 0;
+       struct child_process cp = CHILD_PROCESS_INIT;
+       FILE *child_in = NULL, *child_out = NULL;
+       struct strbuf line = STRBUF_INIT;
+       int found_get = 0;
+
+       strvec_pushl(&cp.args, "git-remote-https", uri, NULL);
+       cp.in = -1;
+       cp.out = -1;
+
+       if (start_command(&cp))
+               return 1;
+
+       child_in = fdopen(cp.in, "w");
+       if (!child_in) {
+               result = 1;
+               goto cleanup;
+       }
+
+       child_out = fdopen(cp.out, "r");
+       if (!child_out) {
+               result = 1;
+               goto cleanup;
+       }
+
+       fprintf(child_in, "capabilities\n");
+       fflush(child_in);
+
+       while (!strbuf_getline(&line, child_out)) {
+               if (!line.len)
+                       break;
+               if (!strcmp(line.buf, "get"))
+                       found_get = 1;
+       }
+       strbuf_release(&line);
+
+       if (!found_get) {
+               result = error(_("insufficient capabilities"));
+               goto cleanup;
+       }
+
+       fprintf(child_in, "get %s %s\n\n", uri, file);
+
+cleanup:
+       if (child_in)
+               fclose(child_in);
+       if (finish_command(&cp))
+               return 1;
+       if (child_out)
+               fclose(child_out);
+       return result;
+}
+
+static int copy_uri_to_file(const char *filename, const char *uri)
+{
+       const char *out;
+
+       if (starts_with(uri, "https:") ||
+           starts_with(uri, "http:"))
+               return download_https_uri_to_file(filename, uri);
+
+       if (skip_prefix(uri, "file://", &out))
+               uri = out;
+
+       /* Copy as a file */
+       return copy_file(filename, uri, 0);
+}
+
+static int unbundle_from_file(struct repository *r, const char *file)
+{
+       int result = 0;
+       int bundle_fd;
+       struct bundle_header header = BUNDLE_HEADER_INIT;
+       struct string_list_item *refname;
+       struct strbuf bundle_ref = STRBUF_INIT;
+       size_t bundle_prefix_len;
+
+       if ((bundle_fd = read_bundle_header(file, &header)) < 0)
+               return 1;
+
+       if ((result = unbundle(r, &header, bundle_fd, NULL)))
+               return 1;
+
+       /*
+        * Convert all refs/heads/ from the bundle into refs/bundles/
+        * in the local repository.
+        */
+       strbuf_addstr(&bundle_ref, "refs/bundles/");
+       bundle_prefix_len = bundle_ref.len;
+
+       for_each_string_list_item(refname, &header.references) {
+               struct object_id *oid = refname->util;
+               struct object_id old_oid;
+               const char *branch_name;
+               int has_old;
+
+               if (!skip_prefix(refname->string, "refs/heads/", &branch_name))
+                       continue;
+
+               strbuf_setlen(&bundle_ref, bundle_prefix_len);
+               strbuf_addstr(&bundle_ref, branch_name);
+
+               has_old = !read_ref(bundle_ref.buf, &old_oid);
+               update_ref("fetched bundle", bundle_ref.buf, oid,
+                          has_old ? &old_oid : NULL,
+                          REF_SKIP_OID_VERIFICATION,
+                          UPDATE_REFS_MSG_ON_ERR);
+       }
+
+       bundle_header_release(&header);
+       return result;
+}
+
+int fetch_bundle_uri(struct repository *r, const char *uri)
+{
+       int result = 0;
+       struct strbuf filename = STRBUF_INIT;
+
+       if ((result = find_temp_filename(&filename)))
+               goto cleanup;
+
+       if ((result = copy_uri_to_file(filename.buf, uri))) {
+               warning(_("failed to download bundle from URI '%s'"), uri);
+               goto cleanup;
+       }
+
+       if ((result = !is_bundle(filename.buf, 0))) {
+               warning(_("file at URI '%s' is not a bundle"), uri);
+               goto cleanup;
+       }
+
+       if ((result = unbundle_from_file(r, filename.buf))) {
+               warning(_("failed to unbundle bundle from URI '%s'"), uri);
+               goto cleanup;
+       }
+
+cleanup:
+       unlink(filename.buf);
+       strbuf_release(&filename);
+       return result;
+}
diff --git a/bundle-uri.h b/bundle-uri.h
new file mode 100644 (file)
index 0000000..8a152f1
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef BUNDLE_URI_H
+#define BUNDLE_URI_H
+
+struct repository;
+
+/**
+ * Fetch data from the given 'uri' and unbundle the bundle data found
+ * based on that information.
+ *
+ * Returns non-zero if no valid bundle could be fetched and unbundled from the given 'uri'.
+ */
+int fetch_bundle_uri(struct repository *r, const char *uri);
+
+#endif
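
fetch_bundle_uri() is the entire public surface of the new bundle-uri code:
download (or copy) the data, verify that it is a bundle, unbundle it, and
expose its refs/heads/* under refs/bundles/*. Below is a minimal sketch of a
caller that simply falls back to a normal fetch on failure; the helper name
and message are hypothetical.

#include "cache.h"
#include "bundle-uri.h"

static void try_bundle_seed(struct repository *r, const char *uri)
{
        if (fetch_bundle_uri(r, uri)) {
                warning(_("could not bootstrap from bundle at '%s'"), uri);
                return;
        }
        /*
         * On success the bundle's branches are available locally under
         * refs/bundles/*, so a later fetch can be largely incremental.
         */
}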
index 56db0b5026b675a77a3962802ebf38c083ae107b..c97111cccf2eda3a53be9dfce637fa2c35464f4d 100644 (file)
@@ -857,9 +857,7 @@ int cache_tree_matches_traversal(struct cache_tree *root,
        return 0;
 }
 
-static void verify_one_sparse(struct repository *r,
-                             struct index_state *istate,
-                             struct cache_tree *it,
+static void verify_one_sparse(struct index_state *istate,
                              struct strbuf *path,
                              int pos)
 {
@@ -910,7 +908,7 @@ static int verify_one(struct repository *r,
                        return 1;
 
                if (pos >= 0) {
-                       verify_one_sparse(r, istate, it, path, pos);
+                       verify_one_sparse(istate, path, pos);
                        return 0;
                }
 
diff --git a/cache.h b/cache.h
index 33fc8337c6aaef7942bd8b0c5b59637b115a9e44..26ed03bd6de626e497af549fe4fecfe27acaf699 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -475,8 +475,7 @@ extern struct index_state the_index;
 
 /*
  * Values in this enum (except those outside the 3 bit range) are part
- * of pack file format. See Documentation/technical/pack-format.txt
- * for more information.
+ * of pack file format. See gitformat-pack(5) for more information.
  */
 enum object_type {
        OBJ_BAD = -1,
@@ -1017,7 +1016,6 @@ void reset_shared_repository(void);
  * commands that do not want replace references to be active.
  */
 extern int read_replace_refs;
-extern char *git_replace_ref_base;
 
 /*
  * These values are used to help identify parts of a repository to fsync.
@@ -1697,6 +1695,12 @@ struct ident_split {
  */
 int split_ident_line(struct ident_split *, const char *, int);
 
+/*
+ * Given a commit or tag object buffer and the commit or tag headers, replaces
+ * the idents in the headers with their canonical versions using the mailmap mechanism.
+ */
+void apply_mailmap_to_header(struct strbuf *, const char **, struct string_list *);
+
 /*
  * Compare split idents for equality or strict ordering. Note that we
  * compare only the ident part of the line, ignoring any timestamp.
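
The apply_mailmap_to_header() declaration added above rewrites the ident
headers of a commit or tag buffer through the mailmap. A hedged sketch of a
caller follows, assuming mailmap.h's read_mailmap()/clear_mailmap() and the
usual ident-carrying headers; the helper name and header list are
illustrative, not taken from this diff.

#include "cache.h"
#include "mailmap.h"

static void canonicalize_idents(struct strbuf *object_buf)
{
        static const char *headers[] = { "author", "committer", "tagger", NULL };
        struct string_list mailmap = STRING_LIST_INIT_NODUP;

        read_mailmap(&mailmap);
        apply_mailmap_to_header(object_buf, headers, &mailmap);
        clear_mailmap(&mailmap);
}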
index f095519f8dba9672530430ced23eb5c66e7b8e67..1b0cc2b57db8667f3cbc9ae8b6223dd7997e7485 100755 (executable)
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -276,6 +276,7 @@ linux-musl)
 linux-leaks)
        export SANITIZE=leak
        export GIT_TEST_PASSING_SANITIZE_LEAK=true
+       export GIT_TEST_SANITIZE_LEAK_LOG=true
        ;;
 esac
 
diff --git a/color.c b/color.c
index 4f884c6b3dc1d98c99d3b5e5be0e990f58434d8e..f05d8a81d72115edbf47d34950a31615a15ea98e 100644 (file)
--- a/color.c
+++ b/color.c
@@ -415,7 +415,7 @@ int want_color_fd(int fd, int var)
        return var;
 }
 
-int git_color_config(const char *var, const char *value, void *cb)
+int git_color_config(const char *var, const char *value, void *cb UNUSED)
 {
        if (!strcmp(var, "color.ui")) {
                git_use_color_default = git_config_colorbool(var, value);
index 9bd6f3c48f4d1853e4cc5df7a9685696f63d72ce..f96bdabd7d95d8c00468a969fdb83dc724f49ede 100644 (file)
 # specified here, which can only have "guide" attribute and nothing
 # else.
 #
+# User-facing repository, command and file interfaces, such as the
+# documentation for the .gitmodules and .mailmap files, live in man
+# sections 5 and 7. These entries can only have the "userinterfaces"
+# attribute and nothing else.
+#
+# Git's file formats and protocols, such as the documentation for the
+# *.bundle format, live in man section 5. These entries can only have
+# the "developerinterfaces" attribute and nothing else.
+#
 ### command list (do not change this line)
 # command name                          category [category] [category]
 git-add                                 mainporcelain           worktree
@@ -192,24 +201,35 @@ git-verify-tag                          ancillaryinterrogators
 git-whatchanged                         ancillaryinterrogators          complete
 git-worktree                            mainporcelain
 git-write-tree                          plumbingmanipulators
-gitattributes                           guide
-gitcli                                  guide
+gitattributes                           userinterfaces
+gitcli                                  userinterfaces
 gitcore-tutorial                        guide
 gitcredentials                          guide
 gitcvs-migration                        guide
 gitdiffcore                             guide
 giteveryday                             guide
 gitfaq                                  guide
+gitformat-bundle                        developerinterfaces
+gitformat-chunk                         developerinterfaces
+gitformat-commit-graph                  developerinterfaces
+gitformat-index                         developerinterfaces
+gitformat-pack                          developerinterfaces
+gitformat-signature                     developerinterfaces
 gitglossary                             guide
-githooks                                guide
-gitignore                               guide
+githooks                                userinterfaces
+gitignore                               userinterfaces
 gitk                                    mainporcelain
-gitmailmap                              guide
-gitmodules                              guide
+gitmailmap                              userinterfaces
+gitmodules                              userinterfaces
 gitnamespaces                           guide
+gitprotocol-capabilities                developerinterfaces
+gitprotocol-common                      developerinterfaces
+gitprotocol-http                        developerinterfaces
+gitprotocol-pack                        developerinterfaces
+gitprotocol-v2                          developerinterfaces
 gitremote-helpers                       guide
-gitrepository-layout                    guide
-gitrevisions                            guide
+gitrepository-layout                    userinterfaces
+gitrevisions                            userinterfaces
 gitsubmodules                           guide
 gittutorial                             guide
 gittutorial-2                           guide
index a487d49c3e48a5eea0bee945d07cd2e37723994c..06f7d9e0b6aff7ae34f25f07f25c9feb80193e3a 100644 (file)
@@ -252,7 +252,8 @@ struct commit_graph *load_commit_graph_one_fd_st(struct repository *r,
        }
        graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
        close(fd);
-       ret = parse_commit_graph(r, graph_map, graph_size);
+       prepare_repo_settings(r);
+       ret = parse_commit_graph(&r->settings, graph_map, graph_size);
 
        if (ret)
                ret->odb = odb;
@@ -321,7 +322,7 @@ static int graph_read_bloom_data(const unsigned char *chunk_start,
        return 0;
 }
 
-struct commit_graph *parse_commit_graph(struct repository *r,
+struct commit_graph *parse_commit_graph(struct repo_settings *s,
                                        void *graph_map, size_t graph_size)
 {
        const unsigned char *data;
@@ -359,8 +360,6 @@ struct commit_graph *parse_commit_graph(struct repository *r,
                return NULL;
        }
 
-       prepare_repo_settings(r);
-
        graph = alloc_commit_graph();
 
        graph->hash_len = the_hash_algo->rawsz;
@@ -390,7 +389,7 @@ struct commit_graph *parse_commit_graph(struct repository *r,
        pair_chunk(cf, GRAPH_CHUNKID_EXTRAEDGES, &graph->chunk_extra_edges);
        pair_chunk(cf, GRAPH_CHUNKID_BASE, &graph->chunk_base_graphs);
 
-       if (get_configured_generation_version(r) >= 2) {
+       if (s->commit_graph_generation_version >= 2) {
                pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA,
                        &graph->chunk_generation_data);
                pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW,
@@ -400,7 +399,7 @@ struct commit_graph *parse_commit_graph(struct repository *r,
                        graph->read_generation_data = 1;
        }
 
-       if (r->settings.commit_graph_read_changed_paths) {
+       if (s->commit_graph_read_changed_paths) {
                pair_chunk(cf, GRAPH_CHUNKID_BLOOMINDEXES,
                           &graph->chunk_bloom_indexes);
                read_chunk(cf, GRAPH_CHUNKID_BLOOMDATA,
@@ -902,7 +901,7 @@ struct commit *lookup_commit_in_graph(struct repository *repo, const struct obje
        struct commit *commit;
        uint32_t pos;
 
-       if (!repo->objects->commit_graph)
+       if (!prepare_commit_graph(repo))
                return NULL;
        if (!search_commit_pos_in_graph(id, repo->objects->commit_graph, &pos))
                return NULL;
@@ -1640,9 +1639,9 @@ struct refs_cb_data {
        struct progress *progress;
 };
 
-static int add_ref_to_set(const char *refname,
+static int add_ref_to_set(const char *refname UNUSED,
                          const struct object_id *oid,
-                         int flags, void *cb_data)
+                         int flags UNUSED, void *cb_data)
 {
        struct object_id peeled;
        struct refs_cb_data *data = (struct refs_cb_data *)cb_data;
index f23b9e9026d2ea5c048351b92b5068a8a5a117e5..37faee6b66d59c693dc9ee829dbd5ad61654fe3c 100644 (file)
@@ -108,7 +108,12 @@ struct commit_graph *load_commit_graph_one_fd_st(struct repository *r,
                                                 struct object_directory *odb);
 struct commit_graph *read_commit_graph_one(struct repository *r,
                                           struct object_directory *odb);
-struct commit_graph *parse_commit_graph(struct repository *r,
+
+/*
+ * Callers should initialize the repo_settings with prepare_repo_settings()
+ * prior to calling parse_commit_graph().
+ */
+struct commit_graph *parse_commit_graph(struct repo_settings *s,
                                        void *graph_map, size_t graph_size);
 
 /*
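
The note above is the only contract change: parse_commit_graph() now takes
the generation-number and changed-path settings from an already prepared
"struct repo_settings" instead of preparing them itself. A minimal sketch
mirroring the updated call site in load_commit_graph_one_fd_st(); the helper
name is hypothetical.

#include "cache.h"
#include "repository.h"
#include "commit-graph.h"

static struct commit_graph *parse_mapped_graph(struct repository *r,
                                               void *graph_map,
                                               size_t graph_size)
{
        /* settings must be prepared before they are handed over */
        prepare_repo_settings(r);
        return parse_commit_graph(&r->settings, graph_map, graph_size);
}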
index 1fb1b2ea90c5953eb465d3b108c01f23fb442a32..89b8efc6116883d032912cdc38429b5a8ba5f2f6 100644 (file)
--- a/commit.c
+++ b/commit.c
@@ -642,10 +642,11 @@ struct commit_list * commit_list_insert_by_date(struct commit *item, struct comm
        return commit_list_insert(item, pp);
 }
 
-static int commit_list_compare_by_date(const void *a, const void *b)
+static int commit_list_compare_by_date(const struct commit_list *a,
+                                      const struct commit_list *b)
 {
-       timestamp_t a_date = ((const struct commit_list *)a)->item->date;
-       timestamp_t b_date = ((const struct commit_list *)b)->item->date;
+       timestamp_t a_date = a->item->date;
+       timestamp_t b_date = b->item->date;
        if (a_date < b_date)
                return 1;
        if (a_date > b_date)
@@ -653,20 +654,11 @@ static int commit_list_compare_by_date(const void *a, const void *b)
        return 0;
 }
 
-static void *commit_list_get_next(const void *a)
-{
-       return ((const struct commit_list *)a)->next;
-}
-
-static void commit_list_set_next(void *a, void *next)
-{
-       ((struct commit_list *)a)->next = next;
-}
+DEFINE_LIST_SORT(static, commit_list_sort, struct commit_list, next);
 
 void commit_list_sort_by_date(struct commit_list **list)
 {
-       *list = llist_mergesort(*list, commit_list_get_next, commit_list_set_next,
-                               commit_list_compare_by_date);
+       commit_list_sort(list, commit_list_compare_by_date);
 }
 
 struct commit *pop_most_recent_commit(struct commit_list **list,
@@ -959,8 +951,9 @@ static void add_one_commit(struct object_id *oid, struct rev_collect *revs)
 }
 
 static int collect_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
-                                 const char *ident, timestamp_t timestamp,
-                                 int tz, const char *message, void *cbdata)
+                                 const char *ident UNUSED,
+                                 timestamp_t timestamp UNUSED, int tz UNUSED,
+                                 const char *message UNUSED, void *cbdata)
 {
        struct rev_collect *revs = cbdata;
 
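
commit_list_sort_by_date() above shows DEFINE_LIST_SORT replacing the
untyped llist_mergesort() callbacks: a single macro line generates a typed
sort function for a singly linked list. A hedged sketch with a made-up list
type, assuming the macro comes from mergesort.h:

#include "git-compat-util.h"
#include "mergesort.h"

struct item {
        int value;
        struct item *next;
};

static int item_compare(const struct item *a, const struct item *b)
{
        return a->value - b->value;
}

/* generates: static void item_sort(struct item **list, cmp) */
DEFINE_LIST_SORT(static, item_sort, struct item, next);

static void sort_items(struct item **list)
{
        item_sort(list, item_compare);
}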
diff --git a/compat/disk.h b/compat/disk.h
new file mode 100644 (file)
index 0000000..50a32e3
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef COMPAT_DISK_H
+#define COMPAT_DISK_H
+
+#include "git-compat-util.h"
+
+static int get_disk_info(struct strbuf *out)
+{
+       struct strbuf buf = STRBUF_INIT;
+       int res = 0;
+
+#ifdef GIT_WINDOWS_NATIVE
+       char volume_name[MAX_PATH], fs_name[MAX_PATH];
+       DWORD serial_number, component_length, flags;
+       ULARGE_INTEGER avail2caller, total, avail;
+
+       strbuf_realpath(&buf, ".", 1);
+       if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) {
+               error(_("could not determine free disk size for '%s'"),
+                     buf.buf);
+               res = -1;
+               goto cleanup;
+       }
+
+       strbuf_setlen(&buf, offset_1st_component(buf.buf));
+       if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name),
+                                  &serial_number, &component_length, &flags,
+                                  fs_name, sizeof(fs_name))) {
+               error(_("could not get info for '%s'"), buf.buf);
+               res = -1;
+               goto cleanup;
+       }
+       strbuf_addf(out, "Available space on '%s': ", buf.buf);
+       strbuf_humanise_bytes(out, avail2caller.QuadPart);
+       strbuf_addch(out, '\n');
+#else
+       struct statvfs stat;
+
+       strbuf_realpath(&buf, ".", 1);
+       if (statvfs(buf.buf, &stat) < 0) {
+               error_errno(_("could not determine free disk size for '%s'"),
+                           buf.buf);
+               res = -1;
+               goto cleanup;
+       }
+
+       strbuf_addf(out, "Available space on '%s': ", buf.buf);
+       strbuf_humanise_bytes(out, (off_t)stat.f_bsize * (off_t)stat.f_bavail);
+       strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag);
+#endif
+
+cleanup:
+       strbuf_release(&buf);
+       return res;
+}
+
+#endif /* COMPAT_DISK_H */
index 907655720bb41e303e5ce86ae367778608b08548..e5ec5b0a9f73bcf04085b5d62d7b123690ce937e 100644 (file)
@@ -24,6 +24,59 @@ static enum fsmonitor_reason check_vfs4git(struct repository *r)
        return FSMONITOR_REASON_OK;
 }
 
+/*
+ * Check if monitoring remote working directories is allowed.
+ *
+ * By default, monitoring remote working directories is
+ * disabled.  Users may override this behavior in environments where
+ * they have proper support.
+ */
+static int check_config_allowremote(struct repository *r)
+{
+       int allow;
+
+       if (!repo_config_get_bool(r, "fsmonitor.allowremote", &allow))
+               return allow;
+
+       return -1; /* fsmonitor.allowremote not set */
+}
+
+/*
+ * Check remote working directory protocol.
+ *
+ * Error if client machine cannot get remote protocol information.
+ */
+static int check_remote_protocol(wchar_t *wpath)
+{
+       HANDLE h;
+       FILE_REMOTE_PROTOCOL_INFO proto_info;
+
+       h = CreateFileW(wpath, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING,
+                       FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+       if (h == INVALID_HANDLE_VALUE) {
+               error(_("[GLE %ld] unable to open for read '%ls'"),
+                     GetLastError(), wpath);
+               return -1;
+       }
+
+       if (!GetFileInformationByHandleEx(h, FileRemoteProtocolInfo,
+               &proto_info, sizeof(proto_info))) {
+               error(_("[GLE %ld] unable to get protocol information for '%ls'"),
+                     GetLastError(), wpath);
+               CloseHandle(h);
+               return -1;
+       }
+
+       CloseHandle(h);
+
+       trace_printf_key(&trace_fsmonitor,
+                               "check_remote_protocol('%ls') remote protocol %#8.8lx",
+                               wpath, proto_info.Protocol);
+
+       return 0;
+}
+
 /*
  * Remote working directories are problematic for FSMonitor.
  *
@@ -76,6 +129,7 @@ static enum fsmonitor_reason check_vfs4git(struct repository *r)
  */
 static enum fsmonitor_reason check_remote(struct repository *r)
 {
+       int ret;
        wchar_t wpath[MAX_PATH];
        wchar_t wfullpath[MAX_PATH];
        size_t wlen;
@@ -115,6 +169,20 @@ static enum fsmonitor_reason check_remote(struct repository *r)
                trace_printf_key(&trace_fsmonitor,
                                 "check_remote('%s') true",
                                 r->worktree);
+
+               ret = check_remote_protocol(wfullpath);
+               if (ret < 0)
+                       return FSMONITOR_REASON_ERROR;
+
+               switch (check_config_allowremote(r)) {
+               case 0: /* config overrides and disables */
+                       return FSMONITOR_REASON_REMOTE;
+               case 1: /* config overrides and enables */
+                       return FSMONITOR_REASON_OK;
+               default:
+                       break; /* config has no opinion */
+               }
+
                return FSMONITOR_REASON_REMOTE;
        }
 
diff --git a/compat/nonblock.c b/compat/nonblock.c
new file mode 100644 (file)
index 0000000..9694ebd
--- /dev/null
@@ -0,0 +1,50 @@
+#include "git-compat-util.h"
+#include "nonblock.h"
+
+#ifdef O_NONBLOCK
+
+int enable_pipe_nonblock(int fd)
+{
+       int flags = fcntl(fd, F_GETFL);
+       if (flags < 0)
+               return -1;
+       flags |= O_NONBLOCK;
+       return fcntl(fd, F_SETFL, flags);
+}
+
+#elif defined(GIT_WINDOWS_NATIVE)
+
+#include "win32.h"
+
+int enable_pipe_nonblock(int fd)
+{
+       HANDLE h = (HANDLE)_get_osfhandle(fd);
+       DWORD mode;
+       DWORD type = GetFileType(h);
+       if (type == FILE_TYPE_UNKNOWN && GetLastError() != NO_ERROR) {
+               errno = EBADF;
+               return -1;
+       }
+       if (type != FILE_TYPE_PIPE)
+               BUG("unsupported file type: %lu", type);
+       if (!GetNamedPipeHandleState(h, &mode, NULL, NULL, NULL, NULL, 0)) {
+               errno = err_win_to_posix(GetLastError());
+               return -1;
+       }
+       mode |= PIPE_NOWAIT;
+       if (!SetNamedPipeHandleState(h, &mode, NULL, NULL)) {
+               errno = err_win_to_posix(GetLastError());
+               return -1;
+       }
+       return 0;
+}
+
+#else
+
+int enable_pipe_nonblock(int fd)
+{
+       errno = ENOSYS;
+       return -1;
+}
+
+#endif
diff --git a/compat/nonblock.h b/compat/nonblock.h
new file mode 100644 (file)
index 0000000..af1a331
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef COMPAT_NONBLOCK_H
+#define COMPAT_NONBLOCK_H
+
+/*
+ * Enable non-blocking I/O for the pipe specified by the passed-in descriptor.
+ */
+int enable_pipe_nonblock(int fd);
+
+#endif
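
enable_pipe_nonblock() gives callers a single portable entry point for the
three cases above (O_NONBLOCK, Windows PIPE_NOWAIT, or ENOSYS). A minimal
usage sketch that makes the write end of a pipe non-blocking, so that a full
pipe causes a short write or EAGAIN rather than a stall; the helper name is
hypothetical.

#include "git-compat-util.h"
#include "compat/nonblock.h"

static int open_nonblocking_pipe(int fd[2])
{
        if (pipe(fd) < 0)
                return -1;
        if (enable_pipe_nonblock(fd[1]) < 0) {
                close(fd[0]);
                close(fd[1]);
                return -1;
        }
        return 0;
}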
index 7db330c52dcc2326f245f8984e259f0a8dac1294..ea490a7ced431a798629ca86ac904052d99f839a 100644 (file)
@@ -477,7 +477,7 @@ struct escape_sequence_entry {
        char sequence[FLEX_ARRAY];
 };
 
-static int sequence_entry_cmp(const void *hashmap_cmp_fn_data,
+static int sequence_entry_cmp(const void *hashmap_cmp_fn_data UNUSED,
                              const struct escape_sequence_entry *e1,
                              const struct escape_sequence_entry *e2,
                              const void *keydata)
index 9b0e9c93285fb33a69ab3ee0b20ae95ad2e202e1..cbb5a3bab74f6f6f292c1628ed889e57f0157d10 100644 (file)
--- a/config.c
+++ b/config.c
@@ -81,6 +81,17 @@ static enum config_scope current_parsing_scope;
 static int pack_compression_seen;
 static int zlib_compression_seen;
 
+/*
+ * Config that comes from trusted scopes, namely:
+ * - CONFIG_SCOPE_SYSTEM (e.g. /etc/gitconfig)
+ * - CONFIG_SCOPE_GLOBAL (e.g. $HOME/.gitconfig, $XDG_CONFIG_HOME/git)
+ * - CONFIG_SCOPE_COMMAND (e.g. "-c" option, environment variables)
+ *
+ * This is declared here for code cleanliness, but unlike the other
+ * static variables, this does not hold config parser state.
+ */
+static struct config_set protected_config;
+
 static int config_file_fgetc(struct config_source *conf)
 {
        return getc_unlocked(conf->u.file);
@@ -351,7 +362,8 @@ static void populate_remote_urls(struct config_include_data *inc)
        current_parsing_scope = store_scope;
 }
 
-static int forbid_remote_url(const char *var, const char *value, void *data)
+static int forbid_remote_url(const char *var, const char *value UNUSED,
+                            void *data UNUSED)
 {
        const char *remote_name;
        size_t remote_name_len;
@@ -1968,6 +1980,8 @@ int git_config_from_file_with_options(config_fn_t fn, const char *filename,
        int ret = -1;
        FILE *f;
 
+       if (!filename)
+               BUG("filename cannot be NULL");
        f = fopen_or_warn(filename, "r");
        if (f) {
                ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename,
@@ -2324,10 +2338,10 @@ static int configset_add_value(struct config_set *cs, const char *key, const cha
        return 0;
 }
 
-static int config_set_element_cmp(const void *unused_cmp_data,
+static int config_set_element_cmp(const void *cmp_data UNUSED,
                                  const struct hashmap_entry *eptr,
                                  const struct hashmap_entry *entry_or_key,
-                                 const void *unused_keydata)
+                                 const void *keydata UNUSED)
 {
        const struct config_set_element *e1, *e2;
 
@@ -2378,6 +2392,11 @@ int git_configset_add_file(struct config_set *cs, const char *filename)
        return git_config_from_file(config_set_callback, filename, cs);
 }
 
+int git_configset_add_parameters(struct config_set *cs)
+{
+       return git_config_from_parameters(config_set_callback, cs);
+}
+
 int git_configset_get_value(struct config_set *cs, const char *key, const char **value)
 {
        const struct string_list *values = NULL;
@@ -2619,6 +2638,36 @@ int repo_config_get_pathname(struct repository *repo,
        return ret;
 }
 
+/* Read values into protected_config. */
+static void read_protected_config(void)
+{
+       char *xdg_config = NULL, *user_config = NULL, *system_config = NULL;
+
+       git_configset_init(&protected_config);
+
+       system_config = git_system_config();
+       git_global_config(&user_config, &xdg_config);
+
+       if (system_config)
+               git_configset_add_file(&protected_config, system_config);
+       if (xdg_config)
+               git_configset_add_file(&protected_config, xdg_config);
+       if (user_config)
+               git_configset_add_file(&protected_config, user_config);
+       git_configset_add_parameters(&protected_config);
+
+       free(system_config);
+       free(xdg_config);
+       free(user_config);
+}
+
+void git_protected_config(config_fn_t fn, void *data)
+{
+       if (!protected_config.hash_initialized)
+               read_protected_config();
+       configset_iter(&protected_config, fn, data);
+}
+
 /* Functions used historically to read configuration from 'the_repository' */
 void git_config(config_fn_t fn, void *data)
 {
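
git_protected_config() iterates only over the trusted scopes listed in the
comment above, which makes it the natural hook for settings that must not be
overridden from inside a repository. A hedged sketch of a caller; the
variable name is made up purely for illustration.

#include "cache.h"
#include "config.h"

static int check_trusted_setting(const char *var, const char *value,
                                 void *data)
{
        int *enabled = data;

        if (!strcmp(var, "example.trustedknob"))
                *enabled = git_config_bool(var, value);
        return 0;
}

static int trusted_knob_enabled(void)
{
        int enabled = 0;

        /* consults system, global and command scopes only */
        git_protected_config(check_trusted_setting, &enabled);
        return enabled;
}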
index 7654f61c6349a6e8deec22f4e2c79b35d45059ba..ca994d771475a961da458454beabbb66bf63b949 100644 (file)
--- a/config.h
+++ b/config.h
@@ -446,6 +446,15 @@ void git_configset_init(struct config_set *cs);
  */
 int git_configset_add_file(struct config_set *cs, const char *filename);
 
+/**
+ * Parses command line options and environment variables, and adds the
+ * variable-value pairs to the `config_set`. Returns 0 on success, or -1
+ * if there is an error in parsing. The caller decides whether to free
+ * the incomplete configset or continue using it when the function
+ * returns -1.
+ */
+int git_configset_add_parameters(struct config_set *cs);
+
 /**
  * Finds and returns the value list, sorted in order of increasing priority
  * for the configuration variable `key` and config set `cs`. When the
@@ -505,6 +514,13 @@ int repo_config_get_maybe_bool(struct repository *repo,
 int repo_config_get_pathname(struct repository *repo,
                             const char *key, const char **dest);
 
+/*
+ * Functions for reading protected config. By definition, protected
+ * config ignores repository config, so these do not take a `struct
+ * repository` parameter.
+ */
+void git_protected_config(config_fn_t fn, void *data);
+
 /**
  * Querying For Specific Variables
  * -------------------------------
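
git_configset_add_parameters() mirrors git_configset_add_file() for the
command-line and environment scope, which is what read_protected_config()
relies on. A small sketch that queries just that scope on its own; the
helper and its error handling are illustrative assumptions.

#include "cache.h"
#include "config.h"

static char *get_command_line_value(const char *key)
{
        struct config_set cs;
        const char *value;
        char *ret = NULL;

        git_configset_init(&cs);
        if (git_configset_add_parameters(&cs) < 0)
                warning(_("error parsing command-line config"));
        else if (!git_configset_get_value(&cs, key, &value))
                ret = xstrdup(value);   /* copy before clearing the set */
        git_configset_clear(&cs);
        return ret;
}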
index 7dcd04820429481adf3d504ca187c7d5263f3cee..38ff86678a03a96dffa57507364a6f706ddb44d0 100644 (file)
@@ -237,9 +237,6 @@ AC_MSG_NOTICE([CHECKS for site configuration])
 # tests.  These tests take up a significant amount of the total test time
 # but are not needed unless you plan to talk to SVN repos.
 #
-# Define PPC_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine optimized for PowerPC.
-#
 # Define NO_OPENSSL environment variable if you do not have OpenSSL.
 #
 # Define OPENSSLDIR=/foo/bar if your openssl header and library files are in
index 9a4f00cb1bdc2e189fa3ccb41d842988e2430a92..aa759379508895ba002c531a13bde3f8995f9251 100644 (file)
@@ -1,60 +1,58 @@
 @@
-expression dst, src, n, E;
+type T;
+T *dst_ptr;
+T *src_ptr;
+expression n;
 @@
-  memcpy(dst, src, n * sizeof(
-- E[...]
-+ *(E)
-  ))
+- memcpy(dst_ptr, src_ptr, (n) * \( sizeof(T)
+-                                \| sizeof(*(dst_ptr))
+-                                \| sizeof(*(src_ptr))
+-                                \| sizeof(dst_ptr[...])
+-                                \| sizeof(src_ptr[...])
+-                                \) )
++ COPY_ARRAY(dst_ptr, src_ptr, n)
 
 @@
 type T;
-T *ptr;
-T[] arr;
-expression E, n;
+T *dst_ptr;
+T[] src_arr;
+expression n;
 @@
-(
-  memcpy(ptr, E,
-- n * sizeof(*(ptr))
-+ n * sizeof(T)
-  )
-|
-  memcpy(arr, E,
-- n * sizeof(*(arr))
-+ n * sizeof(T)
-  )
-|
-  memcpy(E, ptr,
-- n * sizeof(*(ptr))
-+ n * sizeof(T)
-  )
-|
-  memcpy(E, arr,
-- n * sizeof(*(arr))
-+ n * sizeof(T)
-  )
-)
+- memcpy(dst_ptr, src_arr, (n) * \( sizeof(T)
+-                                \| sizeof(*(dst_ptr))
+-                                \| sizeof(*(src_arr))
+-                                \| sizeof(dst_ptr[...])
+-                                \| sizeof(src_arr[...])
+-                                \) )
++ COPY_ARRAY(dst_ptr, src_arr, n)
 
 @@
 type T;
-T *dst_ptr;
+T[] dst_arr;
 T *src_ptr;
+expression n;
+@@
+- memcpy(dst_arr, src_ptr, (n) * \( sizeof(T)
+-                                \| sizeof(*(dst_arr))
+-                                \| sizeof(*(src_ptr))
+-                                \| sizeof(dst_arr[...])
+-                                \| sizeof(src_ptr[...])
+-                                \) )
++ COPY_ARRAY(dst_arr, src_ptr, n)
+
+@@
+type T;
 T[] dst_arr;
 T[] src_arr;
 expression n;
 @@
-(
-- memcpy(dst_ptr, src_ptr, (n) * sizeof(T))
-+ COPY_ARRAY(dst_ptr, src_ptr, n)
-|
-- memcpy(dst_ptr, src_arr, (n) * sizeof(T))
-+ COPY_ARRAY(dst_ptr, src_arr, n)
-|
-- memcpy(dst_arr, src_ptr, (n) * sizeof(T))
-+ COPY_ARRAY(dst_arr, src_ptr, n)
-|
-- memcpy(dst_arr, src_arr, (n) * sizeof(T))
+- memcpy(dst_arr, src_arr, (n) * \( sizeof(T)
+-                                \| sizeof(*(dst_arr))
+-                                \| sizeof(*(src_arr))
+-                                \| sizeof(dst_arr[...])
+-                                \| sizeof(src_arr[...])
+-                                \) )
 + COPY_ARRAY(dst_arr, src_arr, n)
-)
 
 @@
 type T;
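
The widened rules above now also match memcpy() calls that spell the element
size as sizeof(*dst), sizeof(*src) or sizeof(dst[...]) rather than
sizeof(T). A tiny before/after illustration of the rewrite they produce, in
the style of the tests/ files below (the function is hypothetical):

void copy_ints(int *dst, const int *src, size_t n)
{
        /* before: memcpy(dst, src, n * sizeof(*dst)); */
        COPY_ARRAY(dst, src, n);
}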
diff --git a/contrib/coccinelle/tests/free.c b/contrib/coccinelle/tests/free.c
new file mode 100644 (file)
index 0000000..96d4abc
--- /dev/null
@@ -0,0 +1,11 @@
+int use_FREE_AND_NULL(int *v)
+{
+       free(*v);
+       *v = NULL;
+}
+
+int need_no_if(int *v)
+{
+       if (v)
+               free(v);
+}
diff --git a/contrib/coccinelle/tests/free.res b/contrib/coccinelle/tests/free.res
new file mode 100644 (file)
index 0000000..f90fd9f
--- /dev/null
@@ -0,0 +1,9 @@
+int use_FREE_AND_NULL(int *v)
+{
+       FREE_AND_NULL(*v);
+}
+
+int need_no_if(int *v)
+{
+       free(v);
+}
diff --git a/contrib/coccinelle/tests/unused.c b/contrib/coccinelle/tests/unused.c
new file mode 100644 (file)
index 0000000..8294d73
--- /dev/null
@@ -0,0 +1,82 @@
+void test_strbuf(void)
+{
+       struct strbuf sb1 = STRBUF_INIT;
+       struct strbuf sb2 = STRBUF_INIT;
+       struct strbuf sb3 = STRBUF_INIT;
+       struct strbuf sb4 = STRBUF_INIT;
+       struct strbuf sb5;
+       struct strbuf sb6 = { 0 };
+       struct strbuf sb7 = STRBUF_INIT;
+       struct strbuf sb8 = STRBUF_INIT;
+       struct strbuf *sp1;
+       struct strbuf *sp2;
+       struct strbuf *sp3;
+       struct strbuf *sp4 = xmalloc(sizeof(struct strbuf));
+       struct strbuf *sp5 = xmalloc(sizeof(struct strbuf));
+       struct strbuf *sp6 = xmalloc(sizeof(struct strbuf));
+       struct strbuf *sp7;
+
+       strbuf_init(&sb5, 0);
+       strbuf_init(sp1, 0);
+       strbuf_init(sp2, 0);
+       strbuf_init(sp3, 0);
+       strbuf_init(sp4, 0);
+       strbuf_init(sp5, 0);
+       strbuf_init(sp6, 0);
+       strbuf_init(sp7, 0);
+       sp7 = xmalloc(sizeof(struct strbuf));
+
+       use_before(&sb3);
+       use_as_str("%s", sb7.buf);
+       use_as_str("%s", sp1->buf);
+       use_as_str("%s", sp6->buf);
+       pass_pp(&sp3);
+
+       strbuf_release(&sb1);
+       strbuf_reset(&sb2);
+       strbuf_release(&sb3);
+       strbuf_release(&sb4);
+       strbuf_release(&sb5);
+       strbuf_release(&sb6);
+       strbuf_release(&sb7);
+       strbuf_release(sp1);
+       strbuf_release(sp2);
+       strbuf_release(sp3);
+       strbuf_release(sp4);
+       strbuf_release(sp5);
+       strbuf_release(sp6);
+       strbuf_release(sp7);
+
+       use_after(&sb4);
+
+       if (when_strict())
+               return;
+       strbuf_release(&sb8);
+}
+
+void test_other(void)
+{
+       struct string_list l = STRING_LIST_INIT_DUP;
+       struct strbuf sb = STRBUF_INIT;
+
+       string_list_clear(&l, 0);
+       string_list_clear(&sb, 0);
+}
+
+void test_worktrees(void)
+{
+       struct worktree **w1 = get_worktrees();
+       struct worktree **w2 = get_worktrees();
+       struct worktree **w3;
+       struct worktree **w4;
+
+       w3 = get_worktrees();
+       w4 = get_worktrees();
+
+       use_it(w4);
+
+       free_worktrees(w1);
+       free_worktrees(w2);
+       free_worktrees(w3);
+       free_worktrees(w4);
+}
diff --git a/contrib/coccinelle/tests/unused.res b/contrib/coccinelle/tests/unused.res
new file mode 100644 (file)
index 0000000..6d3e745
--- /dev/null
@@ -0,0 +1,45 @@
+void test_strbuf(void)
+{
+       struct strbuf sb3 = STRBUF_INIT;
+       struct strbuf sb4 = STRBUF_INIT;
+       struct strbuf sb7 = STRBUF_INIT;
+       struct strbuf *sp1;
+       struct strbuf *sp3;
+       struct strbuf *sp6 = xmalloc(sizeof(struct strbuf));
+       strbuf_init(sp1, 0);
+       strbuf_init(sp3, 0);
+       strbuf_init(sp6, 0);
+
+       use_before(&sb3);
+       use_as_str("%s", sb7.buf);
+       use_as_str("%s", sp1->buf);
+       use_as_str("%s", sp6->buf);
+       pass_pp(&sp3);
+
+       strbuf_release(&sb3);
+       strbuf_release(&sb4);
+       strbuf_release(&sb7);
+       strbuf_release(sp1);
+       strbuf_release(sp3);
+       strbuf_release(sp6);
+
+       use_after(&sb4);
+
+       if (when_strict())
+               return;
+}
+
+void test_other(void)
+{
+}
+
+void test_worktrees(void)
+{
+       struct worktree **w4;
+
+       w4 = get_worktrees();
+
+       use_it(w4);
+
+       free_worktrees(w4);
+}
diff --git a/contrib/coccinelle/unused.cocci b/contrib/coccinelle/unused.cocci
new file mode 100644 (file)
index 0000000..d84046f
--- /dev/null
@@ -0,0 +1,43 @@
+// This rule finds sequences of "unused" declarations and uses of a
+// variable, where "unused" is defined to include only calling the
+// equivalent of alloc, init & free functions on the variable.
+@@
+type T;
+identifier I;
+// STRBUF_INIT, but also e.g. STRING_LIST_INIT_DUP (so no anchoring)
+constant INIT_MACRO =~ "_INIT";
+identifier MALLOC1 =~ "^x?[mc]alloc$";
+identifier INIT_ASSIGN1 =~ "^get_worktrees$";
+identifier INIT_CALL1 =~ "^[a-z_]*_init$";
+identifier REL1 =~ "^[a-z_]*_(release|reset|clear|free)$";
+identifier REL2 =~ "^(release|clear|free)_[a-z_]*$";
+@@
+
+(
+- T I;
+|
+- T I = { 0 };
+|
+- T I = INIT_MACRO;
+|
+- T I = MALLOC1(...);
+|
+- T I = INIT_ASSIGN1(...);
+)
+
+<... when != \( I \| &I \)
+(
+- \( INIT_CALL1 \)( \( I \| &I \), ...);
+|
+- I = \( INIT_ASSIGN1 \)(...);
+|
+- I = MALLOC1(...);
+)
+...>
+
+(
+- \( REL1 \| REL2 \)( \( I \| &I \), ...);
+|
+- \( REL1 \| REL2 \)( \( &I \| I \) );
+)
+  ... when != \( I \| &I \)
index 1435548e004687fcc21a9e1b8922aa3bd39b8515..57972c2845c135dc45aeb560d289aade4caa5e5d 100644 (file)
 # single '?' character by setting GIT_PS1_COMPRESSSPARSESTATE, or omitted
 # by setting GIT_PS1_OMITSPARSESTATE.
 #
+# If you would like to see a notification on the prompt when there are
+# unresolved conflicts, set GIT_PS1_SHOWCONFLICTSTATE to "yes". The
+# prompt will include "|CONFLICT".
+#
 # If you would like to see more information about the identity of
 # commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
 # to one of these values:
@@ -508,6 +512,12 @@ __git_ps1 ()
                r="$r $step/$total"
        fi
 
+       local conflict="" # state indicator for unresolved conflicts
+       if [[ "${GIT_PS1_SHOWCONFLICTSTATE}" == "yes" ]] &&
+          [[ $(git ls-files --unmerged 2>/dev/null) ]]; then
+               conflict="|CONFLICT"
+       fi
+
        local w=""
        local i=""
        local s=""
@@ -572,7 +582,7 @@ __git_ps1 ()
        fi
 
        local f="$h$w$i$s$u$p"
-       local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}"
+       local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}${conflict}"
 
        if [ $pcmode = yes ]; then
                if [ "${__git_printf_supports_v-}" != yes ]; then
index 07227d02287618394d649ae915a1b42d93140950..bf2777308a56b6f233553196821ab2750e269e8d 100755 (executable)
@@ -3,16 +3,9 @@
        cd ../../../t
        test_description='git-credential-netrc'
        . ./test-lib.sh
+       . "$TEST_DIRECTORY"/lib-perl.sh
 
-       if ! test_have_prereq PERL; then
-               skip_all='skipping perl interface tests, perl not available'
-               test_done
-       fi
-
-       perl -MTest::More -e 0 2>/dev/null || {
-               skip_all="Perl Test::More unavailable, skipping test"
-               test_done
-       }
+       skip_all_if_no_Test_More
 
        # set up test repository
 
                'set up test repository' \
                'git config --add gpg.program test.git-config-gpg'
 
-       # The external test will outputs its own plan
-       test_external_has_tap=1
-
        export PERL5LIB="$GITPERLLIB"
-       test_external \
-               'git-credential-netrc' \
+       test_expect_success 'git-credential-netrc' '
                perl "$GIT_BUILD_DIR"/contrib/credential/netrc/test.pl
+       '
 
        test_done
 )
diff --git a/contrib/scalar/README.md b/contrib/scalar/README.md
deleted file mode 100644 (file)
index 634b577..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-# Scalar - an opinionated repository management tool
-
-Scalar is an add-on to Git that helps users take advantage of advanced
-performance features in Git. Originally implemented in C# using .NET Core,
-based on the learnings from the VFS for Git project, most of the techniques
-developed by the Scalar project have been integrated into core Git already:
-
-* partial clone,
-* commit graphs,
-* multi-pack index,
-* sparse checkout (cone mode),
-* scheduled background maintenance,
-* etc
-
-This directory contains the remaining parts of Scalar that are not (yet) in
-core Git.
-
-## Roadmap
-
-The idea is to populate this directory via incremental patch series and
-eventually move to a top-level directory next to `gitk-git/` and to `git-gui/`. The
-current plan involves the following patch series:
-
-- `scalar-the-beginning`: The initial patch series which sets up
-  `contrib/scalar/` and populates it with a minimal `scalar` command that
-  demonstrates the fundamental ideas.
-
-- `scalar-c-and-C`: The `scalar` command learns about two options that can be
-  specified before the command, `-c <key>=<value>` and `-C <directory>`.
-
-- `scalar-diagnose`: The `scalar` command is taught the `diagnose` subcommand.
-
-- `scalar-and-builtin-fsmonitor`: The built-in FSMonitor is enabled in `scalar
-  register` and in `scalar clone`, for an enormous performance boost when
-  working in large worktrees. This patch series necessarily depends on Jeff
-  Hostetler's FSMonitor patch series to be integrated into Git.
-
-- `scalar-gentler-config-locking`: Scalar enlistments are registered in the
-  user's Git config. This usually does not represent any problem because it is
-  rare for a user to register an enlistment. However, in Scalar's functional
-  tests, Scalar enlistments are created galore, and in parallel, which can lead
-  to lock contention. This patch series works around that problem by re-trying
-  to lock the config file in a gentle fashion.
-
-- `scalar-extra-docs`: Add some extensive documentation that has been written
-  in the original Scalar project (all subject to discussion, of course).
-
-- `optionally-install-scalar`: Now that Scalar is feature (and documentation)
-  complete and is verified in CI builds, let's offer to install it.
-
-- `move-scalar-to-toplevel`: Now that Scalar is complete, let's move it next to
-  `gitk-git/` and to `git-gui/`, making it a top-level command.
-
-The following two patch series exist in Microsoft's fork of Git and are
-publicly available. There is no current plan to upstream them, not because I
-want to withhold these patches, but because I don't think the Git community is
-interested in these patches.
-
-There are some interesting ideas there, but the implementation is too specific
-to Azure Repos and/or VFS for Git to be of much help in general (and also: my
-colleagues tried to upstream some patches already and the enthusiasm for
-integrating things related to Azure Repos and VFS for Git can be summarized in
-very, very few words).
-
-These still exist mainly because the GVFS protocol is what Azure Repos has
-instead of partial clone, while Git is focused on improving partial clone:
-
-- `scalar-with-gvfs`: The primary purpose of this patch series is to support
-  existing Scalar users whose repositories are hosted in Azure Repos (which
-  does not support Git's partial clones, but supports its predecessor, the GVFS
-  protocol, which is used by Scalar to emulate the partial clone).
-
-  Since the GVFS protocol will never be supported by core Git, this patch
-  series will remain in Microsoft's fork of Git.
-
-- `run-scalar-functional-tests`: The Scalar project developed a quite
-  comprehensive set of integration tests (or, "Functional Tests"). They are the
-  sole remaining part of the original C#-based Scalar project, and this patch
-  adds a GitHub workflow that runs them all.
-
-  Since the tests partially depend on features that are only provided in the
-  `scalar-with-gvfs` patch series, this patch cannot be upstreamed.
index 28176914e57b91e823d6197a926909a87cfe1d16..642d16124eb20c28de20e8601e99f1b347c7fa75 100644 (file)
@@ -7,27 +7,13 @@
 #include "parse-options.h"
 #include "config.h"
 #include "run-command.h"
+#include "simple-ipc.h"
+#include "fsmonitor-ipc.h"
+#include "fsmonitor-settings.h"
 #include "refs.h"
 #include "dir.h"
 #include "packfile.h"
 #include "help.h"
-#include "archive.h"
-#include "object-store.h"
-
-/*
- * Remove the deepest subdirectory in the provided path string. Path must not
- * include a trailing path separator. Returns 1 if parent directory found,
- * otherwise 0.
- */
-static int strbuf_parent_directory(struct strbuf *buf)
-{
-       size_t len = buf->len;
-       size_t offset = offset_1st_component(buf->buf);
-       char *path_sep = find_last_dir_sep(buf->buf + offset);
-       strbuf_setlen(buf, path_sep ? path_sep - buf->buf : offset);
-
-       return buf->len < len;
-}
 
 static void setup_enlistment_directory(int argc, const char **argv,
                                       const char * const *usagestr,
@@ -35,8 +21,8 @@ static void setup_enlistment_directory(int argc, const char **argv,
                                       struct strbuf *enlistment_root)
 {
        struct strbuf path = STRBUF_INIT;
-       char *root;
-       int enlistment_found = 0;
+       int enlistment_is_repo_parent = 0;
+       size_t len;
 
        if (startup_info->have_repository)
                BUG("gitdir already set up?!?");
@@ -49,51 +35,36 @@ static void setup_enlistment_directory(int argc, const char **argv,
                strbuf_add_absolute_path(&path, argv[0]);
                if (!is_directory(path.buf))
                        die(_("'%s' does not exist"), path.buf);
+               if (chdir(path.buf) < 0)
+                       die_errno(_("could not switch to '%s'"), path.buf);
        } else if (strbuf_getcwd(&path) < 0)
                die(_("need a working directory"));
 
        strbuf_trim_trailing_dir_sep(&path);
-       do {
-               const size_t len = path.len;
-
-               /* check if currently in enlistment root with src/ workdir */
-               strbuf_addstr(&path, "/src");
-               if (is_nonbare_repository_dir(&path)) {
-                       if (enlistment_root)
-                               strbuf_add(enlistment_root, path.buf, len);
 
-                       enlistment_found = 1;
-                       break;
-               }
-
-               /* reset to original path */
-               strbuf_setlen(&path, len);
-
-               /* check if currently in workdir */
-               if (is_nonbare_repository_dir(&path)) {
-                       if (enlistment_root) {
-                               /*
-                                * If the worktree's directory's name is `src`, the enlistment is the
-                                * parent directory, otherwise it is identical to the worktree.
-                                */
-                               root = strip_path_suffix(path.buf, "src");
-                               strbuf_addstr(enlistment_root, root ? root : path.buf);
-                               free(root);
-                       }
+       /* check if currently in enlistment root with src/ workdir */
+       len = path.len;
+       strbuf_addstr(&path, "/src");
+       if (is_nonbare_repository_dir(&path)) {
+               enlistment_is_repo_parent = 1;
+               if (chdir(path.buf) < 0)
+                       die_errno(_("could not switch to '%s'"), path.buf);
+       }
+       strbuf_setlen(&path, len);
 
-                       enlistment_found = 1;
-                       break;
-               }
-       } while (strbuf_parent_directory(&path));
+       setup_git_directory();
 
-       if (!enlistment_found)
-               die(_("could not find enlistment root"));
+       if (!the_repository->worktree)
+               die(_("Scalar enlistments require a worktree"));
 
-       if (chdir(path.buf) < 0)
-               die_errno(_("could not switch to '%s'"), path.buf);
+       if (enlistment_root) {
+               if (enlistment_is_repo_parent)
+                       strbuf_addbuf(enlistment_root, &path);
+               else
+                       strbuf_addstr(enlistment_root, the_repository->worktree);
+       }
 
        strbuf_release(&path);
-       setup_git_directory();
 }
 
 static int run_git(const char *arg, ...)
@@ -115,13 +86,39 @@ static int run_git(const char *arg, ...)
        return res;
 }
 
+struct scalar_config {
+       const char *key;
+       const char *value;
+       int overwrite_on_reconfigure;
+};
+
+static int set_scalar_config(const struct scalar_config *config, int reconfigure)
+{
+       char *value = NULL;
+       int res;
+
+       if ((reconfigure && config->overwrite_on_reconfigure) ||
+           git_config_get_string(config->key, &value)) {
+               trace2_data_string("scalar", the_repository, config->key, "created");
+               res = git_config_set_gently(config->key, config->value);
+       } else {
+               trace2_data_string("scalar", the_repository, config->key, "exists");
+               res = 0;
+       }
+
+       free(value);
+       return res;
+}
+
+static int have_fsmonitor_support(void)
+{
+       return fsmonitor_ipc__is_supported() &&
+              fsm_settings__get_reason(the_repository) == FSMONITOR_REASON_OK;
+}
+
 static int set_recommended_config(int reconfigure)
 {
-       struct {
-               const char *key;
-               const char *value;
-               int overwrite_on_reconfigure;
-       } config[] = {
+       struct scalar_config config[] = {
                /* Required */
                { "am.keepCR", "true", 1 },
                { "core.FSCache", "true", 1 },
@@ -175,17 +172,16 @@ static int set_recommended_config(int reconfigure)
        char *value;
 
        for (i = 0; config[i].key; i++) {
-               if ((reconfigure && config[i].overwrite_on_reconfigure) ||
-                   git_config_get_string(config[i].key, &value)) {
-                       trace2_data_string("scalar", the_repository, config[i].key, "created");
-                       if (git_config_set_gently(config[i].key,
-                                                 config[i].value) < 0)
-                               return error(_("could not configure %s=%s"),
-                                            config[i].key, config[i].value);
-               } else {
-                       trace2_data_string("scalar", the_repository, config[i].key, "exists");
-                       free(value);
-               }
+               if (set_scalar_config(config + i, reconfigure))
+                       return error(_("could not configure %s=%s"),
+                                    config[i].key, config[i].value);
+       }
+
+       if (have_fsmonitor_support()) {
+               struct scalar_config fsmonitor = { "core.fsmonitor", "true" };
+               if (set_scalar_config(&fsmonitor, reconfigure))
+                       return error(_("could not configure %s=%s"),
+                                    fsmonitor.key, fsmonitor.value);
        }
 
        /*
@@ -236,123 +232,55 @@ static int add_or_remove_enlistment(int add)
                       "scalar.repo", the_repository->worktree, NULL);
 }
 
-static int register_dir(void)
+static int start_fsmonitor_daemon(void)
 {
-       int res = add_or_remove_enlistment(1);
-
-       if (!res)
-               res = set_recommended_config(0);
+       assert(have_fsmonitor_support());
 
-       if (!res)
-               res = toggle_maintenance(1);
+       if (fsmonitor_ipc__get_state() != IPC_STATE__LISTENING)
+               return run_git("fsmonitor--daemon", "start", NULL);
 
-       return res;
+       return 0;
 }
 
-static int unregister_dir(void)
+static int stop_fsmonitor_daemon(void)
 {
-       int res = 0;
+       assert(have_fsmonitor_support());
 
-       if (toggle_maintenance(0) < 0)
-               res = -1;
+       if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+               return run_git("fsmonitor--daemon", "stop", NULL);
 
-       if (add_or_remove_enlistment(0) < 0)
-               res = -1;
-
-       return res;
+       return 0;
 }
 
-static int add_directory_to_archiver(struct strvec *archiver_args,
-                                         const char *path, int recurse)
+static int register_dir(void)
 {
-       int at_root = !*path;
-       DIR *dir = opendir(at_root ? "." : path);
-       struct dirent *e;
-       struct strbuf buf = STRBUF_INIT;
-       size_t len;
-       int res = 0;
+       if (add_or_remove_enlistment(1))
+               return error(_("could not add enlistment"));
 
-       if (!dir)
-               return error_errno(_("could not open directory '%s'"), path);
-
-       if (!at_root)
-               strbuf_addf(&buf, "%s/", path);
-       len = buf.len;
-       strvec_pushf(archiver_args, "--prefix=%s", buf.buf);
-
-       while (!res && (e = readdir(dir))) {
-               if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
-                       continue;
-
-               strbuf_setlen(&buf, len);
-               strbuf_addstr(&buf, e->d_name);
-
-               if (e->d_type == DT_REG)
-                       strvec_pushf(archiver_args, "--add-file=%s", buf.buf);
-               else if (e->d_type != DT_DIR)
-                       warning(_("skipping '%s', which is neither file nor "
-                                 "directory"), buf.buf);
-               else if (recurse &&
-                        add_directory_to_archiver(archiver_args,
-                                                  buf.buf, recurse) < 0)
-                       res = -1;
+       if (set_recommended_config(0))
+               return error(_("could not set recommended config"));
+
+       if (toggle_maintenance(1))
+               return error(_("could not turn on maintenance"));
+
+       if (have_fsmonitor_support() && start_fsmonitor_daemon()) {
+               return error(_("could not start the FSMonitor daemon"));
        }
 
-       closedir(dir);
-       strbuf_release(&buf);
-       return res;
+       return 0;
 }
 
-#ifndef WIN32
-#include <sys/statvfs.h>
-#endif
-
-static int get_disk_info(struct strbuf *out)
+static int unregister_dir(void)
 {
-#ifdef WIN32
-       struct strbuf buf = STRBUF_INIT;
-       char volume_name[MAX_PATH], fs_name[MAX_PATH];
-       DWORD serial_number, component_length, flags;
-       ULARGE_INTEGER avail2caller, total, avail;
-
-       strbuf_realpath(&buf, ".", 1);
-       if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) {
-               error(_("could not determine free disk size for '%s'"),
-                     buf.buf);
-               strbuf_release(&buf);
-               return -1;
-       }
+       int res = 0;
 
-       strbuf_setlen(&buf, offset_1st_component(buf.buf));
-       if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name),
-                                  &serial_number, &component_length, &flags,
-                                  fs_name, sizeof(fs_name))) {
-               error(_("could not get info for '%s'"), buf.buf);
-               strbuf_release(&buf);
-               return -1;
-       }
-       strbuf_addf(out, "Available space on '%s': ", buf.buf);
-       strbuf_humanise_bytes(out, avail2caller.QuadPart);
-       strbuf_addch(out, '\n');
-       strbuf_release(&buf);
-#else
-       struct strbuf buf = STRBUF_INIT;
-       struct statvfs stat;
+       if (toggle_maintenance(0))
+               res = error(_("could not turn off maintenance"));
 
-       strbuf_realpath(&buf, ".", 1);
-       if (statvfs(buf.buf, &stat) < 0) {
-               error_errno(_("could not determine free disk size for '%s'"),
-                           buf.buf);
-               strbuf_release(&buf);
-               return -1;
-       }
+       if (add_or_remove_enlistment(0))
+               res = error(_("could not remove enlistment"));
 
-       strbuf_addf(out, "Available space on '%s': ", buf.buf);
-       strbuf_humanise_bytes(out, st_mult(stat.f_bsize, stat.f_bavail));
-       strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag);
-       strbuf_release(&buf);
-#endif
-       return 0;
+       return res;
 }
 
 /* printf-style interface, expects `<key>=<value>` argument */
@@ -431,25 +359,35 @@ static int delete_enlistment(struct strbuf *enlistment)
 {
 #ifdef WIN32
        struct strbuf parent = STRBUF_INIT;
+       size_t offset;
+       char *path_sep;
 #endif
 
        if (unregister_dir())
-               die(_("failed to unregister repository"));
+               return error(_("failed to unregister repository"));
 
 #ifdef WIN32
        /*
         * Change the current directory to one outside of the enlistment so
         * that we may delete everything underneath it.
         */
-       strbuf_addbuf(&parent, enlistment);
-       strbuf_parent_directory(&parent);
-       if (chdir(parent.buf) < 0)
-               die_errno(_("could not switch to '%s'"), parent.buf);
+       offset = offset_1st_component(enlistment->buf);
+       path_sep = find_last_dir_sep(enlistment->buf + offset);
+       strbuf_add(&parent, enlistment->buf,
+                  path_sep ? path_sep - enlistment->buf : offset);
+       if (chdir(parent.buf) < 0) {
+               int res = error_errno(_("could not switch to '%s'"), parent.buf);
+               strbuf_release(&parent);
+               return res;
+       }
        strbuf_release(&parent);
 #endif
 
+       if (have_fsmonitor_support() && stop_fsmonitor_daemon())
+               return error(_("failed to stop the FSMonitor daemon"));
+
        if (remove_dir_recursively(enlistment, 0))
-               die(_("failed to delete enlistment directory"));
+               return error(_("failed to delete enlistment directory"));
 
        return 0;
 }
@@ -595,83 +533,6 @@ cleanup:
        return res;
 }
 
-static void dir_file_stats_objects(const char *full_path, size_t full_path_len,
-                                  const char *file_name, void *data)
-{
-       struct strbuf *buf = data;
-       struct stat st;
-
-       if (!stat(full_path, &st))
-               strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name,
-                           (uintmax_t)st.st_size);
-}
-
-static int dir_file_stats(struct object_directory *object_dir, void *data)
-{
-       struct strbuf *buf = data;
-
-       strbuf_addf(buf, "Contents of %s:\n", object_dir->path);
-
-       for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects,
-                                 data);
-
-       return 0;
-}
-
-static int count_files(char *path)
-{
-       DIR *dir = opendir(path);
-       struct dirent *e;
-       int count = 0;
-
-       if (!dir)
-               return 0;
-
-       while ((e = readdir(dir)) != NULL)
-               if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG)
-                       count++;
-
-       closedir(dir);
-       return count;
-}
-
-static void loose_objs_stats(struct strbuf *buf, const char *path)
-{
-       DIR *dir = opendir(path);
-       struct dirent *e;
-       int count;
-       int total = 0;
-       unsigned char c;
-       struct strbuf count_path = STRBUF_INIT;
-       size_t base_path_len;
-
-       if (!dir)
-               return;
-
-       strbuf_addstr(buf, "Object directory stats for ");
-       strbuf_add_absolute_path(buf, path);
-       strbuf_addstr(buf, ":\n");
-
-       strbuf_add_absolute_path(&count_path, path);
-       strbuf_addch(&count_path, '/');
-       base_path_len = count_path.len;
-
-       while ((e = readdir(dir)) != NULL)
-               if (!is_dot_or_dotdot(e->d_name) &&
-                   e->d_type == DT_DIR && strlen(e->d_name) == 2 &&
-                   !hex_to_bytes(&c, e->d_name, 1)) {
-                       strbuf_setlen(&count_path, base_path_len);
-                       strbuf_addstr(&count_path, e->d_name);
-                       total += (count = count_files(count_path.buf));
-                       strbuf_addf(buf, "%s : %7d files\n", e->d_name, count);
-               }
-
-       strbuf_addf(buf, "Total: %d loose objects", total);
-
-       strbuf_release(&count_path);
-       closedir(dir);
-}
-
 static int cmd_diagnose(int argc, const char **argv)
 {
        struct option options[] = {
@@ -681,107 +542,19 @@ static int cmd_diagnose(int argc, const char **argv)
                N_("scalar diagnose [<enlistment>]"),
                NULL
        };
-       struct strbuf zip_path = STRBUF_INIT;
-       struct strvec archiver_args = STRVEC_INIT;
-       char **argv_copy = NULL;
-       int stdout_fd = -1, archiver_fd = -1;
-       time_t now = time(NULL);
-       struct tm tm;
-       struct strbuf path = STRBUF_INIT, buf = STRBUF_INIT;
+       struct strbuf diagnostics_root = STRBUF_INIT;
        int res = 0;
 
        argc = parse_options(argc, argv, NULL, options,
                             usage, 0);
 
-       setup_enlistment_directory(argc, argv, usage, options, &zip_path);
-
-       strbuf_addstr(&zip_path, "/.scalarDiagnostics/scalar_");
-       strbuf_addftime(&zip_path,
-                       "%Y%m%d_%H%M%S", localtime_r(&now, &tm), 0, 0);
-       strbuf_addstr(&zip_path, ".zip");
-       switch (safe_create_leading_directories(zip_path.buf)) {
-       case SCLD_EXISTS:
-       case SCLD_OK:
-               break;
-       default:
-               error_errno(_("could not create directory for '%s'"),
-                           zip_path.buf);
-               goto diagnose_cleanup;
-       }
-       stdout_fd = dup(1);
-       if (stdout_fd < 0) {
-               res = error_errno(_("could not duplicate stdout"));
-               goto diagnose_cleanup;
-       }
+       setup_enlistment_directory(argc, argv, usage, options, &diagnostics_root);
+       strbuf_addstr(&diagnostics_root, "/.scalarDiagnostics");
 
-       archiver_fd = xopen(zip_path.buf, O_CREAT | O_WRONLY | O_TRUNC, 0666);
-       if (archiver_fd < 0 || dup2(archiver_fd, 1) < 0) {
-               res = error_errno(_("could not redirect output"));
-               goto diagnose_cleanup;
-       }
-
-       init_zip_archiver();
-       strvec_pushl(&archiver_args, "scalar-diagnose", "--format=zip", NULL);
-
-       strbuf_reset(&buf);
-       strbuf_addstr(&buf, "Collecting diagnostic info\n\n");
-       get_version_info(&buf, 1);
-
-       strbuf_addf(&buf, "Enlistment root: %s\n", the_repository->worktree);
-       get_disk_info(&buf);
-       write_or_die(stdout_fd, buf.buf, buf.len);
-       strvec_pushf(&archiver_args,
-                    "--add-virtual-file=diagnostics.log:%.*s",
-                    (int)buf.len, buf.buf);
-
-       strbuf_reset(&buf);
-       strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:");
-       dir_file_stats(the_repository->objects->odb, &buf);
-       foreach_alt_odb(dir_file_stats, &buf);
-       strvec_push(&archiver_args, buf.buf);
-
-       strbuf_reset(&buf);
-       strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:");
-       loose_objs_stats(&buf, ".git/objects");
-       strvec_push(&archiver_args, buf.buf);
-
-       if ((res = add_directory_to_archiver(&archiver_args, ".git", 0)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/hooks", 0)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/info", 0)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/logs", 1)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/objects/info", 0)))
-               goto diagnose_cleanup;
-
-       strvec_pushl(&archiver_args, "--prefix=",
-                    oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
-
-       /* `write_archive()` modifies the `argv` passed to it. Let it. */
-       argv_copy = xmemdupz(archiver_args.v,
-                            sizeof(char *) * archiver_args.nr);
-       res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL,
-                           the_repository, NULL, 0);
-       if (res) {
-               error(_("failed to write archive"));
-               goto diagnose_cleanup;
-       }
-
-       if (!res)
-               fprintf(stderr, "\n"
-                      "Diagnostics complete.\n"
-                      "All of the gathered info is captured in '%s'\n",
-                      zip_path.buf);
-
-diagnose_cleanup:
-       if (archiver_fd >= 0) {
-               close(1);
-               dup2(stdout_fd, 1);
-       }
-       free(argv_copy);
-       strvec_clear(&archiver_args);
-       strbuf_release(&zip_path);
-       strbuf_release(&path);
-       strbuf_release(&buf);
+       res = run_git("diagnose", "--mode=all", "-s", "%Y%m%d_%H%M%S",
+                     "-o", diagnostics_root.buf, NULL);
 
+       strbuf_release(&diagnostics_root);
        return res;
 }
 
index c0425e065338fd0c55021de85b50362e8ede9727..1a12dc450774c2e1aa8b5142c624b5e7704eb61c 100644 (file)
@@ -3,7 +3,7 @@ scalar(1)
 
 NAME
 ----
-scalar - an opinionated repository management tool
+scalar - A tool for managing large Git repositories
 
 SYNOPSIS
 --------
@@ -20,10 +20,9 @@ scalar delete <enlistment>
 DESCRIPTION
 -----------
 
-Scalar is an opinionated repository management tool. By creating new
-repositories or registering existing repositories with Scalar, your Git
-experience will speed up. Scalar sets advanced Git config settings,
-maintains your repositories in the background, and helps reduce data sent
+Scalar is a repository management tool that optimizes Git for use in large
+repositories. Scalar improves performance by configuring advanced Git settings,
+maintaining repositories in the background, and helping to reduce data sent
 across the network.
 
 An important Scalar concept is the enlistment: this is the top-level directory
index 01e82e56d15629abd0444646341c1a9d639fc310..1ed174a8cf38e35a993ddeae4e85c234bcc34928 100644 (file)
@@ -42,7 +42,7 @@ $(T):
        @echo "*** $@ ***"; GIT_CONFIG=.git/config '$(SHELL_PATH_SQ)' $@ $(GIT_TEST_OPTS)
 
 clean-except-prove-cache:
-       $(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)'
+       $(RM) -r 'trash directory'.*
        $(RM) -r valgrind/bin
 
 clean: clean-except-prove-cache
index 10b1172a8aa0d219f26a0f927c6bbf1800ba3a1c..dfb949f52eed045e73ad06c2ed972bdd84f5663d 100755 (executable)
@@ -17,6 +17,99 @@ test_expect_success 'scalar shows a usage' '
        test_expect_code 129 scalar -h
 '
 
+test_expect_success 'scalar invoked on enlistment root' '
+       test_when_finished rm -rf test src deeper &&
+
+       for enlistment_root in test src deeper/test
+       do
+               git init ${enlistment_root}/src &&
+
+               # Register
+               scalar register ${enlistment_root} &&
+               scalar list >out &&
+               grep "$(pwd)/${enlistment_root}/src\$" out &&
+
+               # Delete (including enlistment root)
+               scalar delete $enlistment_root &&
+               test_path_is_missing $enlistment_root &&
+               scalar list >out &&
+               ! grep "^$(pwd)/${enlistment_root}/src\$" out || return 1
+       done
+'
+
+test_expect_success 'scalar invoked on enlistment src repo' '
+       test_when_finished rm -rf test src deeper &&
+
+       for enlistment_root in test src deeper/test
+       do
+               git init ${enlistment_root}/src &&
+
+               # Register
+               scalar register ${enlistment_root}/src &&
+               scalar list >out &&
+               grep "$(pwd)/${enlistment_root}/src\$" out &&
+
+               # Delete (will not include enlistment root)
+               scalar delete ${enlistment_root}/src &&
+               test_path_is_dir $enlistment_root &&
+               scalar list >out &&
+               ! grep "^$(pwd)/${enlistment_root}/src\$" out || return 1
+       done
+'
+
+test_expect_success 'scalar invoked when enlistment root and repo are the same' '
+       test_when_finished rm -rf test src deeper &&
+
+       for enlistment_root in test src deeper/test
+       do
+               git init ${enlistment_root} &&
+
+               # Register
+               scalar register ${enlistment_root} &&
+               scalar list >out &&
+               grep "$(pwd)/${enlistment_root}\$" out &&
+
+               # Delete (will not include enlistment root)
+               scalar delete ${enlistment_root} &&
+               test_path_is_missing $enlistment_root &&
+               scalar list >out &&
+               ! grep "^$(pwd)/${enlistment_root}\$" out &&
+
+               # Make sure we did not accidentally delete the trash dir
+               test_path_is_dir "$TRASH_DIRECTORY" || return 1
+       done
+'
+
+test_expect_success 'scalar repo search respects GIT_CEILING_DIRECTORIES' '
+       test_when_finished rm -rf test &&
+
+       git init test/src &&
+       mkdir -p test/src/deep &&
+       GIT_CEILING_DIRECTORIES="$(pwd)/test/src" &&
+       ! scalar register test/src/deep 2>err &&
+       grep "not a git repository" err
+'
+
+test_expect_success 'scalar enlistments need a worktree' '
+       test_when_finished rm -rf bare test &&
+
+       git init --bare bare/src &&
+       ! scalar register bare/src 2>err &&
+       grep "Scalar enlistments require a worktree" err &&
+
+       git init test/src &&
+       ! scalar register test/src/.git 2>err &&
+       grep "Scalar enlistments require a worktree" err
+'
+
+test_expect_success FSMONITOR_DAEMON 'scalar register starts fsmon daemon' '
+       git init test/src &&
+       test_must_fail git -C test/src fsmonitor--daemon status &&
+       scalar register test/src &&
+       git -C test/src fsmonitor--daemon status &&
+       test_cmp_config -C test/src true core.fsmonitor
+'
+
 test_expect_success 'scalar unregister' '
        git init vanish/src &&
        scalar register vanish/src &&
@@ -109,14 +202,14 @@ test_expect_success UNZIP 'scalar diagnose' '
        sed -n "s/.*$SQ\\(.*\\.zip\\)$SQ.*/\\1/p" <err >zip_path &&
        zip_path=$(cat zip_path) &&
        test -n "$zip_path" &&
-       unzip -v "$zip_path" &&
+       "$GIT_UNZIP" -v "$zip_path" &&
        folder=${zip_path%.zip} &&
        test_path_is_missing "$folder" &&
-       unzip -p "$zip_path" diagnostics.log >out &&
+       "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out &&
        test_file_not_empty out &&
-       unzip -p "$zip_path" packs-local.txt >out &&
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
        grep "$(pwd)/.git/objects" out &&
-       unzip -p "$zip_path" objects-local.txt >out &&
+       "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out &&
        grep "^Total: [1-9]" out
 '
 
index 1af1d9653e94b39623f8435957c6e7537c41c60a..7562a395c2456bf4588a26e6fa847cced5bb71c2 100755 (executable)
@@ -50,6 +50,14 @@ m,message=    use the given message as the commit message for the merge commit
 
 indent=0
 
+# Usage: say [MSG...]
+say () {
+       if test -z "$arg_quiet"
+       then
+               printf '%s\n' "$*"
+       fi
+}
+
 # Usage: debug [MSG...]
 debug () {
        if test -n "$arg_debug"
@@ -60,7 +68,7 @@ debug () {
 
 # Usage: progress [MSG...]
 progress () {
-       if test -z "$GIT_QUIET"
+       if test -z "$arg_quiet"
        then
                if test -z "$arg_debug"
                then
@@ -146,6 +154,7 @@ main () {
        eval "$set_args"
 
        # Begin "real" flag parsing.
+       arg_quiet=
        arg_debug=
        arg_prefix=
        arg_split_branch=
@@ -161,7 +170,7 @@ main () {
 
                case "$opt" in
                -q)
-                       GIT_QUIET=1
+                       arg_quiet=1
                        ;;
                -d)
                        arg_debug=1
@@ -252,7 +261,7 @@ main () {
        dir="$(dirname "$arg_prefix/.")"
 
        debug "command: {$arg_command}"
-       debug "quiet: {$GIT_QUIET}"
+       debug "quiet: {$arg_quiet}"
        debug "dir: {$dir}"
        debug "opts: {$*}"
        debug
index 276898eb6bd720ac63a87a0f3d7a2bebb542cb37..3d278bb0edbc3696ad99f8c6e89ede6c8141ef04 100644 (file)
@@ -47,7 +47,7 @@ pre-clean:
        $(RM) -r '$(TEST_RESULTS_DIRECTORY_SQ)'
 
 clean-except-prove-cache:
-       $(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)'
+       $(RM) -r 'trash directory'.*
        $(RM) -r valgrind/bin
 
 clean: clean-except-prove-cache
index 4d153729da0f185207b85d103905b30e3183796b..95e6a5244fc26c029abff85bb37a4e9fae71acba 100644 (file)
--- a/convert.c
+++ b/convert.c
@@ -619,7 +619,7 @@ struct filter_params {
        const char *path;
 };
 
-static int filter_buffer_or_fd(int in, int out, void *data)
+static int filter_buffer_or_fd(int in UNUSED, int out, void *data)
 {
        /*
         * Spawn cmd and feed the buffer contents through its stdin.
@@ -1008,7 +1008,7 @@ static int apply_filter(const char *path, const char *src, size_t len,
        return 0;
 }
 
-static int read_convert_config(const char *var, const char *value, void *cb)
+static int read_convert_config(const char *var, const char *value, void *cb UNUSED)
 {
        const char *key, *name;
        size_t namelen;
index aa98b2e54146f7de1f349f16a9a01e754a3a0b73..26f9e99e1a978921d9ec19725c09a88247548d95 100644 (file)
@@ -316,7 +316,7 @@ static regex_t *island_regexes;
 static unsigned int island_regexes_alloc, island_regexes_nr;
 static const char *core_island_name;
 
-static int island_config_callback(const char *k, const char *v, void *cb)
+static int island_config_callback(const char *k, const char *v, void *cb UNUSED)
 {
        if (!strcmp(k, "pack.island")) {
                struct strbuf re = STRBUF_INIT;
@@ -365,7 +365,7 @@ static void add_ref_to_island(const char *island_name, const struct object_id *o
 }
 
 static int find_island_for_ref(const char *refname, const struct object_id *oid,
-                              int flags, void *data)
+                              int flags UNUSED, void *data UNUSED)
 {
        /*
         * We should advertise 'ARRAY_SIZE(matches) - 2' as the max,
diff --git a/diagnose.c b/diagnose.c
new file mode 100644 (file)
index 0000000..beb0a87
--- /dev/null
@@ -0,0 +1,269 @@
+#include "cache.h"
+#include "diagnose.h"
+#include "compat/disk.h"
+#include "archive.h"
+#include "dir.h"
+#include "help.h"
+#include "strvec.h"
+#include "object-store.h"
+#include "packfile.h"
+
+struct archive_dir {
+       const char *path;
+       int recursive;
+};
+
+struct diagnose_option {
+       enum diagnose_mode mode;
+       const char *option_name;
+};
+
+static struct diagnose_option diagnose_options[] = {
+       { DIAGNOSE_STATS, "stats" },
+       { DIAGNOSE_ALL, "all" },
+};
+
+int option_parse_diagnose(const struct option *opt, const char *arg, int unset)
+{
+       int i;
+       enum diagnose_mode *diagnose = opt->value;
+
+       if (!arg) {
+               *diagnose = unset ? DIAGNOSE_NONE : DIAGNOSE_STATS;
+               return 0;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(diagnose_options); i++) {
+               if (!strcmp(arg, diagnose_options[i].option_name)) {
+                       *diagnose = diagnose_options[i].mode;
+                       return 0;
+               }
+       }
+
+       return error(_("invalid --%s value '%s'"), opt->long_name, arg);
+}
+
+static void dir_file_stats_objects(const char *full_path, size_t full_path_len,
+                                  const char *file_name, void *data)
+{
+       struct strbuf *buf = data;
+       struct stat st;
+
+       if (!stat(full_path, &st))
+               strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name,
+                           (uintmax_t)st.st_size);
+}
+
+static int dir_file_stats(struct object_directory *object_dir, void *data)
+{
+       struct strbuf *buf = data;
+
+       strbuf_addf(buf, "Contents of %s:\n", object_dir->path);
+
+       for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects,
+                                 data);
+
+       return 0;
+}
+
+static int count_files(char *path)
+{
+       DIR *dir = opendir(path);
+       struct dirent *e;
+       int count = 0;
+
+       if (!dir)
+               return 0;
+
+       while ((e = readdir(dir)) != NULL)
+               if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG)
+                       count++;
+
+       closedir(dir);
+       return count;
+}
+
+static void loose_objs_stats(struct strbuf *buf, const char *path)
+{
+       DIR *dir = opendir(path);
+       struct dirent *e;
+       int count;
+       int total = 0;
+       unsigned char c;
+       struct strbuf count_path = STRBUF_INIT;
+       size_t base_path_len;
+
+       if (!dir)
+               return;
+
+       strbuf_addstr(buf, "Object directory stats for ");
+       strbuf_add_absolute_path(buf, path);
+       strbuf_addstr(buf, ":\n");
+
+       strbuf_add_absolute_path(&count_path, path);
+       strbuf_addch(&count_path, '/');
+       base_path_len = count_path.len;
+
+       while ((e = readdir(dir)) != NULL)
+               if (!is_dot_or_dotdot(e->d_name) &&
+                   e->d_type == DT_DIR && strlen(e->d_name) == 2 &&
+                   !hex_to_bytes(&c, e->d_name, 1)) {
+                       strbuf_setlen(&count_path, base_path_len);
+                       strbuf_addstr(&count_path, e->d_name);
+                       total += (count = count_files(count_path.buf));
+                       strbuf_addf(buf, "%s : %7d files\n", e->d_name, count);
+               }
+
+       strbuf_addf(buf, "Total: %d loose objects", total);
+
+       strbuf_release(&count_path);
+       closedir(dir);
+}
+
+static int add_directory_to_archiver(struct strvec *archiver_args,
+                                    const char *path, int recurse)
+{
+       int at_root = !*path;
+       DIR *dir;
+       struct dirent *e;
+       struct strbuf buf = STRBUF_INIT;
+       size_t len;
+       int res = 0;
+
+       dir = opendir(at_root ? "." : path);
+       if (!dir) {
+               if (errno == ENOENT) {
+                       warning(_("could not archive missing directory '%s'"), path);
+                       return 0;
+               }
+               return error_errno(_("could not open directory '%s'"), path);
+       }
+
+       if (!at_root)
+               strbuf_addf(&buf, "%s/", path);
+       len = buf.len;
+       strvec_pushf(archiver_args, "--prefix=%s", buf.buf);
+
+       while (!res && (e = readdir(dir))) {
+               if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
+                       continue;
+
+               strbuf_setlen(&buf, len);
+               strbuf_addstr(&buf, e->d_name);
+
+               if (e->d_type == DT_REG)
+                       strvec_pushf(archiver_args, "--add-file=%s", buf.buf);
+               else if (e->d_type != DT_DIR)
+                       warning(_("skipping '%s', which is neither file nor "
+                                 "directory"), buf.buf);
+               else if (recurse &&
+                        add_directory_to_archiver(archiver_args,
+                                                  buf.buf, recurse) < 0)
+                       res = -1;
+       }
+
+       closedir(dir);
+       strbuf_release(&buf);
+       return res;
+}
+
+int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
+{
+       struct strvec archiver_args = STRVEC_INIT;
+       char **argv_copy = NULL;
+       int stdout_fd = -1, archiver_fd = -1;
+       struct strbuf buf = STRBUF_INIT;
+       int res, i;
+       struct archive_dir archive_dirs[] = {
+               { ".git", 0 },
+               { ".git/hooks", 0 },
+               { ".git/info", 0 },
+               { ".git/logs", 1 },
+               { ".git/objects/info", 0 }
+       };
+
+       if (mode == DIAGNOSE_NONE) {
+               res = 0;
+               goto diagnose_cleanup;
+       }
+
+       stdout_fd = dup(STDOUT_FILENO);
+       if (stdout_fd < 0) {
+               res = error_errno(_("could not duplicate stdout"));
+               goto diagnose_cleanup;
+       }
+
+       archiver_fd = xopen(zip_path->buf, O_CREAT | O_WRONLY | O_TRUNC, 0666);
+       if (dup2(archiver_fd, STDOUT_FILENO) < 0) {
+               res = error_errno(_("could not redirect output"));
+               goto diagnose_cleanup;
+       }
+
+       init_zip_archiver();
+       strvec_pushl(&archiver_args, "git-diagnose", "--format=zip", NULL);
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, "Collecting diagnostic info\n\n");
+       get_version_info(&buf, 1);
+
+       strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
+       get_disk_info(&buf);
+       write_or_die(stdout_fd, buf.buf, buf.len);
+       strvec_pushf(&archiver_args,
+                    "--add-virtual-file=diagnostics.log:%.*s",
+                    (int)buf.len, buf.buf);
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:");
+       dir_file_stats(the_repository->objects->odb, &buf);
+       foreach_alt_odb(dir_file_stats, &buf);
+       strvec_push(&archiver_args, buf.buf);
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:");
+       loose_objs_stats(&buf, ".git/objects");
+       strvec_push(&archiver_args, buf.buf);
+
+       /* Only include this if explicitly requested */
+       if (mode == DIAGNOSE_ALL) {
+               for (i = 0; i < ARRAY_SIZE(archive_dirs); i++) {
+                       if (add_directory_to_archiver(&archiver_args,
+                                                     archive_dirs[i].path,
+                                                     archive_dirs[i].recursive)) {
+                               res = error_errno(_("could not add directory '%s' to archiver"),
+                                                 archive_dirs[i].path);
+                               goto diagnose_cleanup;
+                       }
+               }
+       }
+
+       strvec_pushl(&archiver_args, "--prefix=",
+                    oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
+
+       /* `write_archive()` modifies the `argv` passed to it. Let it. */
+       argv_copy = xmemdupz(archiver_args.v,
+                            sizeof(char *) * archiver_args.nr);
+       res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL,
+                           the_repository, NULL, 0);
+       if (res) {
+               error(_("failed to write archive"));
+               goto diagnose_cleanup;
+       }
+
+       fprintf(stderr, "\n"
+               "Diagnostics complete.\n"
+               "All of the gathered info is captured in '%s'\n",
+               zip_path->buf);
+
+diagnose_cleanup:
+       if (archiver_fd >= 0) {
+               dup2(stdout_fd, STDOUT_FILENO);
+               close(stdout_fd);
+               close(archiver_fd);
+       }
+       free(argv_copy);
+       strvec_clear(&archiver_args);
+       strbuf_release(&buf);
+
+       return res;
+}
diff --git a/diagnose.h b/diagnose.h
new file mode 100644 (file)
index 0000000..7a4951a
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef DIAGNOSE_H
+#define DIAGNOSE_H
+
+#include "strbuf.h"
+#include "parse-options.h"
+
+enum diagnose_mode {
+       DIAGNOSE_NONE,
+       DIAGNOSE_STATS,
+       DIAGNOSE_ALL
+};
+
+int option_parse_diagnose(const struct option *opt, const char *arg, int unset);
+
+int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode);
+
+#endif /* DIAGNOSE_H */
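
A minimal, hypothetical sketch of a caller of the API declared above (the function name and output path are invented; it assumes a repository has already been set up, since create_diagnostics_archive() inspects the_repository):

#include "cache.h"
#include "diagnose.h"

/* hypothetical: write a stats-only report into an existing directory */
static int write_report(const char *dir)
{
	struct strbuf zip_path = STRBUF_INIT;
	int res;

	strbuf_addf(&zip_path, "%s/report.zip", dir);
	/* DIAGNOSE_STATS gathers summaries only; DIAGNOSE_ALL also archives
	 * selected .git contents (hooks, info, logs, objects/info) */
	res = create_diagnostics_archive(&zip_path, DIAGNOSE_STATS);
	strbuf_release(&zip_path);
	return res;
}
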
index 9a8b09346bd3da77daa1cd755486d68ae37e9a39..18edbdf4b59ec020cc58e3d16d01d92edfdc302e 100644 (file)
@@ -243,7 +243,9 @@ int diff_no_index(struct rev_info *revs,
                  int argc, const char **argv)
 {
        int i, no_index;
+       int ret = 1;
        const char *paths[2];
+       char *to_free[ARRAY_SIZE(paths)] = { 0 };
        struct strbuf replacement = STRBUF_INIT;
        const char *prefix = revs->prefix;
        struct option no_index_options[] = {
@@ -265,7 +267,7 @@ int diff_no_index(struct rev_info *revs,
        }
        FREE_AND_NULL(options);
        for (i = 0; i < 2; i++) {
-               const char *p = argv[argc - 2 + i];
+               const char *p = argv[i];
                if (!strcmp(p, "-"))
                        /*
                         * stdin should be spelled as "-"; if you have
@@ -273,7 +275,7 @@ int diff_no_index(struct rev_info *revs,
                         */
                        p = file_from_standard_input;
                else if (prefix)
-                       p = prefix_filename(prefix, p);
+                       p = to_free[i] = prefix_filename(prefix, p);
                paths[i] = p;
        }
 
@@ -295,16 +297,20 @@ int diff_no_index(struct rev_info *revs,
        revs->diffopt.flags.exit_with_status = 1;
 
        if (queue_diff(&revs->diffopt, paths[0], paths[1]))
-               return 1;
+               goto out;
        diff_set_mnemonic_prefix(&revs->diffopt, "1/", "2/");
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
 
-       strbuf_release(&replacement);
-
        /*
         * The return code for --no-index imitates diff(1):
         * 0 = no changes, 1 = changes, else error
         */
-       return diff_result_code(&revs->diffopt, 0);
+       ret = diff_result_code(&revs->diffopt, 0);
+
+out:
+       for (i = 0; i < ARRAY_SIZE(to_free); i++)
+               free(to_free[i]);
+       strbuf_release(&replacement);
+       return ret;
 }
diff --git a/diff.c b/diff.c
index e71cf758861bd7596ce122611a4c92fe6b27d8c5..648f6717a5597c30c423256a25e0fece08cd30de 100644 (file)
--- a/diff.c
+++ b/diff.c
@@ -264,7 +264,8 @@ void init_diff_ui_defaults(void)
        diff_detect_rename_default = DIFF_DETECT_RENAME;
 }
 
-int git_diff_heuristic_config(const char *var, const char *value, void *cb)
+int git_diff_heuristic_config(const char *var, const char *value,
+                             void *cb UNUSED)
 {
        if (!strcmp(var, "diff.indentheuristic"))
                diff_indent_heuristic = git_config_bool(var, value);
@@ -916,7 +917,7 @@ struct interned_diff_symbol {
 static int interned_diff_symbol_cmp(const void *hashmap_cmp_fn_data,
                                    const struct hashmap_entry *eptr,
                                    const struct hashmap_entry *entry_or_key,
-                                   const void *keydata)
+                                   const void *keydata UNUSED)
 {
        const struct diff_options *diffopt = hashmap_cmp_fn_data;
        const struct emitted_diff_symbol *a, *b;
@@ -1289,7 +1290,6 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
 {
        static const char *nneof = " No newline at end of file\n";
        const char *context, *reset, *set, *set_sign, *meta, *fraginfo;
-       struct strbuf sb = STRBUF_INIT;
 
        enum diff_symbol s = eds->s;
        const char *line = eds->line;
@@ -1521,7 +1521,6 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
        default:
                BUG("unknown diff symbol");
        }
-       strbuf_release(&sb);
 }
 
 static void emit_diff_symbol(struct diff_options *o, enum diff_symbol s,
@@ -3362,23 +3361,23 @@ struct userdiff_driver *get_textconv(struct repository *r,
        return userdiff_get_textconv(r, one->driver);
 }
 
-static struct strbuf *additional_headers(struct diff_options *o,
-                                        const char *path)
+static struct string_list *additional_headers(struct diff_options *o,
+                                             const char *path)
 {
        if (!o->additional_path_headers)
                return NULL;
        return strmap_get(o->additional_path_headers, path);
 }
 
-static void add_formatted_headers(struct strbuf *msg,
-                                 struct strbuf *more_headers,
+static void add_formatted_header(struct strbuf *msg,
+                                 const char *header,
                                  const char *line_prefix,
                                  const char *meta,
                                  const char *reset)
 {
-       char *next, *newline;
+       const char *next, *newline;
 
-       for (next = more_headers->buf; *next; next = newline) {
+       for (next = header; *next; next = newline) {
                newline = strchrnul(next, '\n');
                strbuf_addf(msg, "%s%s%.*s%s\n", line_prefix, meta,
                            (int)(newline - next), next, reset);
@@ -3387,6 +3386,34 @@ static void add_formatted_headers(struct strbuf *msg,
        }
 }
 
+static void add_formatted_headers(struct strbuf *msg,
+                                 struct string_list *more_headers,
+                                 const char *line_prefix,
+                                 const char *meta,
+                                 const char *reset)
+{
+       int i;
+
+       for (i = 0; i < more_headers->nr; i++)
+               add_formatted_header(msg, more_headers->items[i].string,
+                                    line_prefix, meta, reset);
+}
+
+static int diff_filepair_is_phoney(struct diff_filespec *one,
+                                  struct diff_filespec *two)
+{
+       /*
+        * This function specifically looks for pairs injected by
+        * create_filepairs_for_header_only_notifications().  Such
+        * pairs are "phoney" in that they do not represent any
+        * content or even mode difference, but were inserted because
+        * diff_queued_diff previously had no pair associated with
+        * that path but we needed some pair to avoid losing the
+        * "remerge CONFLICT" header associated with the path.
+        */
+       return !DIFF_FILE_VALID(one) && !DIFF_FILE_VALID(two);
+}
+
 static void builtin_diff(const char *name_a,
                         const char *name_b,
                         struct diff_filespec *one,
@@ -3418,14 +3445,16 @@ static void builtin_diff(const char *name_a,
 
        if (o->submodule_format == DIFF_SUBMODULE_LOG &&
            (!one->mode || S_ISGITLINK(one->mode)) &&
-           (!two->mode || S_ISGITLINK(two->mode))) {
+           (!two->mode || S_ISGITLINK(two->mode)) &&
+           (!diff_filepair_is_phoney(one, two))) {
                show_submodule_diff_summary(o, one->path ? one->path : two->path,
                                &one->oid, &two->oid,
                                two->dirty_submodule);
                return;
        } else if (o->submodule_format == DIFF_SUBMODULE_INLINE_DIFF &&
                   (!one->mode || S_ISGITLINK(one->mode)) &&
-                  (!two->mode || S_ISGITLINK(two->mode))) {
+                  (!two->mode || S_ISGITLINK(two->mode)) &&
+                  (!diff_filepair_is_phoney(one, two))) {
                show_submodule_inline_diff(o, one->path ? one->path : two->path,
                                &one->oid, &two->oid,
                                two->dirty_submodule);
@@ -3445,12 +3474,12 @@ static void builtin_diff(const char *name_a,
        b_two = quote_two(b_prefix, name_b + (*name_b == '/'));
        lbl[0] = DIFF_FILE_VALID(one) ? a_one : "/dev/null";
        lbl[1] = DIFF_FILE_VALID(two) ? b_two : "/dev/null";
-       if (!DIFF_FILE_VALID(one) && !DIFF_FILE_VALID(two)) {
+       if (diff_filepair_is_phoney(one, two)) {
                /*
-                * We should only reach this point for pairs from
+                * We should only reach this point for pairs generated from
                 * create_filepairs_for_header_only_notifications().  For
-                * these, we should avoid the "/dev/null" special casing
-                * above, meaning we avoid showing such pairs as either
+                * these, we want to avoid the "/dev/null" special casing
+                * above, because we do not want such pairs shown as either
                 * "new file" or "deleted file" below.
                 */
                lbl[0] = a_one;
@@ -4314,7 +4343,7 @@ static void fill_metainfo(struct strbuf *msg,
        const char *set = diff_get_color(use_color, DIFF_METAINFO);
        const char *reset = diff_get_color(use_color, DIFF_RESET);
        const char *line_prefix = diff_line_prefix(o);
-       struct strbuf *more_headers = NULL;
+       struct string_list *more_headers = NULL;
 
        *must_show_header = 1;
        strbuf_init(msg, PATH_MAX * 2 + 300);
@@ -5650,7 +5679,7 @@ int diff_opt_parse(struct diff_options *options,
 
        ac = parse_options(ac, av, prefix, options->parseopts, NULL,
                           PARSE_OPT_KEEP_DASHDASH |
-                          PARSE_OPT_KEEP_UNKNOWN |
+                          PARSE_OPT_KEEP_UNKNOWN_OPT |
                           PARSE_OPT_NO_INTERNAL_HELP |
                           PARSE_OPT_ONE_SHOT |
                           PARSE_OPT_STOP_AT_NON_OPTION);
@@ -5841,6 +5870,7 @@ static void diff_flush_patch(struct diff_filepair *p, struct diff_options *o)
 {
        int include_conflict_headers =
            (additional_headers(o, p->one->path) &&
+            !o->pickaxe_opts &&
             (!o->filter || filter_bit_tst(DIFF_STATUS_UNMERGED, o)));
 
        /*
@@ -5896,6 +5926,8 @@ int diff_queue_is_empty(struct diff_options *o)
        int i;
        int include_conflict_headers =
            (o->additional_path_headers &&
+            strmap_get_size(o->additional_path_headers) &&
+            !o->pickaxe_opts &&
             (!o->filter || filter_bit_tst(DIFF_STATUS_UNMERGED, o)));
 
        if (include_conflict_headers)
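
The diff.c changes above switch o->additional_path_headers from one strbuf per path to a string_list per path, so a single path can carry several extra header lines (for example multiple "remerge CONFLICT" notes), and header-only "phoney" filepairs are now excluded from the submodule and pickaxe code paths. A rough sketch of the new data shape, with invented helper names and not taken from this patch:

    #include "git-compat-util.h"
    #include "strmap.h"
    #include "string-list.h"

    /* Attach one extra header line to a path (illustrative helper only). */
    static void attach_path_header(struct strmap *headers,
                                   const char *path, const char *msg)
    {
            struct string_list *list = strmap_get(headers, path);

            if (!list) {
                    /* First header for this path: allocate its list. */
                    list = xcalloc(1, sizeof(*list));
                    list->strdup_strings = 1;
                    strmap_put(headers, path, list);
            }
            string_list_append(list, msg);
    }

add_formatted_headers() above then walks items[0..nr) of such a list and prefixes each line for output.
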
diff --git a/dir.c b/dir.c
index d7cfb08e441825f421f91a982063e00be4168c71..75429508200a9d8b591d23586ecab69fe6011a0d 100644 (file)
--- a/dir.c
+++ b/dir.c
@@ -655,10 +655,10 @@ void parse_path_pattern(const char **pattern,
        *patternlen = len;
 }
 
-int pl_hashmap_cmp(const void *unused_cmp_data,
+int pl_hashmap_cmp(const void *cmp_data UNUSED,
                   const struct hashmap_entry *a,
                   const struct hashmap_entry *b,
-                  const void *key)
+                  const void *key UNUSED)
 {
        const struct pattern_entry *ee1 =
                        container_of(a, struct pattern_entry, ent);
@@ -1244,8 +1244,7 @@ int match_basename(const char *basename, int basenamelen,
 
 int match_pathname(const char *pathname, int pathlen,
                   const char *base, int baselen,
-                  const char *pattern, int prefix, int patternlen,
-                  unsigned flags)
+                  const char *pattern, int prefix, int patternlen)
 {
        const char *name;
        int namelen;
@@ -1347,8 +1346,7 @@ static struct path_pattern *last_matching_pattern_from_list(const char *pathname
                if (match_pathname(pathname, pathlen,
                                   pattern->base,
                                   pattern->baselen ? pattern->baselen - 1 : 0,
-                                  exclude, prefix, pattern->patternlen,
-                                  pattern->flags)) {
+                                  exclude, prefix, pattern->patternlen)) {
                        res = pattern;
                        break;
                }
diff --git a/dir.h b/dir.h
index 7bc862030cfb3bbc97cd3b9f6523696662e12633..674747d93af7ad903b1e78a1418ff07c914a69ec 100644 (file)
--- a/dir.h
+++ b/dir.h
@@ -414,7 +414,7 @@ int match_basename(const char *, int,
                   const char *, int, int, unsigned);
 int match_pathname(const char *, int,
                   const char *, int,
-                  const char *, int, int, unsigned);
+                  const char *, int, int);
 
 struct path_pattern *last_matching_pattern(struct dir_struct *dir,
                                           struct index_state *istate,
diff --git a/environment.c b/environment.c
index b3296ce7d15140bff12299b25d1450f69f8508ee..18d042b467d26a9cb3b228db1ef5afa2d903e72c 100644 (file)
--- a/environment.c
+++ b/environment.c
@@ -56,7 +56,6 @@ const char *askpass_program;
 const char *excludes_file;
 enum auto_crlf auto_crlf = AUTO_CRLF_FALSE;
 int read_replace_refs = 1;
-char *git_replace_ref_base;
 enum eol core_eol = EOL_UNSET;
 int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
 char *check_roundtrip_encoding = "SHIFT-JIS";
@@ -162,6 +161,7 @@ const char *getenv_safe(struct strvec *argv, const char *name)
 
 void setup_git_env(const char *git_dir)
 {
+       char *git_replace_ref_base;
        const char *shallow_file;
        const char *replace_ref_base;
        struct set_gitdir_args args = { NULL };
@@ -182,9 +182,10 @@ void setup_git_env(const char *git_dir)
        if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT))
                read_replace_refs = 0;
        replace_ref_base = getenv(GIT_REPLACE_REF_BASE_ENVIRONMENT);
-       free(git_replace_ref_base);
        git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
                                                          : "refs/replace/");
+       update_ref_namespace(NAMESPACE_REPLACE, git_replace_ref_base);
+
        free(git_namespace);
        git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
        shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
@@ -333,10 +334,10 @@ static void set_git_dir_1(const char *path)
        setup_git_env(path);
 }
 
-static void update_relative_gitdir(const char *name,
+static void update_relative_gitdir(const char *name UNUSED,
                                   const char *old_cwd,
                                   const char *new_cwd,
-                                  void *data)
+                                  void *data UNUSED)
 {
        char *path = reparent_relative_path(old_cwd, new_cwd, get_git_dir());
        struct tmp_objdir *tmp_objdir = tmp_objdir_unapply_primary_odb();
diff --git a/fetch-pack.c b/fetch-pack.c
index cb6647d65703aca1ec1ec7014ac433de31852fcc..998fc2fa1ed4abd32ad0e723115f568768134d3e 100644 (file)
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -26,6 +26,7 @@
 #include "commit-reach.h"
 #include "commit-graph.h"
 #include "sigchain.h"
+#include "mergesort.h"
 
 static int transfer_unpack_limit = -1;
 static int fetch_unpack_limit = -1;
@@ -175,8 +176,10 @@ static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
        return 0;
 }
 
-static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
-                                  int flag, void *cb_data)
+static int rev_list_insert_ref_oid(const char *refname UNUSED,
+                                  const struct object_id *oid,
+                                  int flag UNUSED,
+                                  void *cb_data)
 {
        return rev_list_insert_ref(cb_data, oid);
 }
@@ -292,6 +295,29 @@ static void mark_tips(struct fetch_negotiator *negotiator,
        return;
 }
 
+static void send_filter(struct fetch_pack_args *args,
+                       struct strbuf *req_buf,
+                       int server_supports_filter)
+{
+       if (args->filter_options.choice) {
+               const char *spec =
+                       expand_list_objects_filter_spec(&args->filter_options);
+               if (server_supports_filter) {
+                       print_verbose(args, _("Server supports filter"));
+                       packet_buf_write(req_buf, "filter %s", spec);
+                       trace2_data_string("fetch", the_repository,
+                                          "filter/effective", spec);
+               } else {
+                       warning("filtering not recognized by server, ignoring");
+                       trace2_data_string("fetch", the_repository,
+                                          "filter/unsupported", spec);
+               }
+       } else {
+               trace2_data_string("fetch", the_repository,
+                                  "filter/none", "");
+       }
+}
+
 static int find_common(struct fetch_negotiator *negotiator,
                       struct fetch_pack_args *args,
                       int fd[2], struct object_id *result_oid,
@@ -299,6 +325,7 @@ static int find_common(struct fetch_negotiator *negotiator,
 {
        int fetching;
        int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
+       int negotiation_round = 0, haves = 0;
        const struct object_id *oid;
        unsigned in_vain = 0;
        int got_continue = 0;
@@ -389,11 +416,7 @@ static int find_common(struct fetch_negotiator *negotiator,
                        packet_buf_write(&req_buf, "deepen-not %s", s->string);
                }
        }
-       if (server_supports_filtering && args->filter_options.choice) {
-               const char *spec =
-                       expand_list_objects_filter_spec(&args->filter_options);
-               packet_buf_write(&req_buf, "filter %s", spec);
-       }
+       send_filter(args, &req_buf, server_supports_filtering);
        packet_buf_flush(&req_buf);
        state_len = req_buf.len;
 
@@ -441,9 +464,19 @@ static int find_common(struct fetch_negotiator *negotiator,
                packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
                print_verbose(args, "have %s", oid_to_hex(oid));
                in_vain++;
+               haves++;
                if (flush_at <= ++count) {
                        int ack;
 
+                       negotiation_round++;
+                       trace2_region_enter_printf("negotiation_v0_v1", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
+                       trace2_data_intmax("negotiation_v0_v1", the_repository,
+                                          "haves_added", haves);
+                       trace2_data_intmax("negotiation_v0_v1", the_repository,
+                                          "in_vain", in_vain);
+                       haves = 0;
                        packet_buf_flush(&req_buf);
                        send_request(args, fd[1], &req_buf);
                        strbuf_setlen(&req_buf, state_len);
@@ -465,6 +498,9 @@ static int find_common(struct fetch_negotiator *negotiator,
                                                      ack, oid_to_hex(result_oid));
                                switch (ack) {
                                case ACK:
+                                       trace2_region_leave_printf("negotiation_v0_v1", "round",
+                                                                  the_repository, "%d",
+                                                                  negotiation_round);
                                        flushes = 0;
                                        multi_ack = 0;
                                        retval = 0;
@@ -490,6 +526,7 @@ static int find_common(struct fetch_negotiator *negotiator,
                                                const char *hex = oid_to_hex(result_oid);
                                                packet_buf_write(&req_buf, "have %s\n", hex);
                                                state_len = req_buf.len;
+                                               haves++;
                                                /*
                                                 * Reset in_vain because an ack
                                                 * for this commit has not been
@@ -508,6 +545,9 @@ static int find_common(struct fetch_negotiator *negotiator,
                                }
                        } while (ack);
                        flushes--;
+                       trace2_region_leave_printf("negotiation_v0_v1", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
                        if (got_continue && MAX_IN_VAIN < in_vain) {
                                print_verbose(args, _("giving up"));
                                break; /* give up */
@@ -518,6 +558,8 @@ static int find_common(struct fetch_negotiator *negotiator,
        }
 done:
        trace2_region_leave("fetch-pack", "negotiation_v0_v1", the_repository);
+       trace2_data_intmax("negotiation_v0_v1", the_repository, "total_rounds",
+                          negotiation_round);
        if (!got_ready || !no_done) {
                packet_buf_write(&req_buf, "done\n");
                send_request(args, fd[1], &req_buf);
@@ -560,8 +602,10 @@ static int mark_complete(const struct object_id *oid)
        return 0;
 }
 
-static int mark_complete_oid(const char *refname, const struct object_id *oid,
-                            int flag, void *cb_data)
+static int mark_complete_oid(const char *refname UNUSED,
+                            const struct object_id *oid,
+                            int flag UNUSED,
+                            void *cb_data UNUSED)
 {
        return mark_complete(oid);
 }
@@ -799,7 +843,7 @@ static int everything_local(struct fetch_pack_args *args,
        return retval;
 }
 
-static int sideband_demux(int in, int out, void *data)
+static int sideband_demux(int in UNUSED, int out, void *data)
 {
        int *xd = data;
        int ret;
@@ -1025,6 +1069,13 @@ static int get_pack(struct fetch_pack_args *args,
        return 0;
 }
 
+static int ref_compare_name(const struct ref *a, const struct ref *b)
+{
+       return strcmp(a->name, b->name);
+}
+
+DEFINE_LIST_SORT(static, sort_ref_list, struct ref, next);
+
 static int cmp_ref_by_name(const void *a_, const void *b_)
 {
        const struct ref *a = *((const struct ref **)a_);
@@ -1323,15 +1374,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
                die(_("Server does not support shallow requests"));
 
        /* Add filter */
-       if (server_supports_feature("fetch", "filter", 0) &&
-           args->filter_options.choice) {
-               const char *spec =
-                       expand_list_objects_filter_spec(&args->filter_options);
-               print_verbose(args, _("Server supports filter"));
-               packet_buf_write(&req_buf, "filter %s", spec);
-       } else if (args->filter_options.choice) {
-               warning("filtering not recognized by server, ignoring");
-       }
+       send_filter(args, &req_buf,
+                   server_supports_feature("fetch", "filter", 0));
 
        if (server_supports_feature("fetch", "packfile-uris", 0)) {
                int i;
@@ -1361,6 +1405,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
 
        haves_added = add_haves(negotiator, &req_buf, haves_to_send);
        *in_vain += haves_added;
+       trace2_data_intmax("negotiation_v2", the_repository, "haves_added", haves_added);
+       trace2_data_intmax("negotiation_v2", the_repository, "in_vain", *in_vain);
        if (!haves_added || (seen_ack && *in_vain >= MAX_IN_VAIN)) {
                /* Send Done */
                packet_buf_write(&req_buf, "done\n");
@@ -1603,6 +1649,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
        struct oidset common = OIDSET_INIT;
        struct packet_reader reader;
        int in_vain = 0, negotiation_started = 0;
+       int negotiation_round = 0;
        int haves_to_send = INITIAL_FLUSH;
        struct fetch_negotiator negotiator_alloc;
        struct fetch_negotiator *negotiator;
@@ -1659,12 +1706,20 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
                                                    "negotiation_v2",
                                                    the_repository);
                        }
+                       negotiation_round++;
+                       trace2_region_enter_printf("negotiation_v2", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
                        if (send_fetch_request(negotiator, fd[1], args, ref,
                                               &common,
                                               &haves_to_send, &in_vain,
                                               reader.use_sideband,
-                                              seen_ack))
+                                              seen_ack)) {
+                               trace2_region_leave_printf("negotiation_v2", "round",
+                                                          the_repository, "%d",
+                                                          negotiation_round);
                                state = FETCH_GET_PACK;
+                       }
                        else
                                state = FETCH_PROCESS_ACKS;
                        break;
@@ -1677,6 +1732,9 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
                                seen_ack = 1;
                                oidset_insert(&common, &common_oid);
                        }
+                       trace2_region_leave_printf("negotiation_v2", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
                        if (received_ready) {
                                /*
                                 * Don't check for response delimiter; get_pack() will
@@ -1692,6 +1750,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
                        trace2_region_leave("fetch-pack",
                                            "negotiation_v2",
                                            the_repository);
+                       trace2_data_intmax("negotiation_v2", the_repository,
+                                          "total_rounds", negotiation_round);
                        /* Check for shallow-info section */
                        if (process_section_header(&reader, "shallow-info", 1))
                                receive_shallow_info(args, &reader, shallows, si);
@@ -2071,6 +2131,7 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
        int in_vain = 0;
        int seen_ack = 0;
        int last_iteration = 0;
+       int negotiation_round = 0;
        timestamp_t min_generation = GENERATION_NUMBER_INFINITY;
 
        fetch_negotiator_init(the_repository, &negotiator);
@@ -2084,11 +2145,17 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
                           add_to_object_array,
                           &nt_object_array);
 
+       trace2_region_enter("fetch-pack", "negotiate_using_fetch", the_repository);
        while (!last_iteration) {
                int haves_added;
                struct object_id common_oid;
                int received_ready = 0;
 
+               negotiation_round++;
+
+               trace2_region_enter_printf("negotiate_using_fetch", "round",
+                                          the_repository, "%d",
+                                          negotiation_round);
                strbuf_reset(&req_buf);
                write_fetch_command_and_capabilities(&req_buf, server_options);
 
@@ -2099,6 +2166,11 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
                if (!haves_added || (seen_ack && in_vain >= MAX_IN_VAIN))
                        last_iteration = 1;
 
+               trace2_data_intmax("negotiate_using_fetch", the_repository,
+                                  "haves_added", haves_added);
+               trace2_data_intmax("negotiate_using_fetch", the_repository,
+                                  "in_vain", in_vain);
+
                /* Send request */
                packet_buf_flush(&req_buf);
                if (write_in_full(fd[1], req_buf.buf, req_buf.len) < 0)
@@ -2131,7 +2203,13 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
                                                 REACH_SCRATCH, 0,
                                                 min_generation))
                        last_iteration = 1;
+               trace2_region_leave_printf("negotiate_using_fetch", "round",
+                                          the_repository, "%d",
+                                          negotiation_round);
        }
+       trace2_region_leave("fetch-pack", "negotiate_using_fetch", the_repository);
+       trace2_data_intmax("negotiate_using_fetch", the_repository,
+                          "total_rounds", negotiation_round);
        clear_common_flag(acked_commits);
        strbuf_release(&req_buf);
 }
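
The fetch-pack.c hunks above wrap each negotiation round in a trace2 region and record haves_added, in_vain and total_rounds counters for protocol v0/v1, v2 and negotiate_using_fetch() alike, and the duplicated filter-request logic is folded into the new send_filter() helper. A minimal sketch of the trace2 pattern being used here, with an invented function name:

    #include "git-compat-util.h"
    #include "repository.h"
    #include "trace2.h"

    /* Illustrative only: bracket one negotiation round in a trace2 region. */
    static void traced_round(struct repository *r, int round, int haves_added)
    {
            trace2_region_enter_printf("negotiation_v2", "round", r, "%d", round);

            /* ... send "have" lines, read the server's ACKs ... */

            trace2_data_intmax("negotiation_v2", r, "haves_added", haves_added);
            trace2_region_leave_printf("negotiation_v2", "round", r, "%d", round);
    }

Running a fetch with GIT_TRACE2_PERF=<file> (or GIT_TRACE2_EVENT=<file>) set writes these regions and counters out for inspection.
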
diff --git a/fuzz-commit-graph.c b/fuzz-commit-graph.c
index e7cf6d5b0facb53d3e5f35d9b54ac34814d0491a..914026f5d80f876c8a7c28a914a58295a5a72346 100644 (file)
--- a/fuzz-commit-graph.c
+++ b/fuzz-commit-graph.c
@@ -1,7 +1,7 @@
 #include "commit-graph.h"
 #include "repository.h"
 
-struct commit_graph *parse_commit_graph(struct repository *r,
+struct commit_graph *parse_commit_graph(struct repo_settings *s,
                                        void *graph_map, size_t graph_size);
 
 int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);
@@ -11,7 +11,15 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
        struct commit_graph *g;
 
        initialize_the_repository();
-       g = parse_commit_graph(the_repository, (void *)data, size);
+       /*
+        * Initialize the_repository with commit-graph settings that would
+        * normally be read from the repository's gitdir. We want to avoid
+        * touching the disk to keep the individual fuzz-test cases as fast as
+        * possible.
+        */
+       the_repository->settings.commit_graph_generation_version = 2;
+       the_repository->settings.commit_graph_read_changed_paths = 1;
+       g = parse_commit_graph(&the_repository->settings, (void *)data, size);
        repo_clear(the_repository);
        free_commit_graph(g);
 
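
As the comment above notes, the fuzz target now seeds the_repository->settings by hand so no gitdir has to be read, and parse_commit_graph() takes the settings struct instead of the whole repository. When debugging a single crashing input without the libFuzzer driver, a target like this can be exercised from a small stand-alone main(); a simplified sketch, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

    int main(int argc, char **argv)
    {
            FILE *f;
            long len;
            uint8_t *buf;

            if (argc != 2 || !(f = fopen(argv[1], "rb")))
                    return 1;
            fseek(f, 0, SEEK_END);
            len = ftell(f);
            rewind(f);
            buf = malloc(len);
            if (!buf || fread(buf, 1, len, f) != (size_t)len)
                    return 1;
            LLVMFuzzerTestOneInput(buf, len);     /* feed one input to the target */
            free(buf);
            fclose(f);
            return 0;
    }
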
diff --git a/git-compat-util.h b/git-compat-util.h
index 36a25ae252f24d23852c5cdd810936df1c23558f..b90b64718eb610f9303fde76b0f207e22a4c9be6 100644 (file)
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -189,6 +189,13 @@ struct strbuf;
 #define _NETBSD_SOURCE 1
 #define _SGI_SOURCE 1
 
+#if defined(__GNUC__)
+#define UNUSED __attribute__((unused)) \
+       __attribute__((deprecated ("parameter declared as UNUSED")))
+#else
+#define UNUSED
+#endif
+
 #if defined(WIN32) && !defined(__CYGWIN__) /* Both MinGW and MSVC */
 # if !defined(_WIN32_WINNT)
 #  define _WIN32_WINNT 0x0600
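
This UNUSED annotation is what the many callback conversions in this patch rely on: it silences -Wunused-parameter for a parameter the callback must accept but does not need, and the deprecated attribute makes any later use of that parameter produce a warning. A small made-up example in the style of the ref callbacks converted elsewhere in this series:

    #include "cache.h"

    /* each_ref_fn callback that only needs to count refs (made-up example). */
    static int count_ref(const char *refname UNUSED,
                         const struct object_id *oid UNUSED,
                         int flags UNUSED, void *cb_data)
    {
            int *count = cb_data;

            (*count)++;
            return 0;       /* keep iterating */
    }
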
@@ -261,6 +268,7 @@ static inline int is_xplatform_dir_sep(int c)
 #include <sys/resource.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
+#include <sys/statvfs.h>
 #include <termios.h>
 #ifndef NO_SYS_SELECT_H
 #include <sys/select.h>
@@ -397,7 +405,9 @@ typedef uintmax_t timestamp_t;
 #endif
 
 #ifndef platform_core_config
-static inline int noop_core_config(const char *var, const char *value, void *cb)
+static inline int noop_core_config(const char *var UNUSED,
+                                  const char *value UNUSED,
+                                  void *cb UNUSED)
 {
        return 0;
 }
@@ -490,7 +500,8 @@ static inline void extract_id_from_env(const char *env, uid_t *id)
        }
 }
 
-static inline int is_path_owned_by_current_uid(const char *path, struct strbuf *report)
+static inline int is_path_owned_by_current_uid(const char *path,
+                                              struct strbuf *report UNUSED)
 {
        struct stat st;
        uid_t euid;
@@ -568,8 +579,11 @@ static inline int git_has_dir_sep(const char *path)
 /* The sentinel attribute is valid from gcc version 4.0 */
 #if defined(__GNUC__) && (__GNUC__ >= 4)
 #define LAST_ARG_MUST_BE_NULL __attribute__((sentinel))
+/* warn_unused_result exists as of gcc 3.4.0, but be lazy and check 4.0 */
+#define RESULT_MUST_BE_USED __attribute__ ((warn_unused_result))
 #else
 #define LAST_ARG_MUST_BE_NULL
+#define RESULT_MUST_BE_USED
 #endif
 
 #define MAYBE_UNUSED __attribute__((__unused__))
@@ -998,6 +1012,28 @@ static inline unsigned long cast_size_t_to_ulong(size_t a)
        return (unsigned long)a;
 }
 
+/*
+ * Limit size of IO chunks, because huge chunks only cause pain.  OS X
+ * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
+ * the absence of bugs, large chunks can result in bad latencies when
+ * you decide to kill the process.
+ *
+ * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX
+ * that is smaller than that, clip it to SSIZE_MAX, as a call to
+ * read(2) or write(2) larger than that is allowed to fail.  As the last
+ * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value"
+ * to override this, if the definition of SSIZE_MAX given by the platform
+ * is broken.
+ */
+#ifndef MAX_IO_SIZE
+# define MAX_IO_SIZE_DEFAULT (8*1024*1024)
+# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT)
+#  define MAX_IO_SIZE SSIZE_MAX
+# else
+#  define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT
+# endif
+#endif
+
 #ifdef HAVE_ALLOCA_H
 # include <alloca.h>
 # define xalloca(size)      (alloca(size))
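
The MAX_IO_SIZE cap above is what git's read/write wrappers use to chop large I/O into chunks of at most 8 MiB (or SSIZE_MAX, if smaller). A simplified sketch of such a clamped write loop, roughly what xwrite() does, with an invented helper name:

    #include "git-compat-util.h"

    /* Write all of buf, never passing more than MAX_IO_SIZE per syscall. */
    static ssize_t write_clamped(int fd, const char *buf, size_t len)
    {
            size_t done = 0;

            while (done < len) {
                    size_t chunk = len - done;
                    ssize_t ret;

                    if (chunk > MAX_IO_SIZE)
                            chunk = MAX_IO_SIZE;
                    ret = write(fd, buf + done, chunk);
                    if (ret < 0) {
                            if (errno == EINTR || errno == EAGAIN)
                                    continue;
                            return -1;
                    }
                    done += ret;
            }
            return done;
    }
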
diff --git a/git-instaweb.sh b/git-instaweb.sh
index 4349566c89163ea21dfb8388f494cdcdf86604c9..c68f49454cd83f48ce3452df178766606effbad9 100755 (executable)
--- a/git-instaweb.sh
+++ b/git-instaweb.sh
@@ -102,7 +102,7 @@ resolve_full_httpd () {
 
 start_httpd () {
        if test -f "$fqgitdir/pid"; then
-               say "Instance already running. Restarting..."
+               echo "Instance already running. Restarting..."
                stop_httpd
        fi
 
diff --git a/git-merge-resolve.sh b/git-merge-resolve.sh
index 343fe7bccd0d64f0caff1ad5d3f981729a8e5fc9..77e93121bf8c19714b19097b4739b206230677cb 100755 (executable)
--- a/git-merge-resolve.sh
+++ b/git-merge-resolve.sh
@@ -5,6 +5,16 @@
 #
 # Resolve two trees, using enhanced multi-base read-tree.
 
+. git-sh-setup
+
+# Abort if index does not match HEAD
+if ! git diff-index --quiet --cached HEAD --
+then
+    gettextln "Error: Your local changes to the following files would be overwritten by merge"
+    git diff-index --cached --name-only HEAD -- | sed -e 's/^/    /'
+    exit 2
+fi
+
 # The first parameters up to -- are merge bases; the rest are heads.
 bases= head= remotes= sep_seen=
 for arg
index 77dc19daa61de1e7eb850292f3fd6a963b580130..d26a980e5acb66eda31d32e075d90736bcac4e3a 100755 (executable)
--- a/git-p4.py
+++ b/git-p4.py
@@ -2259,7 +2259,7 @@ class P4Submit(Command, P4UserMap):
                                 raw=True):
                             if regexp.search(line):
                                 if verbose:
-                                    print("got keyword match on %s in %s in %s" % (regex.pattern, line, file))
+                                    print("got keyword match on %s in %s in %s" % (regexp.pattern, line, file))
                                 kwfiles[file] = regexp
                                 break
 
@@ -4402,19 +4402,16 @@ class P4Unshelve(Command):
     def renameBranch(self, branch_name):
         """Rename the existing branch to branch_name.N ."""
 
-        found = True
         for i in range(0, 1000):
             backup_branch_name = "{0}.{1}".format(branch_name, i)
             if not gitBranchExists(backup_branch_name):
                 # Copy ref to backup
                 gitUpdateRef(backup_branch_name, branch_name)
                 gitDeleteRef(branch_name)
-                found = True
                 print("renamed old unshelve branch to {0}".format(backup_branch_name))
                 break
-
-        if not found:
-            sys.exit("gave up trying to rename existing branch {0}".format(sync.branch))
+        else:
+            sys.exit("gave up trying to rename existing branch {0}".format(branch_name))
 
     def findLastP4Revision(self, starting_point):
         """Look back from starting_point for the first commit created by git-p4
diff --git a/git-sh-setup.sh b/git-sh-setup.sh
index d92df37e9924719b2aa6d1c84a88d56811d2bd4f..ce273fe0e48d997cb067e77707026616a9302eb2 100644 (file)
--- a/git-sh-setup.sh
+++ b/git-sh-setup.sh
@@ -57,15 +57,6 @@ die_with_status () {
        exit "$status"
 }
 
-GIT_QUIET=
-
-say () {
-       if test -z "$GIT_QUIET"
-       then
-               printf '%s\n' "$*"
-       fi
-}
-
 if test -n "$OPTIONS_SPEC"; then
        usage() {
                "$0" -h
@@ -285,13 +276,6 @@ get_author_ident_from_commit () {
        parse_ident_from_commit author AUTHOR
 }
 
-# Clear repo-local GIT_* environment variables. Useful when switching to
-# another repository (e.g. when entering a submodule). See also the env
-# list in git_connect()
-clear_local_git_env() {
-       unset $(git rev-parse --local-env-vars)
-}
-
 # Generate a virtual base file for a two-file merge. Uses git apply to
 # remove lines from $1 that are not in $2, leaving only common lines.
 create_virtual_base() {
diff --git a/git-submodule.sh b/git-submodule.sh
index fd0b4a2c947762b023999191ff6f3b5c78cb58fc..5e5d21c010f7d4337dbf95cd50eeaf8c97791ba8 100755 (executable)
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -30,6 +30,7 @@ GIT_PROTOCOL_FROM_USER=0
 export GIT_PROTOCOL_FROM_USER
 
 command=
+quiet=
 branch=
 force=
 reference=
@@ -40,8 +41,9 @@ require_init=
 files=
 remote=
 nofetch=
-update=
-prefix=
+rebase=
+merge=
+checkout=
 custom_name=
 depth=
 progress=
@@ -56,17 +58,6 @@ isnumber()
        n=$(($1 + 0)) 2>/dev/null && test "$n" = "$1"
 }
 
-# Sanitize the local git environment for use within a submodule. We
-# can't simply use clear_local_git_env since we want to preserve some
-# of the settings from GIT_CONFIG_PARAMETERS.
-sanitize_submodule_env()
-{
-       save_config=$GIT_CONFIG_PARAMETERS
-       clear_local_git_env
-       GIT_CONFIG_PARAMETERS=$save_config
-       export GIT_CONFIG_PARAMETERS
-}
-
 #
 # Add a new submodule to the working tree, .gitmodules and the index
 #
@@ -90,7 +81,7 @@ cmd_add()
                        force=$1
                        ;;
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        ;;
                --progress)
                        progress=1
@@ -138,7 +129,7 @@ cmd_add()
                usage
        fi
 
-       git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper add ${GIT_QUIET:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper add ${quiet:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@"
 }
 
 #
@@ -154,7 +145,7 @@ cmd_foreach()
        do
                case "$1" in
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        ;;
                --recursive)
                        recursive=1
@@ -169,7 +160,7 @@ cmd_foreach()
                shift
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper foreach ${GIT_QUIET:+--quiet} ${recursive:+--recursive} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper foreach ${quiet:+--quiet} ${recursive:+--recursive} -- "$@"
 }
 
 #
@@ -184,7 +175,7 @@ cmd_init()
        do
                case "$1" in
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        ;;
                --)
                        shift
@@ -200,7 +191,7 @@ cmd_init()
                shift
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper init ${GIT_QUIET:+--quiet} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper init ${quiet:+--quiet} -- "$@"
 }
 
 #
@@ -217,7 +208,7 @@ cmd_deinit()
                        force=$1
                        ;;
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        ;;
                --all)
                        deinit_all=t
@@ -236,7 +227,7 @@ cmd_deinit()
                shift
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${quiet:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@"
 }
 
 #
@@ -251,10 +242,7 @@ cmd_update()
        do
                case "$1" in
                -q|--quiet)
-                       GIT_QUIET=1
-                       ;;
-               -v)
-                       unset GIT_QUIET
+                       quiet=1
                        ;;
                --progress)
                        progress=1
@@ -263,7 +251,6 @@ cmd_update()
                        init=1
                        ;;
                --require-init)
-                       init=1
                        require_init=1
                        ;;
                --remote)
@@ -276,7 +263,7 @@ cmd_update()
                        force=$1
                        ;;
                -r|--rebase)
-                       update="rebase"
+                       rebase=1
                        ;;
                --reference)
                        case "$2" in '') usage ;; esac
@@ -290,13 +277,13 @@ cmd_update()
                        dissociate=1
                        ;;
                -m|--merge)
-                       update="merge"
+                       merge=1
                        ;;
                --recursive)
                        recursive=1
                        ;;
                --checkout)
-                       update="checkout"
+                       checkout=1
                        ;;
                --recommend-shallow)
                        recommend_shallow="--recommend-shallow"
@@ -349,7 +336,7 @@ cmd_update()
        done
 
        git ${wt_prefix:+-C "$wt_prefix"} submodule--helper update \
-               ${GIT_QUIET:+--quiet} \
+               ${quiet:+--quiet} \
                ${force:+--force} \
                ${progress:+"--progress"} \
                ${remote:+--remote} \
@@ -357,8 +344,9 @@ cmd_update()
                ${init:+--init} \
                ${nofetch:+--no-fetch} \
                ${wt_prefix:+--prefix "$wt_prefix"} \
-               ${prefix:+--recursive-prefix "$prefix"} \
-               ${update:+--update "$update"} \
+               ${rebase:+--rebase} \
+               ${merge:+--merge} \
+               ${checkout:+--checkout} \
                ${reference:+"$reference"} \
                ${dissociate:+"--dissociate"} \
                ${depth:+"$depth"} \
@@ -409,7 +397,7 @@ cmd_set_branch() {
                shift
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-branch ${GIT_QUIET:+--quiet} ${branch:+--branch "$branch"} ${default:+--default} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-branch ${quiet:+--quiet} ${branch:+--branch "$branch"} ${default:+--default} -- "$@"
 }
 
 #
@@ -422,7 +410,7 @@ cmd_set_url() {
        do
                case "$1" in
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        ;;
                --)
                        shift
@@ -438,7 +426,7 @@ cmd_set_url() {
                shift
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-url ${GIT_QUIET:+--quiet} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-url ${quiet:+--quiet} -- "$@"
 }
 
 #
@@ -459,7 +447,7 @@ cmd_summary() {
        do
                case "$1" in
                --cached)
-                       cached="$1"
+                       cached=1
                        ;;
                --files)
                        files="$1"
@@ -509,7 +497,7 @@ cmd_status()
        do
                case "$1" in
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        ;;
                --cached)
                        cached=1
@@ -531,7 +519,7 @@ cmd_status()
                shift
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper status ${GIT_QUIET:+--quiet} ${cached:+--cached} ${recursive:+--recursive} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper status ${quiet:+--quiet} ${cached:+--cached} ${recursive:+--recursive} -- "$@"
 }
 #
 # Sync remote urls for submodules
@@ -544,7 +532,7 @@ cmd_sync()
        do
                case "$1" in
                -q|--quiet)
-                       GIT_QUIET=1
+                       quiet=1
                        shift
                        ;;
                --recursive)
@@ -564,12 +552,12 @@ cmd_sync()
                esac
        done
 
-       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} -- "$@"
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper sync ${quiet:+--quiet} ${recursive:+--recursive} -- "$@"
 }
 
 cmd_absorbgitdirs()
 {
-       git submodule--helper absorb-git-dirs --prefix "$wt_prefix" "$@"
+       git submodule--helper absorbgitdirs --prefix "$wt_prefix" "$@"
 }
 
 # This loop parses the command line arguments to find the
@@ -585,18 +573,10 @@ do
                command=$1
                ;;
        -q|--quiet)
-               GIT_QUIET=1
-               ;;
-       -b|--branch)
-               case "$2" in
-               '')
-                       usage
-                       ;;
-               esac
-               branch="$2"; shift
+               quiet=1
                ;;
        --cached)
-               cached="$1"
+               cached=1
                ;;
        --)
                break
@@ -622,12 +602,6 @@ then
     fi
 fi
 
-# "-b branch" is accepted only by "add" and "set-branch"
-if test -n "$branch" && (test "$command" != add || test "$command" != set-branch)
-then
-       usage
-fi
-
 # "--cached" is accepted only by "status" and "summary"
 if test -n "$cached" && test "$command" != status && test "$command" != summary
 then
diff --git a/git.c b/git.c
index d7a7a82008b41e80c17176a9865ff49bf538031a..da411c53822a1893f7774ae721b5931452741bba 100644 (file)
--- a/git.c
+++ b/git.c
@@ -489,14 +489,14 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 static struct cmd_struct commands[] = {
        { "add", cmd_add, RUN_SETUP | NEED_WORK_TREE },
        { "am", cmd_am, RUN_SETUP | NEED_WORK_TREE },
-       { "annotate", cmd_annotate, RUN_SETUP | NO_PARSEOPT },
+       { "annotate", cmd_annotate, RUN_SETUP },
        { "apply", cmd_apply, RUN_SETUP_GENTLY },
        { "archive", cmd_archive, RUN_SETUP_GENTLY },
        { "bisect--helper", cmd_bisect__helper, RUN_SETUP },
        { "blame", cmd_blame, RUN_SETUP },
        { "branch", cmd_branch, RUN_SETUP | DELAY_PAGER_CONFIG },
        { "bugreport", cmd_bugreport, RUN_SETUP_GENTLY },
-       { "bundle", cmd_bundle, RUN_SETUP_GENTLY | NO_PARSEOPT },
+       { "bundle", cmd_bundle, RUN_SETUP_GENTLY },
        { "cat-file", cmd_cat_file, RUN_SETUP },
        { "check-attr", cmd_check_attr, RUN_SETUP },
        { "check-ignore", cmd_check_ignore, RUN_SETUP | NEED_WORK_TREE },
@@ -514,7 +514,7 @@ static struct cmd_struct commands[] = {
        { "column", cmd_column, RUN_SETUP_GENTLY },
        { "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
        { "commit-graph", cmd_commit_graph, RUN_SETUP },
-       { "commit-tree", cmd_commit_tree, RUN_SETUP | NO_PARSEOPT },
+       { "commit-tree", cmd_commit_tree, RUN_SETUP },
        { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG },
        { "count-objects", cmd_count_objects, RUN_SETUP },
        { "credential", cmd_credential, RUN_SETUP_GENTLY | NO_PARSEOPT },
@@ -522,6 +522,7 @@ static struct cmd_struct commands[] = {
        { "credential-cache--daemon", cmd_credential_cache_daemon },
        { "credential-store", cmd_credential_store },
        { "describe", cmd_describe, RUN_SETUP },
+       { "diagnose", cmd_diagnose, RUN_SETUP_GENTLY },
        { "diff", cmd_diff, NO_PARSEOPT },
        { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
        { "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT },
@@ -553,9 +554,9 @@ static struct cmd_struct commands[] = {
        { "ls-files", cmd_ls_files, RUN_SETUP },
        { "ls-remote", cmd_ls_remote, RUN_SETUP_GENTLY },
        { "ls-tree", cmd_ls_tree, RUN_SETUP },
-       { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY | NO_PARSEOPT },
+       { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY },
        { "mailsplit", cmd_mailsplit, NO_PARSEOPT },
-       { "maintenance", cmd_maintenance, RUN_SETUP | NO_PARSEOPT },
+       { "maintenance", cmd_maintenance, RUN_SETUP },
        { "merge", cmd_merge, RUN_SETUP | NEED_WORK_TREE },
        { "merge-base", cmd_merge_base, RUN_SETUP },
        { "merge-file", cmd_merge_file, RUN_SETUP_GENTLY },
@@ -565,8 +566,8 @@ static struct cmd_struct commands[] = {
        { "merge-recursive-ours", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
        { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
        { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
-       { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT },
-       { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT },
+       { "merge-tree", cmd_merge_tree, RUN_SETUP },
+       { "mktag", cmd_mktag, RUN_SETUP },
        { "mktree", cmd_mktree, RUN_SETUP },
        { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP },
        { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE },
@@ -627,7 +628,7 @@ static struct cmd_struct commands[] = {
        { "verify-tag", cmd_verify_tag, RUN_SETUP },
        { "version", cmd_version },
        { "whatchanged", cmd_whatchanged, RUN_SETUP },
-       { "worktree", cmd_worktree, RUN_SETUP | NO_PARSEOPT },
+       { "worktree", cmd_worktree, RUN_SETUP },
        { "write-tree", cmd_write_tree, RUN_SETUP },
 };
 
diff --git a/gitweb/Makefile b/gitweb/Makefile
index f13e23c4de41a602537043c0b21fab7ccb65df0b..3b68ab2d672c4afae62f8f4dc024e0911027b58d 100644 (file)
--- a/gitweb/Makefile
+++ b/gitweb/Makefile
@@ -1,8 +1,8 @@
-# The default target of this Makefile is...
-all::
+ifndef MAK_DIR_GITWEB
+$(error do not run gitweb/Makefile stand-alone anymore. The "gitweb" and \
+"install-gitweb" targets now live in the top-level Makefile)
+endif
 
-# Define V=1 to have a more verbose compile.
-#
 # Define JSMIN to point to JavaScript minifier that functions as
 # a filter to have static/gitweb.js minified.
 #
@@ -10,13 +10,6 @@ all::
 # version of static/gitweb.css
 #
 
-prefix ?= $(HOME)
-bindir ?= $(prefix)/bin
-gitwebdir ?= /var/www/cgi-bin
-
-RM ?= rm -f
-INSTALL ?= install
-
 # default configuration for gitweb
 GITWEB_CONFIG = gitweb_config.perl
 GITWEB_CONFIG_SYSTEM = /etc/gitweb.conf
@@ -30,89 +23,45 @@ GITWEB_STRICT_EXPORT =
 GITWEB_BASE_URL =
 GITWEB_LIST =
 GITWEB_HOMETEXT = indextext.html
-GITWEB_CSS = static/gitweb.css
+GITWEB_CSS_IN = static/gitweb.css
+GITWEB_CSS = $(GITWEB_CSS_IN)
 GITWEB_LOGO = static/git-logo.png
 GITWEB_FAVICON = static/git-favicon.png
-GITWEB_JS = static/gitweb.js
+GITWEB_JS_IN = static/gitweb.js
+GITWEB_JS = $(GITWEB_JS_IN)
 GITWEB_SITE_HTML_HEAD_STRING =
 GITWEB_SITE_HEADER =
 GITWEB_SITE_FOOTER =
 HIGHLIGHT_BIN = highlight
 
-# include user config
--include ../config.mak.autogen
--include ../config.mak
--include config.mak
-
-# determine version
-../GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE
-       $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) GIT-VERSION-FILE
-
-ifneq ($(MAKECMDGOALS),clean)
--include ../GIT-VERSION-FILE
-endif
-
-### Build rules
-
-SHELL_PATH ?= $(SHELL)
-PERL_PATH  ?= /usr/bin/perl
+# What targets we'll add to 'all' for "make gitweb"
+GITWEB_ALL =
+GITWEB_ALL += gitweb.cgi
+GITWEB_ALL += $(GITWEB_JS)
 
-# Shell quote;
-bindir_SQ = $(subst ','\'',$(bindir))#'
-gitwebdir_SQ = $(subst ','\'',$(gitwebdir))#'
-gitwebstaticdir_SQ = $(subst ','\'',$(gitwebdir)/static)#'
-SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))#'
-PERL_PATH_SQ  = $(subst ','\'',$(PERL_PATH))#'
-DESTDIR_SQ    = $(subst ','\'',$(DESTDIR))#'
-
-# Quiet generation (unless V=1)
-QUIET_SUBDIR0  = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1  =
-
-ifneq ($(findstring $(MAKEFLAGS),w),w)
-PRINT_DIR = --no-print-directory
-else # "make -w"
-NO_SUBDIR = :
-endif
-
-ifneq ($(findstring $(MAKEFLAGS),s),s)
-ifndef V
-       QUIET          = @
-       QUIET_GEN      = $(QUIET)echo '   ' GEN $@;
-       QUIET_SUBDIR0  = +@subdir=
-       QUIET_SUBDIR1  = ;$(NO_SUBDIR) echo '   ' SUBDIR $$subdir; \
-                        $(MAKE) $(PRINT_DIR) -C $$subdir
-       export V
-       export QUIET
-       export QUIET_GEN
-       export QUIET_SUBDIR0
-       export QUIET_SUBDIR1
-endif
-endif
-
-all:: gitweb.cgi static/gitweb.js
+MAK_DIR_GITWEB_ALL = $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_ALL))
 
 GITWEB_PROGRAMS = gitweb.cgi
 
+GITWEB_JS_MIN = static/gitweb.min.js
 ifdef JSMIN
-GITWEB_FILES += static/gitweb.min.js
-GITWEB_JS = static/gitweb.min.js
-all:: static/gitweb.min.js
-static/gitweb.min.js: static/gitweb.js GITWEB-BUILD-OPTIONS
+GITWEB_JS = $(GITWEB_JS_MIN)
+GITWEB_ALL += $(MAK_DIR_GITWEB)$(GITWEB_JS_MIN)
+$(MAK_DIR_GITWEB)$(GITWEB_JS_MIN): $(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS
+$(MAK_DIR_GITWEB)$(GITWEB_JS_MIN): $(MAK_DIR_GITWEB)$(GITWEB_JS_IN)
        $(QUIET_GEN)$(JSMIN) <$< >$@
-else
-GITWEB_FILES += static/gitweb.js
 endif
+GITWEB_FILES += $(GITWEB_JS)
 
+GITWEB_CSS_MIN = static/gitweb.min.css
 ifdef CSSMIN
-GITWEB_FILES += static/gitweb.min.css
-GITWEB_CSS = static/gitweb.min.css
-all:: static/gitweb.min.css
-static/gitweb.min.css: static/gitweb.css GITWEB-BUILD-OPTIONS
+GITWEB_CSS = $(GITWEB_CSS_MIN)
+GITWEB_ALL += $(MAK_DIR_GITWEB)$(GITWEB_CSS_MIN)
+$(MAK_DIR_GITWEB)$(GITWEB_CSS_MIN): $(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS
+$(MAK_DIR_GITWEB)$(GITWEB_CSS_MIN): $(MAK_DIR_GITWEB)$(GITWEB_CSS_IN)
        $(QUIET_GEN)$(CSSMIN) <$< >$@
-else
-GITWEB_FILES += static/gitweb.css
 endif
+GITWEB_FILES += $(GITWEB_CSS)
 
 GITWEB_FILES += static/git-logo.png static/git-favicon.png
 
@@ -120,6 +69,7 @@ GITWEB_FILES += static/git-logo.png static/git-favicon.png
 #
 # js/lib/common-lib.js should be always first, then js/lib/*.js,
 # then the rest of files; js/gitweb.js should be last (if it exists)
+GITWEB_JSLIB_FILES =
 GITWEB_JSLIB_FILES += static/js/lib/common-lib.js
 GITWEB_JSLIB_FILES += static/js/lib/datetime.js
 GITWEB_JSLIB_FILES += static/js/lib/cookies.js
@@ -152,46 +102,45 @@ GITWEB_REPLACE = \
        -e 's|++GITWEB_SITE_FOOTER++|$(GITWEB_SITE_FOOTER)|g' \
        -e 's|++HIGHLIGHT_BIN++|$(HIGHLIGHT_BIN)|g'
 
-GITWEB-BUILD-OPTIONS: FORCE
+.PHONY: FORCE
+$(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS: FORCE
        @rm -f $@+
        @echo "x" '$(PERL_PATH_SQ)' $(GITWEB_REPLACE) "$(JSMIN)|$(CSSMIN)" >$@+
        @cmp -s $@+ $@ && rm -f $@+ || mv -f $@+ $@
 
-gitweb.cgi: gitweb.perl GITWEB-BUILD-OPTIONS
+$(MAK_DIR_GITWEB)gitweb.cgi: $(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS
+$(MAK_DIR_GITWEB)gitweb.cgi: $(MAK_DIR_GITWEB)gitweb.perl
        $(QUIET_GEN)$(RM) $@ $@+ && \
        sed -e '1s|#!.*perl|#!$(PERL_PATH_SQ)|' \
                $(GITWEB_REPLACE) $< >$@+ && \
        chmod +x $@+ && \
        mv $@+ $@
 
-static/gitweb.js: $(GITWEB_JSLIB_FILES)
+$(MAK_DIR_GITWEB)static/gitweb.js: $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_JSLIB_FILES))
        $(QUIET_GEN)$(RM) $@ $@+ && \
        cat $^ >$@+ && \
        mv $@+ $@
 
-### Testing rules
-
-test:
-       $(MAKE) -C ../t gitweb-test
-
-test-installed:
-       GITWEB_TEST_INSTALLED='$(DESTDIR_SQ)$(gitwebdir_SQ)' \
-               $(MAKE) -C ../t gitweb-test
-
 ### Installation rules
 
-install: all
+.PHONY: install-gitweb
+install-gitweb: $(MAK_DIR_GITWEB_ALL)
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitwebdir_SQ)'
-       $(INSTALL) -m 755 $(GITWEB_PROGRAMS) '$(DESTDIR_SQ)$(gitwebdir_SQ)'
+       $(INSTALL) -m 755 $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_PROGRAMS)) '$(DESTDIR_SQ)$(gitwebdir_SQ)'
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitwebstaticdir_SQ)'
-       $(INSTALL) -m 644 $(GITWEB_FILES) '$(DESTDIR_SQ)$(gitwebstaticdir_SQ)'
+       $(INSTALL) -m 644 $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_FILES)) \
+               '$(DESTDIR_SQ)$(gitwebstaticdir_SQ)'
+ifndef NO_GITWEB
+ifndef NO_PERL
+install: install-gitweb
+endif
+endif
 
 ### Cleaning rules
 
-clean:
-       $(RM) gitweb.cgi static/gitweb.js \
-               static/gitweb.min.js static/gitweb.min.css \
-               GITWEB-BUILD-OPTIONS
-
-.PHONY: all clean install test test-installed .FORCE-GIT-VERSION-FILE FORCE
-
+.PHONY: gitweb-clean
+gitweb-clean:
+       $(RM) $(addprefix $(MAK_DIR_GITWEB),gitweb.cgi $(GITWEB_JS_IN) \
+               $(GITWEB_JS_MIN) $(GITWEB_CSS_MIN) \
+               GITWEB-BUILD-OPTIONS)
+clean: gitweb-clean
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index 1835487ab2aeed33a4aa3e6dbde84df7ae7e6d1b..e66eb3d9bad7cf627d5ed35e13e32dafb556d5cd 100755 (executable)
--- a/gitweb/gitweb.perl
+++ b/gitweb/gitweb.perl
@@ -3560,23 +3560,6 @@ sub parse_commit_text {
                $title =~ s/^    //;
                if ($title ne "") {
                        $co{'title'} = chop_str($title, 80, 5);
-                       # remove leading stuff of merges to make the interesting part visible
-                       if (length($title) > 50) {
-                               $title =~ s/^Automatic //;
-                               $title =~ s/^merge (of|with) /Merge ... /i;
-                               if (length($title) > 50) {
-                                       $title =~ s/(http|rsync):\/\///;
-                               }
-                               if (length($title) > 50) {
-                                       $title =~ s/(master|www|rsync)\.//;
-                               }
-                               if (length($title) > 50) {
-                                       $title =~ s/kernel.org:?//;
-                               }
-                               if (length($title) > 50) {
-                                       $title =~ s/\/pub\/scm//;
-                               }
-                       }
                        $co{'title_short'} = chop_str($title, 50, 5);
                        last;
                }
diff --git a/gpg-interface.c b/gpg-interface.c
index 947b58ad4da364bc7890a6ca30d065ee56d9fc8d..9aa714bdeea81e3c781cd8413b9299263030d85d 100644 (file)
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -165,15 +165,17 @@ static struct {
        { 0, "TRUST_", GPG_STATUS_TRUST_LEVEL },
 };
 
-static struct {
+/* Keep the order same as enum signature_trust_level */
+static struct sigcheck_gpg_trust_level {
        const char *key;
+       const char *display_key;
        enum signature_trust_level value;
 } sigcheck_gpg_trust_level[] = {
-       { "UNDEFINED", TRUST_UNDEFINED },
-       { "NEVER", TRUST_NEVER },
-       { "MARGINAL", TRUST_MARGINAL },
-       { "FULLY", TRUST_FULLY },
-       { "ULTIMATE", TRUST_ULTIMATE },
+       { "UNDEFINED", "undefined", TRUST_UNDEFINED },
+       { "NEVER", "never", TRUST_NEVER },
+       { "MARGINAL", "marginal", TRUST_MARGINAL },
+       { "FULLY", "fully", TRUST_FULLY },
+       { "ULTIMATE", "ultimate", TRUST_ULTIMATE },
 };
 
 static void replace_cstring(char **field, const char *line, const char *next)
@@ -697,7 +699,7 @@ void set_signing_key(const char *key)
        configured_signing_key = xstrdup(key);
 }
 
-int git_gpg_config(const char *var, const char *value, void *cb)
+int git_gpg_config(const char *var, const char *value, void *cb UNUSED)
 {
        struct gpg_format *fmt = NULL;
        char *fmtname = NULL;
@@ -905,6 +907,20 @@ const char *get_signing_key(void)
        return git_committer_info(IDENT_STRICT | IDENT_NO_DATE);
 }
 
+const char *gpg_trust_level_to_str(enum signature_trust_level level)
+{
+       struct sigcheck_gpg_trust_level *trust;
+
+       if (level < 0 || level >= ARRAY_SIZE(sigcheck_gpg_trust_level))
+               BUG("invalid trust level requested %d", level);
+
+       trust = &sigcheck_gpg_trust_level[level];
+       if (trust->value != level)
+               BUG("sigcheck_gpg_trust_level[] unsorted");
+
+       return sigcheck_gpg_trust_level[level].display_key;
+}
+
 int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *signing_key)
 {
        return use_format->sign_buffer(buffer, signature, signing_key);
diff --git a/gpg-interface.h b/gpg-interface.h
index b30cbdcd3da546888fb1f8f206205c2373ad633b..8a9ef41779e2fe91305dcb1b13cf66a282446cf6 100644 (file)
--- a/gpg-interface.h
+++ b/gpg-interface.h
@@ -71,6 +71,14 @@ size_t parse_signed_buffer(const char *buf, size_t size);
 int sign_buffer(struct strbuf *buffer, struct strbuf *signature,
                const char *signing_key);
 
+
+/*
+ * Returns corresponding string in lowercase for a given member of
+ * enum signature_trust_level. For example, `TRUST_ULTIMATE` will
+ * return "ultimate".
+ */
+const char *gpg_trust_level_to_str(enum signature_trust_level level);
+
 int git_gpg_config(const char *, const char *, void *);
 void set_signing_key(const char *);
 const char *get_signing_key(void);
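
gpg_trust_level_to_str() gives callers a stable lowercase name for each trust level; the table in gpg-interface.c pairs every enum value with its display string and BUG()s if the two ever get out of sync. A short usage sketch, assuming the usual signature_check layout with a trust_level member:

    #include "git-compat-util.h"
    #include "gpg-interface.h"

    /* Sketch: report the trust level of a checked signature. */
    static void print_trust(const struct signature_check *sigc)
    {
            /* TRUST_MARGINAL -> "marginal", TRUST_ULTIMATE -> "ultimate", ... */
            printf("trust level: %s\n", gpg_trust_level_to_str(sigc->trust_level));
    }
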
diff --git a/grep.c b/grep.c
index 82eb7da1022ecc0c08a83ca7bc46e18602ff275b..52a894c989087c70f215fc46d9f17489de546d8c 100644 (file)
--- a/grep.c
+++ b/grep.c
@@ -1615,7 +1615,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle
                                return 0;
                        goto next_line;
                }
-               if (hit) {
+               if (hit && (opt->max_count < 0 || count < opt->max_count)) {
                        count++;
                        if (opt->status_only)
                                return 1;
diff --git a/grep.h b/grep.h
index c722d25ed9d272fd333162a64ba367bc19ff77da..bdcadce61b8027adc760cd6e7fc51aa4e94c3d06 100644 (file)
--- a/grep.h
+++ b/grep.h
@@ -171,6 +171,7 @@ struct grep_opt {
        int show_hunk_mark;
        int file_break;
        int heading;
+       int max_count;
        void *priv;
 
        void (*output)(struct grep_opt *opt, const void *data, size_t size);
@@ -181,6 +182,7 @@ struct grep_opt {
        .relative = 1, \
        .pathname = 1, \
        .max_depth = -1, \
+       .max_count = -1, \
        .pattern_type_option = GREP_PATTERN_TYPE_UNSPECIFIED, \
        .colors = { \
                [GREP_COLOR_CONTEXT] = "", \
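
The new max_count field defaults to -1, and the grep.c hunk above only keeps counting a hit while max_count is negative or count is still below it, so a non-negative value caps the matching lines reported per file. The command-line wiring for this lives outside the hunks shown here, so the following is only a sketch of a caller opting into the limit, with an invented function name:

    #include "grep.h"

    static void limit_grep_hits(struct grep_opt *opt)
    {
            /*
             * -1 (the GREP_OPT_INIT default) means unlimited; any
             * non-negative value stops the per-file hit count once
             * "count < opt->max_count" no longer holds.
             */
            opt->max_count = 3;
    }
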
diff --git a/hash.h b/hash.h
index 5d40368f18a4d38ef3a62ae3ff378b0167e35a74..36b64165fc96375457415a6348eeebd2944c41f9 100644 (file)
--- a/hash.h
+++ b/hash.h
@@ -4,9 +4,7 @@
 #include "git-compat-util.h"
 #include "repository.h"
 
-#if defined(SHA1_PPC)
-#include "ppc/sha1.h"
-#elif defined(SHA1_APPLE)
+#if defined(SHA1_APPLE)
 #include <CommonCrypto/CommonDigest.h>
 #elif defined(SHA1_OPENSSL)
 #include <openssl/sha.h>
@@ -16,7 +14,9 @@
 #include "block-sha1/sha1.h"
 #endif
 
-#if defined(SHA256_GCRYPT)
+#if defined(SHA256_NETTLE)
+#include "sha256/nettle.h"
+#elif defined(SHA256_GCRYPT)
 #define SHA256_NEEDS_CLONE_HELPER
 #include "sha256/gcrypt.h"
 #elif defined(SHA256_OPENSSL)
@@ -30,7 +30,7 @@
  * platform's underlying implementation of SHA-1; could be OpenSSL,
  * blk_SHA, Apple CommonCrypto, etc...  Note that the relevant
  * SHA-1 header may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
+ * own implementations like block-sha1, so we list
  * the default for OpenSSL compatible SHA-1 implementations here.
  */
 #define platform_SHA_CTX       SHA_CTX
index 134d2eec804c2eb4851689901f47c251f73b01de..cf5fea87eb02bf753d408f1eeb00f5de2a02e907 100644 (file)
--- a/hashmap.c
+++ b/hashmap.c
@@ -142,10 +142,10 @@ static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
        return e;
 }
 
-static int always_equal(const void *unused_cmp_data,
-                       const struct hashmap_entry *unused1,
-                       const struct hashmap_entry *unused2,
-                       const void *unused_keydata)
+static int always_equal(const void *cmp_data UNUSED,
+                       const struct hashmap_entry *entry1 UNUSED,
+                       const struct hashmap_entry *entry2 UNUSED,
+                       const void *keydata UNUSED)
 {
        return 0;
 }
@@ -313,7 +313,7 @@ struct pool_entry {
        unsigned char data[FLEX_ARRAY];
 };
 
-static int pool_entry_cmp(const void *unused_cmp_data,
+static int pool_entry_cmp(const void *cmp_data UNUSED,
                          const struct hashmap_entry *eptr,
                          const struct hashmap_entry *entry_or_key,
                          const void *keydata)
diff --git a/help.c b/help.c
index 41c41c2aa11757645be53662b6d42011d93b00e2..ec670d5f68bd6aa701209dd587b28711ef7b0d8f 100644 (file)
--- a/help.c
+++ b/help.c
@@ -38,19 +38,30 @@ static struct category_description main_categories[] = {
        { CAT_plumbinginterrogators, N_("Low-level Commands / Interrogators") },
        { CAT_synchingrepositories, N_("Low-level Commands / Syncing Repositories") },
        { CAT_purehelpers, N_("Low-level Commands / Internal Helpers") },
+       { CAT_userinterfaces, N_("User-facing repository, command and file interfaces") },
+       { CAT_developerinterfaces, N_("Developer-facing file formats, protocols and interfaces") },
        { 0, NULL }
 };
 
 static const char *drop_prefix(const char *name, uint32_t category)
 {
        const char *new_name;
-
-       if (skip_prefix(name, "git-", &new_name))
-               return new_name;
-       if (category == CAT_guide && skip_prefix(name, "git", &new_name))
+       const char *prefix;
+
+       switch (category) {
+       case CAT_guide:
+       case CAT_userinterfaces:
+       case CAT_developerinterfaces:
+               prefix = "git";
+               break;
+       default:
+               prefix = "git-";
+               break;
+       }
+       if (skip_prefix(name, prefix, &new_name))
                return new_name;
-       return name;
 
+       return name;
 }
 
 static void extract_cmds(struct cmdname_help **p_cmds, uint32_t mask)
@@ -426,6 +437,26 @@ void list_guides_help(void)
        putchar('\n');
 }
 
+void list_user_interfaces_help(void)
+{
+       struct category_description catdesc[] = {
+               { CAT_userinterfaces, N_("User-facing repository, command and file interfaces:") },
+               { 0, NULL }
+       };
+       print_cmd_by_category(catdesc, NULL);
+       putchar('\n');
+}
+
+void list_developer_interfaces_help(void)
+{
+       struct category_description catdesc[] = {
+               { CAT_developerinterfaces, N_("File formats, protocols and other developer interfaces:") },
+               { 0, NULL }
+       };
+       print_cmd_by_category(catdesc, NULL);
+       putchar('\n');
+}
+
 static int get_alias(const char *var, const char *value, void *data)
 {
        struct string_list *list = data;
@@ -750,8 +781,9 @@ struct similar_ref_cb {
        struct string_list *similar_refs;
 };
 
-static int append_similar_ref(const char *refname, const struct object_id *oid,
-                             int flags, void *cb_data)
+static int append_similar_ref(const char *refname,
+                             const struct object_id *oid UNUSED,
+                             int flags UNUSED, void *cb_data)
 {
        struct similar_ref_cb *cb = (struct similar_ref_cb *)(cb_data);
        char *branch = strrchr(refname, '/') + 1;
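
After the refactor, drop_prefix() picks the prefix by category: guides and the two new interface categories strip a bare "git", everything else strips "git-". A self-contained sketch of the same rule; the local skip_prefix() is a stand-in for git's helper:

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for git's skip_prefix() helper. */
    static int skip_prefix(const char *str, const char *prefix, const char **out)
    {
            size_t len = strlen(prefix);

            if (strncmp(str, prefix, len))
                    return 0;
            *out = str + len;
            return 1;
    }

    /* The rule encoded by the switch above: guides and the new interface
     * categories strip "git", everything else strips "git-". */
    static const char *drop_prefix(const char *name, int is_guide_or_interface)
    {
            const char *prefix = is_guide_or_interface ? "git" : "git-";
            const char *rest;

            return skip_prefix(name, prefix, &rest) ? rest : name;
    }

    int main(void)
    {
            printf("%s\n", drop_prefix("git-commit", 0));       /* commit */
            printf("%s\n", drop_prefix("gitformat-bundle", 1)); /* format-bundle */
            return 0;
    }
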
diff --git a/help.h b/help.h
index 971a3ad855acdc2d02697993ed8a7730626b8572..af073a7a0263e7bdacf6dd01f13235b2a655f360 100644 (file)
--- a/help.h
+++ b/help.h
@@ -22,6 +22,8 @@ static inline void mput_char(char c, unsigned int num)
 void list_common_cmds_help(void);
 void list_all_cmds_help(int show_external_commands, int show_aliases);
 void list_guides_help(void);
+void list_user_interfaces_help(void);
+void list_developer_interfaces_help(void);
 
 void list_all_main_cmds(struct string_list *list);
 void list_all_other_cmds(struct string_list *list);
index 58b83a9f66bc9ffbd44b71f2414e59c647187700..6eb3b2fe51c6fe839f2dec5dd584cf174d5b5f73 100644 (file)
@@ -505,7 +505,7 @@ static void run_service(const char **argv, int buffer_input)
 }
 
 static int show_text_ref(const char *name, const struct object_id *oid,
-                        int flag, void *cb_data)
+                        int flag UNUSED, void *cb_data)
 {
        const char *name_nons = strip_namespace(name);
        struct strbuf *buf = cb_data;
diff --git a/http.c b/http.c
index 168ca30c5588444ba01c8da16e3e17e7c8b5bf44..5d0502f51fd85d49ab2fa03995b8afcc585b12cc 100644 (file)
--- a/http.c
+++ b/http.c
@@ -1775,7 +1775,7 @@ static void write_accept_language(struct strbuf *buf)
  *   LANGUAGE= LANG=en_US.UTF-8 -> "Accept-Language: en-US, *; q=0.1"
  *   LANGUAGE= LANG=C -> ""
  */
-static const char *get_accept_language(void)
+const char *http_get_accept_language_header(void)
 {
        if (!cached_accept_language) {
                struct strbuf buf = STRBUF_INIT;
@@ -1829,7 +1829,7 @@ static int http_request(const char *url,
                                         fwrite_buffer);
        }
 
-       accept_language = get_accept_language();
+       accept_language = http_get_accept_language_header();
 
        if (accept_language)
                headers = curl_slist_append(headers, accept_language);
diff --git a/http.h b/http.h
index ba303cfb372825fb0731faebee1ccc7cb9728b29..3c94c479100947041768b88d9e5ee10651e57520 100644 (file)
--- a/http.h
+++ b/http.h
@@ -178,6 +178,9 @@ int http_fetch_ref(const char *base, struct ref *ref);
 int http_get_info_packs(const char *base_url,
                        struct packed_git **packs_head);
 
+/* Helper for getting Accept-Language header */
+const char *http_get_accept_language_header(void);
+
 struct http_pack_request {
        char *url;
 
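
Promoting the former static get_accept_language() to http_get_accept_language_header() lets HTTP code outside http.c attach the same localized header. A hedged sketch mirroring the http_request() call site shown above, assuming the caller already manages a curl_slist of request headers; add_accept_language() is a made-up name:

    #include <curl/curl.h>
    #include "http.h"

    /* Illustrative only: attach the localized Accept-Language header to an
     * outgoing request's header list, as http_request() does above. */
    static struct curl_slist *add_accept_language(struct curl_slist *headers)
    {
            const char *lang = http_get_accept_language_header();

            if (lang)
                    headers = curl_slist_append(headers, lang);
            return headers;
    }
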
diff --git a/ident.c b/ident.c
index 89ca5b47008ee50749744914dd80a009d9adcaff..6de76f9421d57f38c478cca68fcb97c4ede2d36c 100644 (file)
--- a/ident.c
+++ b/ident.c
@@ -8,6 +8,7 @@
 #include "cache.h"
 #include "config.h"
 #include "date.h"
+#include "mailmap.h"
 
 static struct strbuf git_default_name = STRBUF_INIT;
 static struct strbuf git_default_email = STRBUF_INIT;
@@ -346,6 +347,79 @@ person_only:
        return 0;
 }
 
+/*
+ * Returns the difference between the new and old length of the ident line.
+ */
+static ssize_t rewrite_ident_line(const char *person, size_t len,
+                                  struct strbuf *buf,
+                                  struct string_list *mailmap)
+{
+       size_t namelen, maillen;
+       const char *name;
+       const char *mail;
+       struct ident_split ident;
+
+       if (split_ident_line(&ident, person, len))
+               return 0;
+
+       mail = ident.mail_begin;
+       maillen = ident.mail_end - ident.mail_begin;
+       name = ident.name_begin;
+       namelen = ident.name_end - ident.name_begin;
+
+       if (map_user(mailmap, &mail, &maillen, &name, &namelen)) {
+               struct strbuf namemail = STRBUF_INIT;
+               size_t newlen;
+
+               strbuf_addf(&namemail, "%.*s <%.*s>",
+                           (int)namelen, name, (int)maillen, mail);
+
+               strbuf_splice(buf, ident.name_begin - buf->buf,
+                             ident.mail_end - ident.name_begin + 1,
+                             namemail.buf, namemail.len);
+               newlen = namemail.len;
+
+               strbuf_release(&namemail);
+
+               return newlen - (ident.mail_end - ident.name_begin);
+       }
+
+       return 0;
+}
+
+void apply_mailmap_to_header(struct strbuf *buf, const char **header,
+                              struct string_list *mailmap)
+{
+       size_t buf_offset = 0;
+
+       if (!mailmap)
+               return;
+
+       for (;;) {
+               const char *person, *line;
+               size_t i;
+               int found_header = 0;
+
+               line = buf->buf + buf_offset;
+               if (!*line || *line == '\n')
+                       return; /* End of headers */
+
+               for (i = 0; header[i]; i++)
+                       if (skip_prefix(line, header[i], &person)) {
+                               const char *endp = strchrnul(person, '\n');
+                               found_header = 1;
+                               buf_offset += endp - line;
+                               buf_offset += rewrite_ident_line(person, endp - person, buf, mailmap);
+                               break;
+                       }
+
+               if (!found_header) {
+                       buf_offset = strchrnul(line, '\n') - buf->buf;
+                       if (buf->buf[buf_offset] == '\n')
+                               buf_offset++;
+               }
+       }
+}
 
 static void ident_env_hint(enum want_ident whose_ident)
 {
@@ -594,7 +668,7 @@ static int set_ident(const char *var, const char *value)
        return 0;
 }
 
-int git_ident_config(const char *var, const char *value, void *data)
+int git_ident_config(const char *var, const char *value, void *data UNUSED)
 {
        if (!strcmp(var, "user.useconfigonly")) {
                ident_use_config_only = git_config_bool(var, value);
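
The new apply_mailmap_to_header() walks a raw header block and rewrites any ident line whose key appears in the NULL-terminated header list. A minimal sketch of a caller, assuming the declaration lives in cache.h and that read_mailmap()/clear_mailmap() from mailmap.h have their usual one-argument forms; mailmap_commit_headers() is hypothetical:

    #include "cache.h"
    #include "mailmap.h"
    #include "string-list.h"

    /* Hypothetical caller: rewrite author/committer idents in a raw commit
     * header block according to the repository's mailmap. */
    static void mailmap_commit_headers(struct strbuf *commit_buf)
    {
            static const char *headers[] = { "author ", "committer ", NULL };
            struct string_list mailmap = STRING_LIST_INIT_NODUP;

            read_mailmap(&mailmap);
            apply_mailmap_to_header(commit_buf, headers, &mailmap);
            clear_mailmap(&mailmap);
    }
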
index 4b25287886dfe7368582dd615d424e66e6f320f9..6cc4eb8e1c4b603f6f9b053b144e7f86e3a4be13 100644 (file)
@@ -207,7 +207,7 @@ static void filter_spec_append_urlencode(
        struct strbuf buf = STRBUF_INIT;
        strbuf_addstr_urlencode(&buf, raw, allow_unencoded);
        trace_printf("Add to combine filter-spec: %s\n", buf.buf);
-       string_list_append(&filter->filter_spec, strbuf_detach(&buf, NULL));
+       string_list_append_nodup(&filter->filter_spec, strbuf_detach(&buf, NULL));
 }
 
 /*
@@ -226,12 +226,13 @@ static void transform_to_combine_type(
                        xcalloc(initial_sub_alloc, sizeof(*sub_array));
                sub_array[0] = *filter_options;
                memset(filter_options, 0, sizeof(*filter_options));
+               string_list_init_dup(&filter_options->filter_spec);
                filter_options->sub = sub_array;
                filter_options->sub_alloc = initial_sub_alloc;
        }
        filter_options->sub_nr = 1;
        filter_options->choice = LOFC_COMBINE;
-       string_list_append(&filter_options->filter_spec, xstrdup("combine:"));
+       string_list_append(&filter_options->filter_spec, "combine:");
        filter_spec_append_urlencode(
                filter_options,
                list_objects_filter_spec(&filter_options->sub[0]));
@@ -256,8 +257,14 @@ void parse_list_objects_filter(
        struct strbuf errbuf = STRBUF_INIT;
        int parse_error;
 
+       if (!filter_options->filter_spec.strdup_strings) {
+               if (filter_options->filter_spec.nr)
+                       BUG("unexpected non-allocated string in filter_spec");
+               filter_options->filter_spec.strdup_strings = 1;
+       }
+
        if (!filter_options->choice) {
-               string_list_append(&filter_options->filter_spec, xstrdup(arg));
+               string_list_append(&filter_options->filter_spec, arg);
 
                parse_error = gently_parse_list_objects_filter(
                        filter_options, arg, &errbuf);
@@ -268,7 +275,7 @@ void parse_list_objects_filter(
                 */
                transform_to_combine_type(filter_options);
 
-               string_list_append(&filter_options->filter_spec, xstrdup("+"));
+               string_list_append(&filter_options->filter_spec, "+");
                filter_spec_append_urlencode(filter_options, arg);
                ALLOC_GROW_BY(filter_options->sub, filter_options->sub_nr, 1,
                              filter_options->sub_alloc);
@@ -306,7 +313,7 @@ const char *list_objects_filter_spec(struct list_objects_filter_options *filter)
                strbuf_add_separated_string_list(
                        &concatted, "", &filter->filter_spec);
                string_list_clear(&filter->filter_spec, /*free_util=*/0);
-               string_list_append(
+               string_list_append_nodup(
                        &filter->filter_spec, strbuf_detach(&concatted, NULL));
        }
 
@@ -321,7 +328,7 @@ const char *expand_list_objects_filter_spec(
                strbuf_addf(&expanded_spec, "blob:limit=%lu",
                            filter->blob_limit_value);
                string_list_clear(&filter->filter_spec, /*free_util=*/0);
-               string_list_append(
+               string_list_append_nodup(
                        &filter->filter_spec,
                        strbuf_detach(&expanded_spec, NULL));
        }
@@ -418,6 +425,7 @@ void list_objects_filter_copy(
        string_list_init_dup(&dest->filter_spec);
        for_each_string_list_item(item, &src->filter_spec)
                string_list_append(&dest->filter_spec, item->string);
+       dest->sparse_oid_name = xstrdup_or_null(src->sparse_oid_name);
 
        ALLOC_ARRAY(dest->sub, dest->sub_alloc);
        for (i = 0; i < src->sub_nr; i++)
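
The changes above make filter_spec a strdup_strings list, so the two append flavors now encode ownership: string_list_append() copies its argument, while string_list_append_nodup() takes over an already-allocated string such as a detached strbuf. A small sketch of that convention; append_specs() is illustrative, not part of git:

    #include "string-list.h"
    #include "strbuf.h"

    /* Illustrative only: the ownership convention the hunks above rely on. */
    static void append_specs(struct string_list *spec /* strdup_strings == 1 */)
    {
            struct strbuf buf = STRBUF_INIT;

            /* The list owns its strings, so a borrowed literal is copied: */
            string_list_append(spec, "combine:");

            /* ...while a freshly detached buffer is handed over, not copied: */
            strbuf_addstr(&buf, "blob:none");
            string_list_append_nodup(spec, strbuf_detach(&buf, NULL));
    }
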
index 14b83620191019e465291d13be023df794ffb645..8955d7e1f6eec73ef797781c2bd0d76d43d1ef19 100644 (file)
@@ -249,7 +249,8 @@ static enum ll_merge_result ll_ext_merge(const struct ll_merge_driver *fn,
 static struct ll_merge_driver *ll_user_merge, **ll_user_merge_tail;
 static const char *default_ll_merge;
 
-static int read_merge_config(const char *var, const char *value, void *cb)
+static int read_merge_config(const char *var, const char *value,
+                            void *cb UNUSED)
 {
        struct ll_merge_driver *fn;
        const char *key, *name;
index d0ac0a6327a18f5eeee6fc43f036c9a2618fd672..1dd5fcbf7be433740d83382db4c401d438a51720 100644 (file)
@@ -135,12 +135,15 @@ static int ref_filter_match(const char *refname,
 }
 
 static int add_ref_decoration(const char *refname, const struct object_id *oid,
-                             int flags, void *cb_data)
+                             int flags UNUSED,
+                             void *cb_data)
 {
+       int i;
        struct object *obj;
        enum object_type objtype;
        enum decoration_type deco_type = DECORATION_NONE;
        struct decoration_filter *filter = (struct decoration_filter *)cb_data;
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
 
        if (filter && !ref_filter_match(refname, filter))
                return 0;
@@ -165,16 +168,21 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid,
                return 0;
        obj = lookup_object_by_type(the_repository, oid, objtype);
 
-       if (starts_with(refname, "refs/heads/"))
-               deco_type = DECORATION_REF_LOCAL;
-       else if (starts_with(refname, "refs/remotes/"))
-               deco_type = DECORATION_REF_REMOTE;
-       else if (starts_with(refname, "refs/tags/"))
-               deco_type = DECORATION_REF_TAG;
-       else if (!strcmp(refname, "refs/stash"))
-               deco_type = DECORATION_REF_STASH;
-       else if (!strcmp(refname, "HEAD"))
-               deco_type = DECORATION_REF_HEAD;
+       for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) {
+               struct ref_namespace_info *info = &ref_namespace[i];
+
+               if (!info->decoration)
+                       continue;
+               if (info->exact) {
+                       if (!strcmp(refname, info->ref)) {
+                               deco_type = info->decoration;
+                               break;
+                       }
+               } else if (starts_with(refname, info->ref)) {
+                       deco_type = info->decoration;
+                       break;
+               }
+       }
 
        add_name_decoration(deco_type, refname, obj);
        while (obj->type == OBJ_TAG) {
@@ -956,8 +964,7 @@ static void cleanup_additional_headers(struct diff_options *o)
 
 static int do_remerge_diff(struct rev_info *opt,
                           struct commit_list *parents,
-                          struct object_id *oid,
-                          struct commit *commit)
+                          struct object_id *oid)
 {
        struct merge_options o;
        struct commit_list *bases;
@@ -1052,7 +1059,7 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log
                                        "for octopus merges.\n");
                                return 1;
                        }
-                       return do_remerge_diff(opt, parents, oid, commit);
+                       return do_remerge_diff(opt, parents, oid);
                }
                if (opt->combine_merges)
                        return do_diff_combined(opt, commit);
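
add_ref_decoration() now walks the ref_namespace[] table instead of a hard-coded prefix chain; each entry carries a ref name or prefix, an exact flag, and the decoration to apply. A hypothetical, self-contained mirror of that lookup; the entries and names below are illustrative, not the real ref_namespace[] contents:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical mirror of the table-driven lookup above. */
    struct ns_entry { const char *ref; int exact; const char *deco; };

    static const struct ns_entry table[] = {
            { "HEAD",          1, "HEAD" },
            { "refs/tags/",    0, "tag" },
            { "refs/remotes/", 0, "remote" },
    };

    static const char *decorate(const char *refname)
    {
            for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                    const struct ns_entry *e = &table[i];

                    if (e->exact ? !strcmp(refname, e->ref)
                                 : !strncmp(refname, e->ref, strlen(e->ref)))
                            return e->deco;
            }
            return "none";
    }

    int main(void)
    {
            printf("%s\n", decorate("refs/tags/v2.38.0")); /* tag */
            printf("%s\n", decorate("HEAD"));              /* HEAD */
            return 0;
    }
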
index 98e69373c8404ead982595fc9dbda5c4fe8137a1..fa0d01b47c1286767068df928e83dd74c5f7b49e 100644 (file)
--- a/ls-refs.c
+++ b/ls-refs.c
@@ -136,7 +136,8 @@ static void send_possibly_unborn_head(struct ls_refs_data *data)
        strbuf_release(&namespaced);
 }
 
-static int ls_refs_config(const char *var, const char *value, void *data)
+static int ls_refs_config(const char *var, const char *value,
+                         void *data UNUSED)
 {
        /*
         * We only serve fetches over v2 for now, so respect only "uploadpack"
index ad041061695223566ebfb2b9dacfc5aa7d215733..748924a69ba3262088c444c0145bc407750daa0a 100644 (file)
@@ -10,8 +10,8 @@ static int unclean(struct merge_options *opt, struct tree *head)
        struct strbuf sb = STRBUF_INIT;
 
        if (head && repo_index_has_changes(opt->repo, head, &sb)) {
-               fprintf(stderr, _("Your local changes to the following files would be overwritten by merge:\n  %s"),
-                   sb.buf);
+               error(_("Your local changes to the following files would be overwritten by merge:\n  %s"),
+                     sb.buf);
                strbuf_release(&sb);
                return -1;
        }
index ebb9a75425137eaf38bb5647802744e32cadbf16..99dcee2db8afa4c06f88739601e98bc3cf3dd64c 100644 (file)
@@ -349,13 +349,15 @@ struct merge_options_internal {
        struct mem_pool pool;
 
        /*
-        * output: special messages and conflict notices for various paths
+        * conflicts: logical conflicts and messages stored by _primary_ path
         *
         * This is a map of pathnames (a subset of the keys in "paths" above)
-        * to strbufs.  It gathers various warning/conflict/notice messages
-        * for later processing.
+        * to struct string_list, with each item's `util` containing a
+        * `struct logical_conflict_info`. Note, though, that for each path,
+        * it only stores the logical conflicts for which that path is the
+        * primary path; the path might be part of additional conflicts.
         */
-       struct strmap output;
+       struct strmap conflicts;
 
        /*
         * renames: various data relating to rename detection
@@ -385,8 +387,24 @@ struct merge_options_internal {
 
        /* call_depth: recursion level counter for merging merge bases */
        int call_depth;
+
+       /* field that holds submodule conflict information */
+       struct string_list conflicted_submodules;
+};
+
+struct conflicted_submodule_item {
+       char *abbrev;
+       int flag;
 };
 
+static void conflicted_submodule_item_free(void *util, const char *str)
+{
+       struct conflicted_submodule_item *item = util;
+
+       free(item->abbrev);
+       free(item);
+}
+
 struct version_info {
        struct object_id oid;
        unsigned short mode;
@@ -481,6 +499,100 @@ struct conflict_info {
        unsigned match_mask:3;
 };
 
+enum conflict_and_info_types {
+       /* "Simple" conflicts and informational messages */
+       INFO_AUTO_MERGING = 0,
+       CONFLICT_CONTENTS,       /* text file that failed to merge */
+       CONFLICT_BINARY,
+       CONFLICT_FILE_DIRECTORY,
+       CONFLICT_DISTINCT_MODES,
+       CONFLICT_MODIFY_DELETE,
+
+       /* Regular rename */
+       CONFLICT_RENAME_RENAME,   /* same file renamed differently */
+       CONFLICT_RENAME_COLLIDES, /* rename/add or two files renamed to 1 */
+       CONFLICT_RENAME_DELETE,
+
+       /* Basic directory rename */
+       CONFLICT_DIR_RENAME_SUGGESTED,
+       INFO_DIR_RENAME_APPLIED,
+
+       /* Special directory rename cases */
+       INFO_DIR_RENAME_SKIPPED_DUE_TO_RERENAME,
+       CONFLICT_DIR_RENAME_FILE_IN_WAY,
+       CONFLICT_DIR_RENAME_COLLISION,
+       CONFLICT_DIR_RENAME_SPLIT,
+
+       /* Basic submodule */
+       INFO_SUBMODULE_FAST_FORWARDING,
+       CONFLICT_SUBMODULE_FAILED_TO_MERGE,
+
+       /* Special submodule cases broken out from FAILED_TO_MERGE */
+       CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION,
+       CONFLICT_SUBMODULE_NOT_INITIALIZED,
+       CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE,
+       CONFLICT_SUBMODULE_MAY_HAVE_REWINDS,
+       CONFLICT_SUBMODULE_NULL_MERGE_BASE,
+
+       /* Keep this entry _last_ in the list */
+       NB_CONFLICT_TYPES,
+};
+
+/*
+ * Short description of conflict type, relied upon by external tools.
+ *
+ * We can add more entries, but DO NOT change any of these strings.  Also,
+ * the order MUST match enum conflict_and_info_types.
+ */
+static const char *type_short_descriptions[] = {
+       /*** "Simple" conflicts and informational messages ***/
+       [INFO_AUTO_MERGING] = "Auto-merging",
+       [CONFLICT_CONTENTS] = "CONFLICT (contents)",
+       [CONFLICT_BINARY] = "CONFLICT (binary)",
+       [CONFLICT_FILE_DIRECTORY] = "CONFLICT (file/directory)",
+       [CONFLICT_DISTINCT_MODES] = "CONFLICT (distinct modes)",
+       [CONFLICT_MODIFY_DELETE] = "CONFLICT (modify/delete)",
+
+       /*** Regular rename ***/
+       [CONFLICT_RENAME_RENAME] = "CONFLICT (rename/rename)",
+       [CONFLICT_RENAME_COLLIDES] = "CONFLICT (rename involved in collision)",
+       [CONFLICT_RENAME_DELETE] = "CONFLICT (rename/delete)",
+
+       /*** Basic directory rename ***/
+       [CONFLICT_DIR_RENAME_SUGGESTED] =
+               "CONFLICT (directory rename suggested)",
+       [INFO_DIR_RENAME_APPLIED] = "Path updated due to directory rename",
+
+       /*** Special directory rename cases ***/
+       [INFO_DIR_RENAME_SKIPPED_DUE_TO_RERENAME] =
+               "Directory rename skipped since directory was renamed on both sides",
+       [CONFLICT_DIR_RENAME_FILE_IN_WAY] =
+               "CONFLICT (file in way of directory rename)",
+       [CONFLICT_DIR_RENAME_COLLISION] = "CONFLICT(directory rename collision)",
+       [CONFLICT_DIR_RENAME_SPLIT] = "CONFLICT(directory rename unclear split)",
+
+       /*** Basic submodule ***/
+       [INFO_SUBMODULE_FAST_FORWARDING] = "Fast forwarding submodule",
+       [CONFLICT_SUBMODULE_FAILED_TO_MERGE] = "CONFLICT (submodule)",
+
+       /*** Special submodule cases broken out from FAILED_TO_MERGE ***/
+       [CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION] =
+               "CONFLICT (submodule with possible resolution)",
+       [CONFLICT_SUBMODULE_NOT_INITIALIZED] =
+               "CONFLICT (submodule not initialized)",
+       [CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE] =
+               "CONFLICT (submodule history not available)",
+       [CONFLICT_SUBMODULE_MAY_HAVE_REWINDS] =
+               "CONFLICT (submodule may have rewinds)",
+       [CONFLICT_SUBMODULE_NULL_MERGE_BASE] =
+               "CONFLICT (submodule lacks merge base)"
+};
+
+struct logical_conflict_info {
+       enum conflict_and_info_types type;
+       struct strvec paths;
+};
+
 /*** Function Grouping: various utility functions ***/
 
 /*
@@ -567,24 +679,32 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
                struct strmap_entry *e;
 
                /* Release and free each strbuf found in output */
-               strmap_for_each_entry(&opti->output, &iter, e) {
-                       struct strbuf *sb = e->value;
-                       strbuf_release(sb);
+               strmap_for_each_entry(&opti->conflicts, &iter, e) {
+                       struct string_list *list = e->value;
+                       for (int i = 0; i < list->nr; i++) {
+                               struct logical_conflict_info *info =
+                                       list->items[i].util;
+                               strvec_clear(&info->paths);
+                       }
                        /*
-                        * While strictly speaking we don't need to free(sb)
-                        * here because we could pass free_values=1 when
-                        * calling strmap_clear() on opti->output, that would
-                        * require strmap_clear to do another
-                        * strmap_for_each_entry() loop, so we just free it
-                        * while we're iterating anyway.
+                        * While strictly speaking we don't need to
+                        * free(conflicts) here because we could pass
+                        * free_values=1 when calling strmap_clear() on
+                        * opti->conflicts, that would require strmap_clear
+                        * to do another strmap_for_each_entry() loop, so we
+                        * just free it while we're iterating anyway.
                         */
-                       free(sb);
+                       string_list_clear(list, 1);
+                       free(list);
                }
-               strmap_clear(&opti->output, 0);
+               strmap_clear(&opti->conflicts, 0);
        }
 
        mem_pool_discard(&opti->pool, 0);
 
+       string_list_clear_func(&opti->conflicted_submodules,
+                                       conflicted_submodule_item_free);
+
        /* Clean out callback_data as well. */
        FREE_AND_NULL(renames->callback_data);
        renames->callback_data_nr = renames->callback_data_alloc = 0;
@@ -627,29 +747,57 @@ static void format_commit(struct strbuf *sb,
        strbuf_addch(sb, '\n');
 }
 
-__attribute__((format (printf, 4, 5)))
+__attribute__((format (printf, 8, 9)))
 static void path_msg(struct merge_options *opt,
-                    const char *path,
+                    enum conflict_and_info_types type,
                     int omittable_hint, /* skippable under --remerge-diff */
+                    const char *primary_path,
+                    const char *other_path_1, /* may be NULL */
+                    const char *other_path_2, /* may be NULL */
+                    struct string_list *other_paths, /* may be NULL */
                     const char *fmt, ...)
 {
        va_list ap;
-       struct strbuf *sb, *dest;
+       struct string_list *path_conflicts;
+       struct logical_conflict_info *info;
+       struct strbuf buf = STRBUF_INIT;
+       struct strbuf *dest;
        struct strbuf tmp = STRBUF_INIT;
 
+       /* Sanity checks */
+       assert(omittable_hint ==
+              !starts_with(type_short_descriptions[type], "CONFLICT") ||
+              type == CONFLICT_DIR_RENAME_SUGGESTED);
        if (opt->record_conflict_msgs_as_headers && omittable_hint)
                return; /* Do not record mere hints in headers */
        if (opt->priv->call_depth && opt->verbosity < 5)
                return; /* Ignore messages from inner merges */
 
-       sb = strmap_get(&opt->priv->output, path);
-       if (!sb) {
-               sb = xmalloc(sizeof(*sb));
-               strbuf_init(sb, 0);
-               strmap_put(&opt->priv->output, path, sb);
+       /* Ensure path_conflicts (ptr to array of logical_conflict) allocated */
+       path_conflicts = strmap_get(&opt->priv->conflicts, primary_path);
+       if (!path_conflicts) {
+               path_conflicts = xmalloc(sizeof(*path_conflicts));
+               string_list_init_dup(path_conflicts);
+               strmap_put(&opt->priv->conflicts, primary_path, path_conflicts);
        }
 
-       dest = (opt->record_conflict_msgs_as_headers ? &tmp : sb);
+       /* Add a logical_conflict at the end to store info from this call */
+       info = xcalloc(1, sizeof(*info));
+       info->type = type;
+       strvec_init(&info->paths);
+
+       /* Handle the list of paths */
+       strvec_push(&info->paths, primary_path);
+       if (other_path_1)
+               strvec_push(&info->paths, other_path_1);
+       if (other_path_2)
+               strvec_push(&info->paths, other_path_2);
+       if (other_paths)
+               for (int i = 0; i < other_paths->nr; i++)
+                       strvec_push(&info->paths, other_paths->items[i].string);
+
+       /* Handle message and its format, in normal case */
+       dest = (opt->record_conflict_msgs_as_headers ? &tmp : &buf);
 
        va_start(ap, fmt);
        if (opt->priv->call_depth) {
@@ -660,32 +808,32 @@ static void path_msg(struct merge_options *opt,
        strbuf_vaddf(dest, fmt, ap);
        va_end(ap);
 
+       /* Handle specialized formatting of message under --remerge-diff */
        if (opt->record_conflict_msgs_as_headers) {
                int i_sb = 0, i_tmp = 0;
 
                /* Start with the specified prefix */
                if (opt->msg_header_prefix)
-                       strbuf_addf(sb, "%s ", opt->msg_header_prefix);
+                       strbuf_addf(&buf, "%s ", opt->msg_header_prefix);
 
                /* Copy tmp to sb, adding spaces after newlines */
-               strbuf_grow(sb, sb->len + 2*tmp.len); /* more than sufficient */
+               strbuf_grow(&buf, buf.len + 2*tmp.len); /* more than sufficient */
                for (; i_tmp < tmp.len; i_tmp++, i_sb++) {
                        /* Copy next character from tmp to sb */
-                       sb->buf[sb->len + i_sb] = tmp.buf[i_tmp];
+                       buf.buf[buf.len + i_sb] = tmp.buf[i_tmp];
 
                        /* If we copied a newline, add a space */
                        if (tmp.buf[i_tmp] == '\n')
-                               sb->buf[++i_sb] = ' ';
+                               buf.buf[++i_sb] = ' ';
                }
                /* Update length and ensure it's NUL-terminated */
-               sb->len += i_sb;
-               sb->buf[sb->len] = '\0';
+               buf.len += i_sb;
+               buf.buf[buf.len] = '\0';
 
                strbuf_release(&tmp);
        }
-
-       /* Add final newline character to sb */
-       strbuf_addch(sb, '\n');
+       string_list_append_nodup(path_conflicts, strbuf_detach(&buf, NULL))
+               ->util = info;
 }
 
 static struct diff_filespec *pool_alloc_filespec(struct mem_pool *pool,
@@ -1614,38 +1762,50 @@ static int merge_submodule(struct merge_options *opt,
 
        int i;
        int search = !opt->priv->call_depth;
+       int sub_not_initialized = 1;
+       int sub_flag = CONFLICT_SUBMODULE_FAILED_TO_MERGE;
 
        /* store fallback answer in result in case we fail */
        oidcpy(result, opt->priv->call_depth ? o : a);
 
        /* we can not handle deletion conflicts */
-       if (is_null_oid(o))
-               return 0;
-       if (is_null_oid(a))
-               return 0;
-       if (is_null_oid(b))
-               return 0;
+       if (is_null_oid(a) || is_null_oid(b))
+               BUG("submodule deleted on one side; this should be handled outside of merge_submodule()");
+
+       if ((sub_not_initialized = repo_submodule_init(&subrepo,
+               opt->repo, path, null_oid()))) {
+               path_msg(opt, CONFLICT_SUBMODULE_NOT_INITIALIZED, 0,
+                        path, NULL, NULL, NULL,
+                        _("Failed to merge submodule %s (not checked out)"),
+                        path);
+               sub_flag = CONFLICT_SUBMODULE_NOT_INITIALIZED;
+               goto cleanup;
+       }
 
-       if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) {
-               path_msg(opt, path, 0,
-                               _("Failed to merge submodule %s (not checked out)"),
-                               path);
-               return 0;
+       if (is_null_oid(o)) {
+               path_msg(opt, CONFLICT_SUBMODULE_NULL_MERGE_BASE, 0,
+                        path, NULL, NULL, NULL,
+                        _("Failed to merge submodule %s (no merge base)"),
+                        path);
+               goto cleanup;
        }
 
        if (!(commit_o = lookup_commit_reference(&subrepo, o)) ||
            !(commit_a = lookup_commit_reference(&subrepo, a)) ||
            !(commit_b = lookup_commit_reference(&subrepo, b))) {
-               path_msg(opt, path, 0,
+               path_msg(opt, CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE, 0,
+                        path, NULL, NULL, NULL,
                         _("Failed to merge submodule %s (commits not present)"),
                         path);
+               sub_flag = CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE;
                goto cleanup;
        }
 
        /* check whether both changes are forward */
        if (!repo_in_merge_bases(&subrepo, commit_o, commit_a) ||
            !repo_in_merge_bases(&subrepo, commit_o, commit_b)) {
-               path_msg(opt, path, 0,
+               path_msg(opt, CONFLICT_SUBMODULE_MAY_HAVE_REWINDS, 0,
+                        path, NULL, NULL, NULL,
                         _("Failed to merge submodule %s "
                           "(commits don't follow merge-base)"),
                         path);
@@ -1655,7 +1815,8 @@ static int merge_submodule(struct merge_options *opt,
        /* Case #1: a is contained in b or vice versa */
        if (repo_in_merge_bases(&subrepo, commit_a, commit_b)) {
                oidcpy(result, b);
-               path_msg(opt, path, 1,
+               path_msg(opt, INFO_SUBMODULE_FAST_FORWARDING, 1,
+                        path, NULL, NULL, NULL,
                         _("Note: Fast-forwarding submodule %s to %s"),
                         path, oid_to_hex(b));
                ret = 1;
@@ -1663,7 +1824,8 @@ static int merge_submodule(struct merge_options *opt,
        }
        if (repo_in_merge_bases(&subrepo, commit_b, commit_a)) {
                oidcpy(result, a);
-               path_msg(opt, path, 1,
+               path_msg(opt, INFO_SUBMODULE_FAST_FORWARDING, 1,
+                        path, NULL, NULL, NULL,
                         _("Note: Fast-forwarding submodule %s to %s"),
                         path, oid_to_hex(a));
                ret = 1;
@@ -1686,30 +1848,27 @@ static int merge_submodule(struct merge_options *opt,
                                         &merges);
        switch (parent_count) {
        case 0:
-               path_msg(opt, path, 0, _("Failed to merge submodule %s"), path);
+               path_msg(opt, CONFLICT_SUBMODULE_FAILED_TO_MERGE, 0,
+                        path, NULL, NULL, NULL,
+                        _("Failed to merge submodule %s"), path);
                break;
 
        case 1:
                format_commit(&sb, 4, &subrepo,
                              (struct commit *)merges.objects[0].item);
-               path_msg(opt, path, 0,
+               path_msg(opt, CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION, 0,
+                        path, NULL, NULL, NULL,
                         _("Failed to merge submodule %s, but a possible merge "
-                          "resolution exists:\n%s\n"),
+                          "resolution exists: %s"),
                         path, sb.buf);
-               path_msg(opt, path, 1,
-                        _("If this is correct simply add it to the index "
-                          "for example\n"
-                          "by using:\n\n"
-                          "  git update-index --cacheinfo 160000 %s \"%s\"\n\n"
-                          "which will accept this suggestion.\n"),
-                        oid_to_hex(&merges.objects[0].item->oid), path);
                strbuf_release(&sb);
                break;
        default:
                for (i = 0; i < merges.nr; i++)
                        format_commit(&sb, 4, &subrepo,
                                      (struct commit *)merges.objects[i].item);
-               path_msg(opt, path, 0,
+               path_msg(opt, CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION, 0,
+                        path, NULL, NULL, NULL,
                         _("Failed to merge submodule %s, but multiple "
                           "possible merges exist:\n%s"), path, sb.buf);
                strbuf_release(&sb);
@@ -1717,7 +1876,23 @@ static int merge_submodule(struct merge_options *opt,
 
        object_array_clear(&merges);
 cleanup:
-       repo_clear(&subrepo);
+       if (!opt->priv->call_depth && !ret) {
+               struct string_list *csub = &opt->priv->conflicted_submodules;
+               struct conflicted_submodule_item *util;
+               const char *abbrev;
+
+               util = xmalloc(sizeof(*util));
+               util->flag = sub_flag;
+               util->abbrev = NULL;
+               if (!sub_not_initialized) {
+                       abbrev = repo_find_unique_abbrev(&subrepo, b, DEFAULT_ABBREV);
+                       util->abbrev = xstrdup(abbrev);
+               }
+               string_list_append(csub, path)->util = util;
+       }
+
+       if (!sub_not_initialized)
+               repo_clear(&subrepo);
        return ret;
 }
 
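
With the reworked path_msg(), every message is now stored under its primary path together with a conflict type and the full list of involved paths. A rough sketch of what one stored record amounts to, using git's strvec API; the struct below is a local stand-in for the file-local logical_conflict_info defined above:

    #include "strvec.h"

    /* Illustrative mirror of one recorded conflict: a type plus the involved
     * paths, keyed in the conflicts strmap by the first ("primary") path. */
    struct example_conflict {
            int type;               /* e.g. CONFLICT_RENAME_DELETE */
            struct strvec paths;
    };

    static void record_example(struct example_conflict *ec)
    {
            strvec_init(&ec->paths);
            strvec_push(&ec->paths, "new.c");  /* primary path */
            strvec_push(&ec->paths, "old.c");  /* other involved path */
            /* type_short_descriptions[ec->type] would give the fixed label,
             * e.g. "CONFLICT (rename/delete)". */
    }
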
@@ -1835,7 +2010,8 @@ static int merge_3way(struct merge_options *opt,
                                &src1, name1, &src2, name2,
                                &opt->priv->attr_index, &ll_opts);
        if (merge_status == LL_MERGE_BINARY_CONFLICT)
-               path_msg(opt, path, 0,
+               path_msg(opt, CONFLICT_BINARY, 0,
+                        path, NULL, NULL, NULL,
                         "warning: Cannot merge binary files: %s (%s vs. %s)",
                         path, name1, name2);
 
@@ -1947,7 +2123,8 @@ static int handle_content_merge(struct merge_options *opt,
                if (ret)
                        return -1;
                clean &= (merge_status == 0);
-               path_msg(opt, path, 1, _("Auto-merging %s"), path);
+               path_msg(opt, INFO_AUTO_MERGING, 1, path, NULL, NULL, NULL,
+                        _("Auto-merging %s"), path);
        } else if (S_ISGITLINK(a->mode)) {
                int two_way = ((S_IFMT & o->mode) != (S_IFMT & a->mode));
                clean = merge_submodule(opt, pathnames[0],
@@ -2085,21 +2262,24 @@ static char *handle_path_level_conflicts(struct merge_options *opt,
                c_info->reported_already = 1;
                strbuf_add_separated_string_list(&collision_paths, ", ",
                                                 &c_info->source_files);
-               path_msg(opt, new_path, 0,
-                        _("CONFLICT (implicit dir rename): Existing file/dir "
-                          "at %s in the way of implicit directory rename(s) "
-                          "putting the following path(s) there: %s."),
-                      new_path, collision_paths.buf);
+               path_msg(opt, CONFLICT_DIR_RENAME_FILE_IN_WAY, 0,
+                        new_path, NULL, NULL, &c_info->source_files,
+                        _("CONFLICT (implicit dir rename): Existing "
+                          "file/dir at %s in the way of implicit "
+                          "directory rename(s) putting the following "
+                          "path(s) there: %s."),
+                        new_path, collision_paths.buf);
                clean = 0;
        } else if (c_info->source_files.nr > 1) {
                c_info->reported_already = 1;
                strbuf_add_separated_string_list(&collision_paths, ", ",
                                                 &c_info->source_files);
-               path_msg(opt, new_path, 0,
-                        _("CONFLICT (implicit dir rename): Cannot map more "
-                          "than one path to %s; implicit directory renames "
-                          "tried to put these paths there: %s"),
-                      new_path, collision_paths.buf);
+               path_msg(opt, CONFLICT_DIR_RENAME_COLLISION, 0,
+                        new_path, NULL, NULL, &c_info->source_files,
+                        _("CONFLICT (implicit dir rename): Cannot map "
+                          "more than one path to %s; implicit directory "
+                          "renames tried to put these paths there: %s"),
+                        new_path, collision_paths.buf);
                clean = 0;
        }
 
@@ -2153,13 +2333,14 @@ static void get_provisional_directory_renames(struct merge_options *opt,
                        continue;
 
                if (bad_max == max) {
-                       path_msg(opt, source_dir, 0,
-                              _("CONFLICT (directory rename split): "
-                                "Unclear where to rename %s to; it was "
-                                "renamed to multiple other directories, with "
-                                "no destination getting a majority of the "
-                                "files."),
-                              source_dir);
+                       path_msg(opt, CONFLICT_DIR_RENAME_SPLIT, 0,
+                                source_dir, NULL, NULL, NULL,
+                                _("CONFLICT (directory rename split): "
+                                  "Unclear where to rename %s to; it was "
+                                  "renamed to multiple other directories, "
+                                  "with no destination getting a majority of "
+                                  "the files."),
+                                source_dir);
                        *clean = 0;
                } else {
                        strmap_put(&renames->dir_renames[side],
@@ -2334,7 +2515,8 @@ static char *check_for_directory_rename(struct merge_options *opt,
        new_dir = rename_info->value; /* old_dir = rename_info->key; */
        otherinfo = strmap_get_entry(dir_rename_exclusions, new_dir);
        if (otherinfo) {
-               path_msg(opt, rename_info->key, 1,
+               path_msg(opt, INFO_DIR_RENAME_SKIPPED_DUE_TO_RERENAME, 1,
+                        rename_info->key, path, new_dir, NULL,
                         _("WARNING: Avoiding applying %s -> %s rename "
                           "to %s, because %s itself was renamed."),
                         rename_info->key, new_dir, path, new_dir);
@@ -2475,14 +2657,16 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
        if (opt->detect_directory_renames == MERGE_DIRECTORY_RENAMES_TRUE) {
                /* Notify user of updated path */
                if (pair->status == 'A')
-                       path_msg(opt, new_path, 1,
+                       path_msg(opt, INFO_DIR_RENAME_APPLIED, 1,
+                                new_path, old_path, NULL, NULL,
                                 _("Path updated: %s added in %s inside a "
                                   "directory that was renamed in %s; moving "
                                   "it to %s."),
                                 old_path, branch_with_new_path,
                                 branch_with_dir_rename, new_path);
                else
-                       path_msg(opt, new_path, 1,
+                       path_msg(opt, INFO_DIR_RENAME_APPLIED, 1,
+                                new_path, old_path, NULL, NULL,
                                 _("Path updated: %s renamed to %s in %s, "
                                   "inside a directory that was renamed in %s; "
                                   "moving it to %s."),
@@ -2495,7 +2679,8 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
                 */
                ci->path_conflict = 1;
                if (pair->status == 'A')
-                       path_msg(opt, new_path, 1,
+                       path_msg(opt, CONFLICT_DIR_RENAME_SUGGESTED, 1,
+                                new_path, old_path, NULL, NULL,
                                 _("CONFLICT (file location): %s added in %s "
                                   "inside a directory that was renamed in %s, "
                                   "suggesting it should perhaps be moved to "
@@ -2503,7 +2688,8 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
                                 old_path, branch_with_new_path,
                                 branch_with_dir_rename, new_path);
                else
-                       path_msg(opt, new_path, 1,
+                       path_msg(opt, CONFLICT_DIR_RENAME_SUGGESTED, 1,
+                                new_path, old_path, NULL, NULL,
                                 _("CONFLICT (file location): %s renamed to %s "
                                   "in %s, inside a directory that was renamed "
                                   "in %s, suggesting it should perhaps be "
@@ -2659,7 +2845,8 @@ static int process_renames(struct merge_options *opt,
                         * and remove the setting of base->path_conflict to 1.
                         */
                        base->path_conflict = 1;
-                       path_msg(opt, oldpath, 0,
+                       path_msg(opt, CONFLICT_RENAME_RENAME, 0,
+                                pathnames[0], pathnames[1], pathnames[2], NULL,
                                 _("CONFLICT (rename/rename): %s renamed to "
                                   "%s in %s and to %s in %s."),
                                 pathnames[0],
@@ -2754,7 +2941,8 @@ static int process_renames(struct merge_options *opt,
                        memcpy(&newinfo->stages[target_index], &merged,
                               sizeof(merged));
                        if (!clean) {
-                               path_msg(opt, newpath, 0,
+                               path_msg(opt, CONFLICT_RENAME_COLLIDES, 0,
+                                        newpath, oldpath, NULL, NULL,
                                         _("CONFLICT (rename involved in "
                                           "collision): rename of %s -> %s has "
                                           "content conflicts AND collides "
@@ -2773,7 +2961,8 @@ static int process_renames(struct merge_options *opt,
                         */
 
                        newinfo->path_conflict = 1;
-                       path_msg(opt, newpath, 0,
+                       path_msg(opt, CONFLICT_RENAME_DELETE, 0,
+                                newpath, oldpath, NULL, NULL,
                                 _("CONFLICT (rename/delete): %s renamed "
                                   "to %s in %s, but deleted in %s."),
                                 oldpath, newpath, rename_branch, delete_branch);
@@ -2797,7 +2986,8 @@ static int process_renames(struct merge_options *opt,
                        } else if (source_deleted) {
                                /* rename/delete */
                                newinfo->path_conflict = 1;
-                               path_msg(opt, newpath, 0,
+                               path_msg(opt, CONFLICT_RENAME_DELETE, 0,
+                                        newpath, oldpath, NULL, NULL,
                                         _("CONFLICT (rename/delete): %s renamed"
                                           " to %s in %s, but deleted in %s."),
                                         oldpath, newpath,
@@ -3712,7 +3902,8 @@ static void process_entry(struct merge_options *opt,
                path = unique_path(opt, path, branch);
                strmap_put(&opt->priv->paths, path, new_ci);
 
-               path_msg(opt, path, 0,
+               path_msg(opt, CONFLICT_FILE_DIRECTORY, 0,
+                        path, old_path, NULL, NULL,
                         _("CONFLICT (file/directory): directory in the way "
                           "of %s from %s; moving it to %s instead."),
                         old_path, branch, path);
@@ -3788,15 +3979,23 @@ static void process_entry(struct merge_options *opt,
                                rename_b = 1;
                        }
 
+                       if (rename_a)
+                               a_path = unique_path(opt, path, opt->branch1);
+                       if (rename_b)
+                               b_path = unique_path(opt, path, opt->branch2);
+
                        if (rename_a && rename_b) {
-                               path_msg(opt, path, 0,
+                               path_msg(opt, CONFLICT_DISTINCT_MODES, 0,
+                                        path, a_path, b_path, NULL,
                                         _("CONFLICT (distinct types): %s had "
                                           "different types on each side; "
                                           "renamed both of them so each can "
                                           "be recorded somewhere."),
                                         path);
                        } else {
-                               path_msg(opt, path, 0,
+                               path_msg(opt, CONFLICT_DISTINCT_MODES, 0,
+                                        path, rename_a ? a_path : b_path,
+                                        NULL, NULL,
                                         _("CONFLICT (distinct types): %s had "
                                           "different types on each side; "
                                           "renamed one of them so each can be "
@@ -3833,14 +4032,10 @@ static void process_entry(struct merge_options *opt,
 
                        /* Insert entries into opt->priv_paths */
                        assert(rename_a || rename_b);
-                       if (rename_a) {
-                               a_path = unique_path(opt, path, opt->branch1);
+                       if (rename_a)
                                strmap_put(&opt->priv->paths, a_path, ci);
-                       }
 
-                       if (rename_b)
-                               b_path = unique_path(opt, path, opt->branch2);
-                       else
+                       if (!rename_b)
                                b_path = path;
                        strmap_put(&opt->priv->paths, b_path, new_ci);
 
@@ -3891,7 +4086,8 @@ static void process_entry(struct merge_options *opt,
                                reason = _("add/add");
                        if (S_ISGITLINK(merged_file.mode))
                                reason = _("submodule");
-                       path_msg(opt, path, 0,
+                       path_msg(opt, CONFLICT_CONTENTS, 0,
+                                path, NULL, NULL, NULL,
                                 _("CONFLICT (%s): Merge conflict in %s"),
                                 reason, path);
                }
@@ -3935,7 +4131,8 @@ static void process_entry(struct merge_options *opt,
                         * since the contents were not modified.
                         */
                } else {
-                       path_msg(opt, path, 0,
+                       path_msg(opt, CONFLICT_MODIFY_DELETE, 0,
+                                path, NULL, NULL, NULL,
                                 _("CONFLICT (modify/delete): %s deleted in %s "
                                   "and modified in %s.  Version %s of %s left "
                                   "in tree."),
@@ -4223,21 +4420,8 @@ static int record_conflicted_index_entries(struct merge_options *opt)
                         * the CE_SKIP_WORKTREE bit and manually write those
                         * files to the working disk here.
                         */
-                       if (ce_skip_worktree(ce)) {
-                               struct stat st;
-
-                               if (!lstat(path, &st)) {
-                                       char *new_name = unique_path(opt,
-                                                                    path,
-                                                                    "cruft");
-
-                                       path_msg(opt, path, 1,
-                                                _("Note: %s not up to date and in way of checking out conflicted version; old copy renamed to %s"),
-                                                path, new_name);
-                                       errs |= rename(path, new_name);
-                               }
+                       if (ce_skip_worktree(ce))
                                errs |= checkout_entry(ce, &state, NULL, NULL);
-                       }
 
                        /*
                         * Mark this cache entry for removal and instead add
@@ -4279,6 +4463,152 @@ static int record_conflicted_index_entries(struct merge_options *opt)
        return errs;
 }
 
+static void print_submodule_conflict_suggestion(struct string_list *csub) {
+       struct string_list_item *item;
+       struct strbuf msg = STRBUF_INIT;
+       struct strbuf tmp = STRBUF_INIT;
+       struct strbuf subs = STRBUF_INIT;
+
+       if (!csub->nr)
+               return;
+
+       strbuf_add_separated_string_list(&subs, " ", csub);
+       for_each_string_list_item(item, csub) {
+               struct conflicted_submodule_item *util = item->util;
+
+               /*
+                * NEEDSWORK: The steps to resolve these errors deserve a more
+                * detailed explanation than what is currently printed below.
+                */
+               if (util->flag == CONFLICT_SUBMODULE_NOT_INITIALIZED ||
+                   util->flag == CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE)
+                       continue;
+
+               /*
+                * TRANSLATORS: This is a line of advice to resolve a merge
+                * conflict in a submodule. The first argument is the submodule
+                * name, and the second argument is the abbreviated id of the
+                * commit that needs to be merged.  For example:
+                *  - go to submodule (mysubmodule), and either merge commit abc1234"
+                */
+               strbuf_addf(&tmp, _(" - go to submodule (%s), and either merge commit %s\n"
+                                   "   or update to an existing commit which has merged those changes\n"),
+                           item->string, util->abbrev);
+       }
+
+       /*
+        * TRANSLATORS: This is a detailed message for resolving submodule
+        * conflicts.  The first argument is string containing one step per
+        * submodule.  The second is a space-separated list of submodule names.
+        */
+       strbuf_addf(&msg,
+                   _("Recursive merging with submodules currently only supports trivial cases.\n"
+                     "Please manually handle the merging of each conflicted submodule.\n"
+                     "This can be accomplished with the following steps:\n"
+                     "%s"
+                     " - come back to superproject and run:\n\n"
+                     "      git add %s\n\n"
+                     "   to record the above merge or update\n"
+                     " - resolve any other conflicts in the superproject\n"
+                     " - commit the resulting index in the superproject\n"),
+                   tmp.buf, subs.buf);
+
+       printf("%s", msg.buf);
+
+       strbuf_release(&subs);
+       strbuf_release(&tmp);
+       strbuf_release(&msg);
+}
+
+void merge_display_update_messages(struct merge_options *opt,
+                                  int detailed,
+                                  struct merge_result *result)
+{
+       struct merge_options_internal *opti = result->priv;
+       struct hashmap_iter iter;
+       struct strmap_entry *e;
+       struct string_list olist = STRING_LIST_INIT_NODUP;
+
+       if (opt->record_conflict_msgs_as_headers)
+               BUG("Either display conflict messages or record them as headers, not both");
+
+       trace2_region_enter("merge", "display messages", opt->repo);
+
+       /* Hack to pre-allocate olist to the desired size */
+       ALLOC_GROW(olist.items, strmap_get_size(&opti->conflicts),
+                  olist.alloc);
+
+       /* Put every entry from output into olist, then sort */
+       strmap_for_each_entry(&opti->conflicts, &iter, e) {
+               string_list_append(&olist, e->key)->util = e->value;
+       }
+       string_list_sort(&olist);
+
+       /* Iterate over the items, printing them */
+       for (int path_nr = 0; path_nr < olist.nr; ++path_nr) {
+               struct string_list *conflicts = olist.items[path_nr].util;
+               for (int i = 0; i < conflicts->nr; i++) {
+                       struct logical_conflict_info *info =
+                               conflicts->items[i].util;
+
+                       if (detailed) {
+                               printf("%lu", (unsigned long)info->paths.nr);
+                               putchar('\0');
+                               for (int n = 0; n < info->paths.nr; n++) {
+                                       fputs(info->paths.v[n], stdout);
+                                       putchar('\0');
+                               }
+                               fputs(type_short_descriptions[info->type],
+                                     stdout);
+                               putchar('\0');
+                       }
+                       puts(conflicts->items[i].string);
+                       if (detailed)
+                               putchar('\0');
+               }
+       }
+       string_list_clear(&olist, 0);
+
+       print_submodule_conflict_suggestion(&opti->conflicted_submodules);
+
+       /* Also include needed rename limit adjustment now */
+       diff_warn_rename_limit("merge.renamelimit",
+                              opti->renames.needed_limit, 0);
+
+       trace2_region_leave("merge", "display messages", opt->repo);
+}
+
+void merge_get_conflicted_files(struct merge_result *result,
+                               struct string_list *conflicted_files)
+{
+       struct hashmap_iter iter;
+       struct strmap_entry *e;
+       struct merge_options_internal *opti = result->priv;
+
+       strmap_for_each_entry(&opti->conflicted, &iter, e) {
+               const char *path = e->key;
+               struct conflict_info *ci = e->value;
+               int i;
+
+               VERIFY_CI(ci);
+
+               for (i = MERGE_BASE; i <= MERGE_SIDE2; i++) {
+                       struct stage_info *si;
+
+                       if (!(ci->filemask & (1ul << i)))
+                               continue;
+
+                       si = xmalloc(sizeof(*si));
+                       si->stage = i+1;
+                       si->mode = ci->stages[i].mode;
+                       oidcpy(&si->oid, &ci->stages[i].oid);
+                       string_list_append(conflicted_files, path)->util = si;
+               }
+       }
+       /* string_list_sort() uses a stable sort, so we're good */
+       string_list_sort(conflicted_files);
+}
+
 void merge_switch_to_result(struct merge_options *opt,
                            struct tree *head,
                            struct merge_result *result,
@@ -4321,43 +4651,8 @@ void merge_switch_to_result(struct merge_options *opt,
                fclose(fp);
                trace2_region_leave("merge", "write_auto_merge", opt->repo);
        }
-
-       if (display_update_msgs) {
-               struct merge_options_internal *opti = result->priv;
-               struct hashmap_iter iter;
-               struct strmap_entry *e;
-               struct string_list olist = STRING_LIST_INIT_NODUP;
-               int i;
-
-               if (opt->record_conflict_msgs_as_headers)
-                       BUG("Either display conflict messages or record them as headers, not both");
-
-               trace2_region_enter("merge", "display messages", opt->repo);
-
-               /* Hack to pre-allocate olist to the desired size */
-               ALLOC_GROW(olist.items, strmap_get_size(&opti->output),
-                          olist.alloc);
-
-               /* Put every entry from output into olist, then sort */
-               strmap_for_each_entry(&opti->output, &iter, e) {
-                       string_list_append(&olist, e->key)->util = e->value;
-               }
-               string_list_sort(&olist);
-
-               /* Iterate over the items, printing them */
-               for (i = 0; i < olist.nr; ++i) {
-                       struct strbuf *sb = olist.items[i].util;
-
-                       printf("%s", sb->buf);
-               }
-               string_list_clear(&olist, 0);
-
-               /* Also include needed rename limit adjustment now */
-               diff_warn_rename_limit("merge.renamelimit",
-                                      opti->renames.needed_limit, 0);
-
-               trace2_region_leave("merge", "display messages", opt->repo);
-       }
+       if (display_update_msgs)
+               merge_display_update_messages(opt, /* detailed */ 0, result);
 
        merge_finalize(opt, result);
 }
@@ -4477,6 +4772,7 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
        trace2_region_enter("merge", "allocate/init", opt->repo);
        if (opt->priv) {
                clear_or_reinit_internal_opts(opt->priv, 1);
+               string_list_init_nodup(&opt->priv->conflicted_submodules);
                trace2_region_leave("merge", "allocate/init", opt->repo);
                return;
        }
@@ -4531,11 +4827,11 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
        strmap_init_with_options(&opt->priv->conflicted, pool, 0);
 
        /*
-        * keys & strbufs in output will sometimes need to outlive "paths",
-        * so it will have a copy of relevant keys.  It's probably a small
-        * subset of the overall paths that have special output.
+        * keys & string_lists in conflicts will sometimes need to outlive
+        * "paths", so it will have a copy of relevant keys.  It's probably
+        * a small subset of the overall paths that have special output.
         */
-       strmap_init(&opt->priv->output);
+       strmap_init(&opt->priv->conflicts);
 
        trace2_region_leave("merge", "allocate/init", opt->repo);
 }
@@ -4636,7 +4932,8 @@ redo:
        trace2_region_leave("merge", "process_entries", opt->repo);
 
        /* Set return values */
-       result->path_messages = &opt->priv->output;
+       result->path_messages = &opt->priv->conflicts;
+
        result->tree = parse_tree_indirect(&working_tree_oid);
        /* existence of conflicted entries implies unclean */
        result->clean &= strmap_empty(&opt->priv->conflicted);
index fe599b8786891c74cbc5e911689f4a4d319152cd..a994c9a5fcdb040b3ecb80db0489a50c1394ac75 100644 (file)
@@ -2,6 +2,7 @@
 #define MERGE_ORT_H
 
 #include "merge-recursive.h"
+#include "hash.h"
 
 struct commit;
 struct tree;
@@ -27,7 +28,7 @@ struct merge_result {
        /*
         * Special messages and conflict notices for various paths
         *
-        * This is a map of pathnames to strbufs.  It contains various
+        * This is a map of pathnames to string_lists.  It contains various
         * warning/conflict/notice messages (possibly multiple per path)
         * that callers may want to use.
         */
@@ -80,6 +81,35 @@ void merge_switch_to_result(struct merge_options *opt,
                            int update_worktree_and_index,
                            int display_update_msgs);
 
+/*
+ * Display messages about conflicts and which files were 3-way merged.
+ * Automatically called by merge_switch_to_result() (with detailed == 0),
+ * so only call this when bypassing merge_switch_to_result().
+ */
+void merge_display_update_messages(struct merge_options *opt,
+                                  int detailed,
+                                  struct merge_result *result);
+
+struct stage_info {
+       struct object_id oid;
+       int mode;
+       int stage;
+};
+
+/*
+ * Provide a list of path -> {struct stage_info*} mappings for
+ * all conflicted files.  Note that each path could appear up to three
+ * times in the list, corresponding to 3 different stage entries.  In short,
+ * this basically provides the info that would be printed by `ls-files -u`.
+ *
+ * result should have been populated by a call to
+ * one of the merge_incore_[non]recursive() functions.
+ *
+ * conflicted_files should be empty before calling this function.
+ */
+void merge_get_conflicted_files(struct merge_result *result,
+                               struct string_list *conflicted_files);
+
 /* Do needed cleanup when not calling merge_switch_to_result() */
 void merge_finalize(struct merge_options *opt,
                    struct merge_result *result);
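As a rough illustration of how the two new entry points fit together, a hypothetical caller that bypasses merge_switch_to_result() (the helper name below is invented; a merge_result already populated by one of the merge_incore_*() functions and the usual string-list/hex helpers are assumed) might do:

/*
 * Hypothetical caller sketch: print the conflict notices, then list each
 * conflicted path/stage in the spirit of `git ls-files -u`.
 */
static void report_conflicts(struct merge_options *opt,
                             struct merge_result *result)
{
        struct string_list conflicted = STRING_LIST_INIT_NODUP;
        int i;

        merge_display_update_messages(opt, /* detailed */ 0, result);
        merge_get_conflicted_files(result, &conflicted);
        for (i = 0; i < conflicted.nr; i++) {
                struct stage_info *si = conflicted.items[i].util;

                printf("%06o %s %d\t%s\n", si->mode, oid_to_hex(&si->oid),
                       si->stage, conflicted.items[i].string);
        }
        string_list_clear(&conflicted, 1); /* also frees each stage_info */
}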
index b83a129b4313d9c6a4f6434fbe2002068ef7a542..4ddd3adea003e32fb7f16fbfa56a886a5d4a0712 100644 (file)
@@ -45,7 +45,7 @@ struct path_hashmap_entry {
        char path[FLEX_ARRAY];
 };
 
-static int path_hashmap_cmp(const void *cmp_data,
+static int path_hashmap_cmp(const void *cmp_data UNUSED,
                            const struct hashmap_entry *eptr,
                            const struct hashmap_entry *entry_or_key,
                            const void *keydata)
@@ -89,10 +89,10 @@ static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
        return hashmap_get_entry(hashmap, &key, ent, NULL);
 }
 
-static int dir_rename_cmp(const void *unused_cmp_data,
+static int dir_rename_cmp(const void *cmp_data UNUSED,
                          const struct hashmap_entry *eptr,
                          const struct hashmap_entry *entry_or_key,
-                         const void *unused_keydata)
+                         const void *keydata UNUSED)
 {
        const struct dir_rename_entry *e1, *e2;
 
@@ -134,10 +134,10 @@ static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
        return hashmap_get_entry(hashmap, &key, ent, NULL);
 }
 
-static int collision_cmp(const void *unused_cmp_data,
+static int collision_cmp(const void *cmp_data UNUSED,
                         const struct hashmap_entry *eptr,
                         const struct hashmap_entry *entry_or_key,
-                        const void *unused_keydata)
+                        const void *keydata UNUSED)
 {
        const struct collision_entry *e1, *e2;
 
@@ -456,7 +456,7 @@ static void unpack_trees_finish(struct merge_options *opt)
        clear_unpack_trees_porcelain(&opt->priv->unpack_opts);
 }
 
-static int save_files_dirs(const struct object_id *oid,
+static int save_files_dirs(const struct object_id *oid UNUSED,
                           struct strbuf *base, const char *path,
                           unsigned int mode, void *context)
 {
diff --git a/mergesort.c b/mergesort.c
deleted file mode 100644 (file)
index bd9c6ef..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-#include "cache.h"
-#include "mergesort.h"
-
-/* Combine two sorted lists.  Take from `list` on equality. */
-static void *llist_merge(void *list, void *other,
-                        void *(*get_next_fn)(const void *),
-                        void (*set_next_fn)(void *, void *),
-                        int (*compare_fn)(const void *, const void *))
-{
-       void *result = list, *tail;
-
-       if (compare_fn(list, other) > 0) {
-               result = other;
-               goto other;
-       }
-       for (;;) {
-               do {
-                       tail = list;
-                       list = get_next_fn(list);
-                       if (!list) {
-                               set_next_fn(tail, other);
-                               return result;
-                       }
-               } while (compare_fn(list, other) <= 0);
-               set_next_fn(tail, other);
-       other:
-               do {
-                       tail = other;
-                       other = get_next_fn(other);
-                       if (!other) {
-                               set_next_fn(tail, list);
-                               return result;
-                       }
-               } while (compare_fn(list, other) > 0);
-               set_next_fn(tail, list);
-       }
-}
-
-/*
- * Perform an iterative mergesort using an array of sublists.
- *
- * n is the number of items.
- * ranks[i] is undefined if n & 2^i == 0, and assumed empty.
- * ranks[i] contains a sublist of length 2^i otherwise.
- *
- * The number of bits in a void pointer limits the number of objects
- * that can be created, and thus the number of array elements necessary
- * to be able to sort any valid list.
- *
- * Adding an item to this array is like incrementing a binary number;
- * positional values for set bits correspond to sublist lengths.
- */
-void *llist_mergesort(void *list,
-                     void *(*get_next_fn)(const void *),
-                     void (*set_next_fn)(void *, void *),
-                     int (*compare_fn)(const void *, const void *))
-{
-       void *ranks[bitsizeof(void *)];
-       size_t n = 0;
-       int i;
-
-       while (list) {
-               void *next = get_next_fn(list);
-               if (next)
-                       set_next_fn(list, NULL);
-               for (i = 0; n & ((size_t)1 << i); i++)
-                       list = llist_merge(ranks[i], list, get_next_fn,
-                                          set_next_fn, compare_fn);
-               n++;
-               ranks[i] = list;
-               list = next;
-       }
-
-       for (i = 0; n; i++, n >>= 1) {
-               if (!(n & 1))
-                       continue;
-               if (list)
-                       list = llist_merge(ranks[i], list, get_next_fn,
-                                          set_next_fn, compare_fn);
-               else
-                       list = ranks[i];
-       }
-       return list;
-}
index 644cff1f9640f4f3a146d7217a8deab34282ff53..7c36f08bd5f9668521ff993986b757b9766ec1c0 100644 (file)
 #ifndef MERGESORT_H
 #define MERGESORT_H
 
+/* Combine two sorted lists.  Take from `list` on equality. */
+#define DEFINE_LIST_MERGE_INTERNAL(name, type)                         \
+static type *name##__merge(type *list, type *other,                    \
+                          int (*compare_fn)(const type *, const type *))\
+{                                                                      \
+       type *result = list, *tail;                                     \
+       int prefer_list = compare_fn(list, other) <= 0;                 \
+                                                                       \
+       if (!prefer_list) {                                             \
+               result = other;                                         \
+               SWAP(list, other);                                      \
+       }                                                               \
+       for (;;) {                                                      \
+               do {                                                    \
+                       tail = list;                                    \
+                       list = name##__get_next(list);                  \
+                       if (!list) {                                    \
+                               name##__set_next(tail, other);          \
+                               return result;                          \
+                       }                                               \
+               } while (compare_fn(list, other) < prefer_list);        \
+               name##__set_next(tail, other);                          \
+               prefer_list ^= 1;                                       \
+               SWAP(list, other);                                      \
+       }                                                               \
+}
+
 /*
- * Sort linked list in place.
- * - get_next_fn() returns the next element given an element of a linked list.
- * - set_next_fn() takes two elements A and B, and makes B the "next" element
- *   of A on the list.
- * - compare_fn() takes two elements A and B, and returns negative, 0, positive
- *   as the same sign as "subtracting" B from A.
+ * Perform an iterative mergesort using an array of sublists.
+ *
+ * n is the number of items.
+ * ranks[i] is undefined if n & 2^i == 0, and assumed empty.
+ * ranks[i] contains a sublist of length 2^i otherwise.
+ *
+ * The number of bits in a void pointer limits the number of objects
+ * that can be created, and thus the number of array elements necessary
+ * to be able to sort any valid list.
+ *
+ * Adding an item to this array is like incrementing a binary number;
+ * positional values for set bits correspond to sublist lengths.
  */
-void *llist_mergesort(void *list,
-                     void *(*get_next_fn)(const void *),
-                     void (*set_next_fn)(void *, void *),
-                     int (*compare_fn)(const void *, const void *));
+#define DEFINE_LIST_SORT_INTERNAL(scope, name, type)                   \
+scope void name(type **listp,                                          \
+               int (*compare_fn)(const type *, const type *))          \
+{                                                                      \
+       type *list = *listp;                                            \
+       type *ranks[bitsizeof(type *)];                                 \
+       size_t n = 0;                                                   \
+                                                                       \
+       if (!list)                                                      \
+               return;                                                 \
+                                                                       \
+       for (;;) {                                                      \
+               int i;                                                  \
+               size_t m;                                               \
+               type *next = name##__get_next(list);                    \
+               if (next)                                               \
+                       name##__set_next(list, NULL);                   \
+               for (i = 0, m = n;; i++, m >>= 1) {                     \
+                       if (m & 1) {                                    \
+                               list = name##__merge(ranks[i], list,    \
+                                                   compare_fn);        \
+                       } else if (next) {                              \
+                               break;                                  \
+                       } else if (!m) {                                \
+                               *listp = list;                          \
+                               return;                                 \
+                       }                                               \
+               }                                                       \
+               n++;                                                    \
+               ranks[i] = list;                                        \
+               list = next;                                            \
+       }                                                               \
+}
+
+#define DECLARE_LIST_SORT(scope, name, type)                   \
+scope void name(type **listp,                                  \
+               int (*compare_fn)(const type *, const type *))
+
+#define DEFINE_LIST_SORT_DEBUG(scope, name, type, next_member, \
+                              on_get_next, on_set_next)        \
+                                                               \
+static inline type *name##__get_next(const type *elem)         \
+{                                                              \
+       on_get_next;                                            \
+       return elem->next_member;                               \
+}                                                              \
+                                                               \
+static inline void name##__set_next(type *elem, type *next)    \
+{                                                              \
+       on_set_next;                                            \
+       elem->next_member = next;                               \
+}                                                              \
+                                                               \
+DEFINE_LIST_MERGE_INTERNAL(name, type)                         \
+DEFINE_LIST_SORT_INTERNAL(scope, name, type)                   \
+DECLARE_LIST_SORT(scope, name, type)
+
+#define DEFINE_LIST_SORT(scope, name, type, next_member) \
+DEFINE_LIST_SORT_DEBUG(scope, name, type, next_member, (void)0, (void)0)
 
 #endif
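For comparison with the old llist_mergesort() interface: the new macros generate one typed sort function per list type. A minimal hypothetical instantiation (the struct, function and field names below are invented for illustration) looks like:

/* A toy singly-linked list of ints, sorted via the generated function. */
struct int_node {
        int value;
        struct int_node *next;
};

/* Defines: static void sort_int_list(struct int_node **listp, ...) */
DEFINE_LIST_SORT(static, sort_int_list, struct int_node, next);

static int compare_int_nodes(const struct int_node *a,
                             const struct int_node *b)
{
        /* overflow-safe three-way comparison */
        return (a->value > b->value) - (a->value < b->value);
}

/* Usage, given a possibly unsorted list head:
 *
 *      sort_int_list(&head, compare_int_nodes);
 */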
diff --git a/midx.c b/midx.c
index 5f0dd386b0266b6549c85499ab50e964ed868f1d..c27d0e5f1510c32ad7cbf0fca8dc1d05ec4917a3 100644 (file)
--- a/midx.c
+++ b/midx.c
@@ -577,6 +577,78 @@ static void fill_pack_entry(uint32_t pack_int_id,
        entry->preferred = !!preferred;
 }
 
+struct midx_fanout {
+       struct pack_midx_entry *entries;
+       uint32_t nr;
+       uint32_t alloc;
+};
+
+static void midx_fanout_grow(struct midx_fanout *fanout, uint32_t nr)
+{
+       ALLOC_GROW(fanout->entries, nr, fanout->alloc);
+}
+
+static void midx_fanout_sort(struct midx_fanout *fanout)
+{
+       QSORT(fanout->entries, fanout->nr, midx_oid_compare);
+}
+
+static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
+                                       struct multi_pack_index *m,
+                                       uint32_t cur_fanout,
+                                       int preferred_pack)
+{
+       uint32_t start = 0, end;
+       uint32_t cur_object;
+
+       if (cur_fanout)
+               start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
+       end = ntohl(m->chunk_oid_fanout[cur_fanout]);
+
+       for (cur_object = start; cur_object < end; cur_object++) {
+               if ((preferred_pack > -1) &&
+                   (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
+                       /*
+                        * Objects from preferred packs are added
+                        * separately.
+                        */
+                       continue;
+               }
+
+               midx_fanout_grow(fanout, fanout->nr + 1);
+               nth_midxed_pack_midx_entry(m,
+                                          &fanout->entries[fanout->nr],
+                                          cur_object);
+               fanout->entries[fanout->nr].preferred = 0;
+               fanout->nr++;
+       }
+}
+
+static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
+                                       struct pack_info *info,
+                                       uint32_t cur_pack,
+                                       int preferred,
+                                       uint32_t cur_fanout)
+{
+       struct packed_git *pack = info[cur_pack].p;
+       uint32_t start = 0, end;
+       uint32_t cur_object;
+
+       if (cur_fanout)
+               start = get_pack_fanout(pack, cur_fanout - 1);
+       end = get_pack_fanout(pack, cur_fanout);
+
+       for (cur_object = start; cur_object < end; cur_object++) {
+               midx_fanout_grow(fanout, fanout->nr + 1);
+               fill_pack_entry(cur_pack,
+                               info[cur_pack].p,
+                               cur_object,
+                               &fanout->entries[fanout->nr],
+                               preferred);
+               fanout->nr++;
+       }
+}
+
 /*
  * It is possible to artificially get into a state where there are many
  * duplicate copies of objects. That can create high memory pressure if
@@ -595,8 +667,8 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
                                                  int preferred_pack)
 {
        uint32_t cur_fanout, cur_pack, cur_object;
-       uint32_t alloc_fanout, alloc_objects, total_objects = 0;
-       struct pack_midx_entry *entries_by_fanout = NULL;
+       uint32_t alloc_objects, total_objects = 0;
+       struct midx_fanout fanout = { 0 };
        struct pack_midx_entry *deduplicated_entries = NULL;
        uint32_t start_pack = m ? m->num_packs : 0;
 
@@ -608,74 +680,51 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
         * slices to be evenly distributed, with some noise. Hence,
         * allocate slightly more than one 256th.
         */
-       alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16;
+       alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;
 
-       ALLOC_ARRAY(entries_by_fanout, alloc_fanout);
+       ALLOC_ARRAY(fanout.entries, fanout.alloc);
        ALLOC_ARRAY(deduplicated_entries, alloc_objects);
        *nr_objects = 0;
 
        for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
-               uint32_t nr_fanout = 0;
-
-               if (m) {
-                       uint32_t start = 0, end;
-
-                       if (cur_fanout)
-                               start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
-                       end = ntohl(m->chunk_oid_fanout[cur_fanout]);
-
-                       for (cur_object = start; cur_object < end; cur_object++) {
-                               ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
-                               nth_midxed_pack_midx_entry(m,
-                                                          &entries_by_fanout[nr_fanout],
-                                                          cur_object);
-                               if (nth_midxed_pack_int_id(m, cur_object) == preferred_pack)
-                                       entries_by_fanout[nr_fanout].preferred = 1;
-                               else
-                                       entries_by_fanout[nr_fanout].preferred = 0;
-                               nr_fanout++;
-                       }
-               }
+               fanout.nr = 0;
+
+               if (m)
+                       midx_fanout_add_midx_fanout(&fanout, m, cur_fanout,
+                                                   preferred_pack);
 
                for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
-                       uint32_t start = 0, end;
                        int preferred = cur_pack == preferred_pack;
-
-                       if (cur_fanout)
-                               start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
-                       end = get_pack_fanout(info[cur_pack].p, cur_fanout);
-
-                       for (cur_object = start; cur_object < end; cur_object++) {
-                               ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
-                               fill_pack_entry(cur_pack,
-                                               info[cur_pack].p,
-                                               cur_object,
-                                               &entries_by_fanout[nr_fanout],
-                                               preferred);
-                               nr_fanout++;
-                       }
+                       midx_fanout_add_pack_fanout(&fanout,
+                                                   info, cur_pack,
+                                                   preferred, cur_fanout);
                }
 
-               QSORT(entries_by_fanout, nr_fanout, midx_oid_compare);
+               if (-1 < preferred_pack && preferred_pack < start_pack)
+                       midx_fanout_add_pack_fanout(&fanout, info,
+                                                   preferred_pack, 1,
+                                                   cur_fanout);
+
+               midx_fanout_sort(&fanout);
 
                /*
                 * The batch is now sorted by OID and then mtime (descending).
                 * Take only the first duplicate.
                 */
-               for (cur_object = 0; cur_object < nr_fanout; cur_object++) {
-                       if (cur_object && oideq(&entries_by_fanout[cur_object - 1].oid,
-                                               &entries_by_fanout[cur_object].oid))
+               for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
+                       if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
+                                               &fanout.entries[cur_object].oid))
                                continue;
 
                        ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects);
                        memcpy(&deduplicated_entries[*nr_objects],
-                              &entries_by_fanout[cur_object],
+                              &fanout.entries[cur_object],
                               sizeof(struct pack_midx_entry));
                        (*nr_objects)++;
                }
        }
 
-       free(entries_by_fanout);
+       free(fanout.entries);
        return deduplicated_entries;
 }
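The per-fanout flow in get_sorted_entries() boils down to: collect candidate entries from the existing MIDX and from every pack, sort the batch by OID with newer mtimes first, then keep only the first copy of each OID. A stripped-down, self-contained sketch of that sort-then-dedupe step, with a toy struct standing in for pack_midx_entry:

#include <stdlib.h>

struct sample {
        unsigned key;   /* stands in for the object id */
        long mtime;     /* newer copies should win */
};

static int cmp_sample(const void *va, const void *vb)
{
        const struct sample *a = va, *b = vb;

        if (a->key != b->key)
                return a->key < b->key ? -1 : 1;
        /* equal keys: newest mtime sorts first, so it survives the dedupe */
        if (a->mtime != b->mtime)
                return a->mtime > b->mtime ? -1 : 1;
        return 0;
}

/* Sort, then keep only the first (newest) copy of each key. */
static size_t sort_and_dedup(struct sample *v, size_t n)
{
        size_t i, kept = 0;

        qsort(v, n, sizeof(*v), cmp_sample);
        for (i = 0; i < n; i++) {
                if (i && v[i].key == v[i - 1].key)
                        continue;
                v[kept++] = v[i];
        }
        return kept;
}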
 
@@ -1053,40 +1102,37 @@ static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr
        return cb.commits;
 }
 
-static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash,
-                            struct write_midx_context *ctx,
-                            const char *refs_snapshot,
+static int write_midx_bitmap(const char *midx_name,
+                            const unsigned char *midx_hash,
+                            struct packing_data *pdata,
+                            struct commit **commits,
+                            uint32_t commits_nr,
+                            uint32_t *pack_order,
                             unsigned flags)
 {
-       struct packing_data pdata;
-       struct pack_idx_entry **index;
-       struct commit **commits = NULL;
-       uint32_t i, commits_nr;
+       int ret, i;
        uint16_t options = 0;
-       char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, hash_to_hex(midx_hash));
-       int ret;
-
-       if (!ctx->entries_nr)
-               BUG("cannot write a bitmap without any objects");
+       struct pack_idx_entry **index;
+       char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name,
+                                       hash_to_hex(midx_hash));
 
        if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
                options |= BITMAP_OPT_HASH_CACHE;
 
-       prepare_midx_packing_data(&pdata, ctx);
-
-       commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, ctx);
+       if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE)
+               options |= BITMAP_OPT_LOOKUP_TABLE;
 
        /*
         * Build the MIDX-order index based on pdata.objects (which is already
         * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
         * this order).
         */
-       ALLOC_ARRAY(index, pdata.nr_objects);
-       for (i = 0; i < pdata.nr_objects; i++)
-               index[i] = &pdata.objects[i].idx;
+       ALLOC_ARRAY(index, pdata->nr_objects);
+       for (i = 0; i < pdata->nr_objects; i++)
+               index[i] = &pdata->objects[i].idx;
 
        bitmap_writer_show_progress(flags & MIDX_PROGRESS);
-       bitmap_writer_build_type_index(&pdata, index, pdata.nr_objects);
+       bitmap_writer_build_type_index(pdata, index, pdata->nr_objects);
 
        /*
         * bitmap_writer_finish expects objects in lex order, but pack_order
@@ -1101,16 +1147,16 @@ static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash,
         * happens between bitmap_writer_build_type_index() and
         * bitmap_writer_finish().
         */
-       for (i = 0; i < pdata.nr_objects; i++)
-               index[ctx->pack_order[i]] = &pdata.objects[i].idx;
+       for (i = 0; i < pdata->nr_objects; i++)
+               index[pack_order[i]] = &pdata->objects[i].idx;
 
        bitmap_writer_select_commits(commits, commits_nr, -1);
-       ret = bitmap_writer_build(&pdata);
+       ret = bitmap_writer_build(pdata);
        if (ret < 0)
                goto cleanup;
 
        bitmap_writer_set_checksum(midx_hash);
-       bitmap_writer_finish(index, pdata.nr_objects, bitmap_name, options);
+       bitmap_writer_finish(index, pdata->nr_objects, bitmap_name, options);
 
 cleanup:
        free(index);
@@ -1443,14 +1489,40 @@ static int write_midx_internal(const char *object_dir,
        if (flags & MIDX_WRITE_REV_INDEX &&
            git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
                write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);
+
        if (flags & MIDX_WRITE_BITMAP) {
-               if (write_midx_bitmap(midx_name.buf, midx_hash, &ctx,
-                                     refs_snapshot, flags) < 0) {
+               struct packing_data pdata;
+               struct commit **commits;
+               uint32_t commits_nr;
+
+               if (!ctx.entries_nr)
+                       BUG("cannot write a bitmap without any objects");
+
+               prepare_midx_packing_data(&pdata, &ctx);
+
+               commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);
+
+               /*
+                * The previous steps translated the information from
+                * 'entries' into information suitable for constructing
+                * bitmaps. We no longer need that array, so clear it to
+                * reduce memory pressure.
+                */
+               FREE_AND_NULL(ctx.entries);
+               ctx.entries_nr = 0;
+
+               if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata,
+                                     commits, commits_nr, ctx.pack_order,
+                                     flags) < 0) {
                        error(_("could not write multi-pack bitmap"));
                        result = 1;
                        goto cleanup;
                }
        }
+       /*
+        * NOTE: Do not use ctx.entries beyond this point, since it might
+        * have been freed in the previous if block.
+        */
 
        if (ctx.m)
                close_object_store(the_repository->objects);
diff --git a/midx.h b/midx.h
index 22e8e53288ec226f7673d8a0f277961d5bb1d856..5578cd7b835e2b396e502e8abaf4560ab765c850 100644 (file)
--- a/midx.h
+++ b/midx.h
@@ -47,6 +47,7 @@ struct multi_pack_index {
 #define MIDX_WRITE_REV_INDEX (1 << 1)
 #define MIDX_WRITE_BITMAP (1 << 2)
 #define MIDX_WRITE_BITMAP_HASH_CACHE (1 << 3)
+#define MIDX_WRITE_BITMAP_LOOKUP_TABLE (1 << 4)
 
 const unsigned char *get_midx_checksum(struct multi_pack_index *m);
 void get_midx_filename(struct strbuf *out, const char *object_dir);
index 7487d331240e2aa25d1ecfbe22cee9eb4998df50..cd009c7c8ae455324bc9e48aa0f1f4df24211b66 100644 (file)
@@ -18,7 +18,7 @@ struct dir_entry {
        char name[FLEX_ARRAY];
 };
 
-static int dir_entry_cmp(const void *unused_cmp_data,
+static int dir_entry_cmp(const void *cmp_data UNUSED,
                         const struct hashmap_entry *eptr,
                         const struct hashmap_entry *entry_or_key,
                         const void *keydata)
@@ -120,7 +120,7 @@ static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
                add_dir_entry(istate, ce);
 }
 
-static int cache_entry_cmp(const void *unused_cmp_data,
+static int cache_entry_cmp(const void *cmp_data UNUSED,
                           const struct hashmap_entry *eptr,
                           const struct hashmap_entry *entry_or_key,
                           const void *remove)
index 434189ae5dc64a2b6bbdac01d82f4f284f9855c6..b7e79feaf042290c041fbcec439fc2aa8e84a3df 100644 (file)
@@ -36,7 +36,8 @@ static void rev_list_push(struct negotiation_state *ns,
 }
 
 static int clear_marks(const char *refname, const struct object_id *oid,
-                      int flag, void *cb_data)
+                      int flag UNUSED,
+                      void *cb_data UNUSED)
 {
        struct object *o = deref_tag(the_repository, parse_object(the_repository, oid), refname, 0);
 
index 1236e7922484a2d9b98c0d87fb0973c0b6aa6587..c4398f5ae15d320b5f7167105a08d94fe6bbf4fa 100644 (file)
@@ -72,7 +72,8 @@ static struct entry *rev_list_push(struct data *data, struct commit *commit, int
 }
 
 static int clear_marks(const char *refname, const struct object_id *oid,
-                      int flag, void *cb_data)
+                      int flag UNUSED,
+                      void *cb_data UNUSED)
 {
        struct object *o = deref_tag(the_repository, parse_object(the_repository, oid), refname, 0);
 
diff --git a/notes.c b/notes.c
index 7452e71cc8dd289c7ace9361a6c6e090b8b113f9..f2805d51bb15131c46d41fa9554c43ab2e9a492d 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -924,8 +924,9 @@ out:
        return ret;
 }
 
-static int string_list_add_one_ref(const char *refname, const struct object_id *oid,
-                                  int flag, void *cb)
+static int string_list_add_one_ref(const char *refname,
+                                  const struct object_id *oid UNUSED,
+                                  int flag UNUSED, void *cb)
 {
        struct string_list *refs = cb;
        if (!unsorted_string_list_has_string(refs, refname))
@@ -1005,6 +1006,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref,
 
        if (!notes_ref)
                notes_ref = default_notes_ref();
+       update_ref_namespace(NAMESPACE_NOTES, xstrdup(notes_ref));
 
        if (!combine_notes)
                combine_notes = combine_notes_concatenate;
index 6c8e3b16602300923ea2dec61c39e40f624237b8..5b270f046dda1d4234af6174cd80c07f64f9dff6 100644 (file)
@@ -1951,6 +1951,96 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename)
        return fd;
 }
 
+/**
+ * Common steps for loose object writers to start writing loose
+ * objects:
+ *
+ * - Create tmpfile for the loose object.
+ * - Setup zlib stream for compression.
+ * - Start to feed header to zlib stream.
+ *
+ * Returns a "fd", which should later be provided to
+ * write_loose_object_common() and then closed by the caller.
+ */
+static int start_loose_object_common(struct strbuf *tmp_file,
+                                    const char *filename, unsigned flags,
+                                    git_zstream *stream,
+                                    unsigned char *buf, size_t buflen,
+                                    git_hash_ctx *c,
+                                    char *hdr, int hdrlen)
+{
+       int fd;
+
+       fd = create_tmpfile(tmp_file, filename);
+       if (fd < 0) {
+               if (flags & HASH_SILENT)
+                       return -1;
+               else if (errno == EACCES)
+                       return error(_("insufficient permission for adding "
+                                      "an object to repository database %s"),
+                                    get_object_directory());
+               else
+                       return error_errno(
+                               _("unable to create temporary file"));
+       }
+
+       /*  Setup zlib stream for compression */
+       git_deflate_init(stream, zlib_compression_level);
+       stream->next_out = buf;
+       stream->avail_out = buflen;
+       the_hash_algo->init_fn(c);
+
+       /*  Start to feed header to zlib stream */
+       stream->next_in = (unsigned char *)hdr;
+       stream->avail_in = hdrlen;
+       while (git_deflate(stream, 0) == Z_OK)
+               ; /* nothing */
+       the_hash_algo->update_fn(c, hdr, hdrlen);
+
+       return fd;
+}
+
+/**
+ * Common steps for the inner git_deflate() loop for writing loose
+ * objects. Returns what git_deflate() returns.
+ */
+static int write_loose_object_common(git_hash_ctx *c,
+                                    git_zstream *stream, const int flush,
+                                    unsigned char *in0, const int fd,
+                                    unsigned char *compressed,
+                                    const size_t compressed_len)
+{
+       int ret;
+
+       ret = git_deflate(stream, flush ? Z_FINISH : 0);
+       the_hash_algo->update_fn(c, in0, stream->next_in - in0);
+       if (write_buffer(fd, compressed, stream->next_out - compressed) < 0)
+               die(_("unable to write loose object file"));
+       stream->next_out = compressed;
+       stream->avail_out = compressed_len;
+
+       return ret;
+}
+
+/**
+ * Common steps for loose object writers to end writing loose objects:
+ *
+ * - End the compression of zlib stream.
+ * - Store the calculated oid into "oid".
+ */
+static int end_loose_object_common(git_hash_ctx *c, git_zstream *stream,
+                                  struct object_id *oid)
+{
+       int ret;
+
+       ret = git_deflate_end_gently(stream);
+       if (ret != Z_OK)
+               return ret;
+       the_hash_algo->final_oid_fn(oid, c);
+
+       return Z_OK;
+}
+
 static int write_loose_object(const struct object_id *oid, char *hdr,
                              int hdrlen, const void *buf, unsigned long len,
                              time_t mtime, unsigned flags)
@@ -1968,50 +2058,29 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 
        loose_object_path(the_repository, &filename, oid);
 
-       fd = create_tmpfile(&tmp_file, filename.buf);
-       if (fd < 0) {
-               if (flags & HASH_SILENT)
-                       return -1;
-               else if (errno == EACCES)
-                       return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
-               else
-                       return error_errno(_("unable to create temporary file"));
-       }
-
-       /* Set it up */
-       git_deflate_init(&stream, zlib_compression_level);
-       stream.next_out = compressed;
-       stream.avail_out = sizeof(compressed);
-       the_hash_algo->init_fn(&c);
-
-       /* First header.. */
-       stream.next_in = (unsigned char *)hdr;
-       stream.avail_in = hdrlen;
-       while (git_deflate(&stream, 0) == Z_OK)
-               ; /* nothing */
-       the_hash_algo->update_fn(&c, hdr, hdrlen);
+       fd = start_loose_object_common(&tmp_file, filename.buf, flags,
+                                      &stream, compressed, sizeof(compressed),
+                                      &c, hdr, hdrlen);
+       if (fd < 0)
+               return -1;
 
        /* Then the data itself.. */
        stream.next_in = (void *)buf;
        stream.avail_in = len;
        do {
                unsigned char *in0 = stream.next_in;
-               ret = git_deflate(&stream, Z_FINISH);
-               the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
-               if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
-                       die(_("unable to write loose object file"));
-               stream.next_out = compressed;
-               stream.avail_out = sizeof(compressed);
+
+               ret = write_loose_object_common(&c, &stream, 1, in0, fd,
+                                               compressed, sizeof(compressed));
        } while (ret == Z_OK);
 
        if (ret != Z_STREAM_END)
                die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
                    ret);
-       ret = git_deflate_end_gently(&stream);
+       ret = end_loose_object_common(&c, &stream, &parano_oid);
        if (ret != Z_OK)
                die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
                    ret);
-       the_hash_algo->final_oid_fn(&parano_oid, &c);
        if (!oideq(oid, &parano_oid))
                die(_("confused by unstable object source data for %s"),
                    oid_to_hex(oid));
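The three *_loose_object_common() helpers follow the standard zlib pattern: initialize the stream, deflate the header, deflate the payload until Z_STREAM_END, then tear the stream down. A minimal standalone sketch of that pattern against raw zlib (no git wrappers, error handling omitted, output buffer assumed large enough):

#include <string.h>
#include <zlib.h>

/* Deflate "hdr" followed by "buf" into "out"; returns the compressed size. */
static size_t deflate_hdr_and_payload(const char *hdr, size_t hdrlen,
                                      const void *buf, size_t len,
                                      unsigned char *out, size_t outlen)
{
        z_stream s;
        int ret;

        memset(&s, 0, sizeof(s));
        deflateInit(&s, Z_DEFAULT_COMPRESSION);
        s.next_out = out;
        s.avail_out = (uInt)outlen;

        /* First the header... */
        s.next_in = (unsigned char *)hdr;
        s.avail_in = (uInt)hdrlen;
        while (deflate(&s, Z_NO_FLUSH) == Z_OK)
                ; /* nothing */

        /* ...then the payload, finishing the stream. */
        s.next_in = (unsigned char *)buf;
        s.avail_in = (uInt)len;
        do {
                ret = deflate(&s, Z_FINISH);
        } while (ret == Z_OK);

        deflateEnd(&s);
        return outlen - s.avail_out;
}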
@@ -2050,6 +2119,110 @@ static int freshen_packed_object(const struct object_id *oid)
        return 1;
 }
 
+int stream_loose_object(struct input_stream *in_stream, size_t len,
+                       struct object_id *oid)
+{
+       int fd, ret, err = 0, flush = 0;
+       unsigned char compressed[4096];
+       git_zstream stream;
+       git_hash_ctx c;
+       struct strbuf tmp_file = STRBUF_INIT;
+       struct strbuf filename = STRBUF_INIT;
+       int dirlen;
+       char hdr[MAX_HEADER_LEN];
+       int hdrlen;
+
+       if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
+               prepare_loose_object_bulk_checkin();
+
+       /* The oid is not known yet, so save the tmpfile directly under the odb path. */
+       strbuf_addf(&filename, "%s/", get_object_directory());
+       hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len);
+
+       /*
+        * Common steps for write_loose_object and stream_loose_object to
+        * start writing loose objects:
+        *
+        *  - Create tmpfile for the loose object.
+        *  - Setup zlib stream for compression.
+        *  - Start to feed header to zlib stream.
+        */
+       fd = start_loose_object_common(&tmp_file, filename.buf, 0,
+                                      &stream, compressed, sizeof(compressed),
+                                      &c, hdr, hdrlen);
+       if (fd < 0) {
+               err = -1;
+               goto cleanup;
+       }
+
+       /* Then the data itself.. */
+       do {
+               unsigned char *in0 = stream.next_in;
+
+               if (!stream.avail_in && !in_stream->is_finished) {
+                       const void *in = in_stream->read(in_stream, &stream.avail_in);
+                       stream.next_in = (void *)in;
+                       in0 = (unsigned char *)in;
+                       /* All data has been read. */
+                       if (in_stream->is_finished)
+                               flush = 1;
+               }
+               ret = write_loose_object_common(&c, &stream, flush, in0, fd,
+                                               compressed, sizeof(compressed));
+               /*
+                * Unlike write_loose_object(), we do not have the entire
+                * buffer. If we get Z_BUF_ERROR due to too few input bytes,
+                * then we'll replenish them in the next input_stream->read()
+                * call when we loop.
+                */
+       } while (ret == Z_OK || ret == Z_BUF_ERROR);
+
+       if (stream.total_in != len + hdrlen)
+               die(_("write stream object %lu != %"PRIuMAX), stream.total_in,
+                   (uintmax_t)len + hdrlen);
+
+       /*
+        * Common steps for write_loose_object and stream_loose_object to
+        * end writing loose object:
+        *
+        *  - End the compression of zlib stream.
+        *  - Get the calculated oid.
+        */
+       if (ret != Z_STREAM_END)
+               die(_("unable to stream deflate new object (%d)"), ret);
+       ret = end_loose_object_common(&c, &stream, oid);
+       if (ret != Z_OK)
+               die(_("deflateEnd on stream object failed (%d)"), ret);
+       close_loose_object(fd, tmp_file.buf);
+
+       if (freshen_packed_object(oid) || freshen_loose_object(oid)) {
+               unlink_or_warn(tmp_file.buf);
+               goto cleanup;
+       }
+
+       loose_object_path(the_repository, &filename, oid);
+
+       /* We finally know the object path; create its directory if missing. */
+       dirlen = directory_size(filename.buf);
+       if (dirlen) {
+               struct strbuf dir = STRBUF_INIT;
+               strbuf_add(&dir, filename.buf, dirlen);
+
+               if (mkdir_in_gitdir(dir.buf) && errno != EEXIST) {
+                       err = error_errno(_("unable to create directory %s"), dir.buf);
+                       strbuf_release(&dir);
+                       goto cleanup;
+               }
+               strbuf_release(&dir);
+       }
+
+       err = finalize_object_file(tmp_file.buf, filename.buf);
+cleanup:
+       strbuf_release(&tmp_file);
+       strbuf_release(&filename);
+       return err;
+}
+
 int write_object_file_flags(const void *buf, unsigned long len,
                            enum object_type type, struct object_id *oid,
                            unsigned flags)
index 4d2746574cde0b813a73772c13d71a30db696128..2dd1a0f56e1e442dec47dfbbcdd46d58aecc812c 100644 (file)
@@ -1306,7 +1306,8 @@ struct handle_one_ref_cb {
 };
 
 static int handle_one_ref(const char *path, const struct object_id *oid,
-                         int flag, void *cb_data)
+                         int flag UNUSED,
+                         void *cb_data)
 {
        struct handle_one_ref_cb *cb = cb_data;
        struct commit_list **list = cb->list;
@@ -1384,8 +1385,11 @@ struct grab_nth_branch_switch_cbdata {
        struct strbuf *sb;
 };
 
-static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
-                                 const char *email, timestamp_t timestamp, int tz,
+static int grab_nth_branch_switch(struct object_id *ooid UNUSED,
+                                 struct object_id *noid UNUSED,
+                                 const char *email UNUSED,
+                                 timestamp_t timestamp UNUSED,
+                                 int tz UNUSED,
                                  const char *message, void *cb_data)
 {
        struct grab_nth_branch_switch_cbdata *cb = cb_data;
index 539ea439046dc18625917769040c7f824c490c24..1be57abaf10d7aa527df7f22ebaf660f6430d4d8 100644 (file)
@@ -46,6 +46,12 @@ struct object_directory {
        char *path;
 };
 
+struct input_stream {
+       const void *(*read)(struct input_stream *, unsigned long *len);
+       void *data;
+       int is_finished;
+};
+
 KHASH_INIT(odb_path_map, const char * /* key: odb_path */,
        struct object_directory *, 1, fspathhash, fspatheq)
 
@@ -135,7 +141,7 @@ struct packed_git {
 
 struct multi_pack_index;
 
-static inline int pack_map_entry_cmp(const void *unused_cmp_data,
+static inline int pack_map_entry_cmp(const void *cmp_data UNUSED,
                                     const struct hashmap_entry *entry,
                                     const struct hashmap_entry *entry2,
                                     const void *keydata)
@@ -269,6 +275,8 @@ static inline int write_object_file(const void *buf, unsigned long len,
 int write_object_file_literally(const void *buf, unsigned long len,
                                const char *type, struct object_id *oid,
                                unsigned flags);
+int stream_loose_object(struct input_stream *in_stream, size_t len,
+                       struct object_id *oid);
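The struct input_stream declared earlier in this header is the callback interface that stream_loose_object() pulls its payload through. As a hypothetical illustration (the adapter struct and function names are invented), a caller that already holds the whole blob in memory could wrap it like this; real callers would instead feed data incrementally from a pack or file stream:

/* Hypothetical adapter: serve one in-memory buffer through input_stream. */
struct buffer_stream_data {
        const unsigned char *buf;
        size_t len;
};

static const void *read_from_buffer(struct input_stream *in_stream,
                                    unsigned long *len)
{
        struct buffer_stream_data *data = in_stream->data;

        *len = data->len;
        in_stream->is_finished = 1; /* everything handed out in one go */
        return data->buf;
}

/*
 * Usage sketch:
 *
 *      struct buffer_stream_data data = { buf, size };
 *      struct input_stream in_stream = {
 *              .read = read_from_buffer,
 *              .data = &data,
 *      };
 *      struct object_id oid;
 *
 *      if (stream_loose_object(&in_stream, size, &oid))
 *              die("failed to stream loose object");
 */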
 
 /*
  * Add an object file to the in-memory object store, without writing it
index 588b8156f1d634a5cb9c25038961ca9a0d28d255..2e4589bae50105da847ad5b861d1752fb26dec77 100644 (file)
--- a/object.c
+++ b/object.c
@@ -263,8 +263,11 @@ struct object *parse_object_or_die(const struct object_id *oid,
        die(_("unable to parse object: %s"), name ? name : oid_to_hex(oid));
 }
 
-struct object *parse_object(struct repository *r, const struct object_id *oid)
+struct object *parse_object_with_flags(struct repository *r,
+                                      const struct object_id *oid,
+                                      enum parse_object_flags flags)
 {
+       int skip_hash = !!(flags & PARSE_OBJECT_SKIP_HASH_CHECK);
        unsigned long size;
        enum object_type type;
        int eaten;
@@ -276,10 +279,16 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
        if (obj && obj->parsed)
                return obj;
 
+       if (skip_hash) {
+               struct commit *commit = lookup_commit_in_graph(r, repl);
+               if (commit)
+                       return &commit->object;
+       }
+
        if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
            (!obj && repo_has_object_file(r, oid) &&
             oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
-               if (stream_object_signature(r, repl) < 0) {
+               if (!skip_hash && stream_object_signature(r, repl) < 0) {
                        error(_("hash mismatch %s"), oid_to_hex(oid));
                        return NULL;
                }
@@ -289,7 +298,8 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
 
        buffer = repo_read_object_file(r, oid, &type, &size);
        if (buffer) {
-               if (check_object_signature(r, repl, buffer, size, type) < 0) {
+               if (!skip_hash &&
+                   check_object_signature(r, repl, buffer, size, type) < 0) {
                        free(buffer);
                        error(_("hash mismatch %s"), oid_to_hex(repl));
                        return NULL;
@@ -304,6 +314,11 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
        return NULL;
 }
 
+struct object *parse_object(struct repository *r, const struct object_id *oid)
+{
+       return parse_object_with_flags(r, oid, 0);
+}
+
 struct object_list *object_list_insert(struct object *item,
                                       struct object_list **list_p)
 {
index a2219464c2b31043692fb9c0d224fdf0b1c2c253..31ebe114585bb14cb95f4c74486635754d199955 100644 (file)
--- a/object.h
+++ b/object.h
@@ -59,7 +59,7 @@ struct object_array {
 
 /*
  * object flag allocation:
- * revision.h:               0---------10         15             23------26
+ * revision.h:               0---------10         15             23------27
  * fetch-pack.c:             01    67
  * negotiator/default.c:       2--5
  * walker.c:                 0-2
@@ -128,7 +128,13 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet);
  *
  * Returns NULL if the object is missing or corrupt.
  */
+enum parse_object_flags {
+       PARSE_OBJECT_SKIP_HASH_CHECK = 1 << 0,
+};
 struct object *parse_object(struct repository *r, const struct object_id *oid);
+struct object *parse_object_with_flags(struct repository *r,
+                                      const struct object_id *oid,
+                                      enum parse_object_flags flags);
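A hypothetical call site (the helper name is invented) for a caller that already trusts the object contents and wants to skip the redundant hashing, reusing object_as_type() declared above:

/* Parse a commit whose contents we already trust, without re-hashing it. */
static struct commit *lookup_trusted_commit(struct repository *r,
                                            const struct object_id *oid)
{
        struct object *obj = parse_object_with_flags(r, oid,
                                        PARSE_OBJECT_SKIP_HASH_CHECK);

        return obj ? object_as_type(obj, OBJ_COMMIT, 0) : NULL;
}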
 
 /*
  * Like parse_object, but will die() instead of returning NULL. If the
index 286a04a53c20119fe567eec4accbaba4d9f04a97..49965fe856814393c9381788dced2b05d35aed1f 100644 (file)
--- a/oidmap.c
+++ b/oidmap.c
@@ -1,7 +1,7 @@
 #include "cache.h"
 #include "oidmap.h"
 
-static int oidmap_neq(const void *hashmap_cmp_fn_data,
+static int oidmap_neq(const void *hashmap_cmp_fn_data UNUSED,
                      const struct hashmap_entry *e1,
                      const struct hashmap_entry *e2,
                      const void *keydata)
index c43375bd344f22473adcd7cac09c9f1572e27079..a213f5eddc5df2401f1232f37514050ea91efc31 100644 (file)
@@ -649,21 +649,18 @@ static const struct object_id *oid_access(size_t pos, const void *table)
 }
 
 static void write_selected_commits_v1(struct hashfile *f,
-                                     struct pack_idx_entry **index,
-                                     uint32_t index_nr)
+                                     uint32_t *commit_positions,
+                                     off_t *offsets)
 {
        int i;
 
        for (i = 0; i < writer.selected_nr; ++i) {
                struct bitmapped_commit *stored = &writer.selected[i];
 
-               int commit_pos =
-                       oid_pos(&stored->commit->object.oid, index, index_nr, oid_access);
+               if (offsets)
+                       offsets[i] = hashfile_total(f);
 
-               if (commit_pos < 0)
-                       BUG("trying to write commit not in index");
-
-               hashwrite_be32(f, commit_pos);
+               hashwrite_be32(f, commit_positions[i]);
                hashwrite_u8(f, stored->xor_offset);
                hashwrite_u8(f, stored->flags);
 
@@ -671,6 +668,79 @@ static void write_selected_commits_v1(struct hashfile *f,
        }
 }
 
+static int table_cmp(const void *_va, const void *_vb, void *_data)
+{
+       uint32_t *commit_positions = _data;
+       uint32_t a = commit_positions[*(uint32_t *)_va];
+       uint32_t b = commit_positions[*(uint32_t *)_vb];
+
+       if (a > b)
+               return 1;
+       else if (a < b)
+               return -1;
+
+       return 0;
+}
+
+static void write_lookup_table(struct hashfile *f,
+                              uint32_t *commit_positions,
+                              off_t *offsets)
+{
+       uint32_t i;
+       uint32_t *table, *table_inv;
+
+       ALLOC_ARRAY(table, writer.selected_nr);
+       ALLOC_ARRAY(table_inv, writer.selected_nr);
+
+       for (i = 0; i < writer.selected_nr; i++)
+               table[i] = i;
+
+       /*
+        * At the end of this sort table[j] = i means that the i'th
+        * bitmap corresponds to j'th bitmapped commit (among the selected
+        * commits) in lex order of OIDs.
+        */
+       QSORT_S(table, writer.selected_nr, table_cmp, commit_positions);
+
+       /* table_inv helps us discover that relationship (i'th bitmap
+        * to j'th commit by j = table_inv[i])
+        */
+       for (i = 0; i < writer.selected_nr; i++)
+               table_inv[table[i]] = i;
+
+       trace2_region_enter("pack-bitmap-write", "writing_lookup_table", the_repository);
+       for (i = 0; i < writer.selected_nr; i++) {
+               struct bitmapped_commit *selected = &writer.selected[table[i]];
+               uint32_t xor_offset = selected->xor_offset;
+               uint32_t xor_row;
+
+               if (xor_offset) {
+                       /*
+                        * xor_index stores the index (in the bitmap entries)
+                        * of the corresponding xor bitmap. But we need to convert
+                        * this index into lookup table's index. So, table_inv[xor_index]
+                        * gives us the index position w.r.t. the lookup table.
+                        *
+                        * If "k = table[i] - xor_offset" then the xor base is the k'th
+                        * bitmap. `table_inv[k]` gives us the position of that bitmap
+                        * in the lookup table.
+                        */
+                       uint32_t xor_index = table[i] - xor_offset;
+                       xor_row = table_inv[xor_index];
+               } else {
+                       xor_row = 0xffffffff;
+               }
+
+               hashwrite_be32(f, commit_positions[table[i]]);
+               hashwrite_be64(f, (uint64_t)offsets[table[i]]);
+               hashwrite_be32(f, xor_row);
+       }
+       trace2_region_leave("pack-bitmap-write", "writing_lookup_table", the_repository);
+
+       free(table);
+       free(table_inv);
+}
+
 static void write_hash_cache(struct hashfile *f,
                             struct pack_idx_entry **index,
                             uint32_t index_nr)
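
To make the table/table_inv relationship above concrete, a small worked example with made-up numbers:

    /*
     * Worked example (hypothetical values), with writer.selected_nr == 3:
     *
     *     commit_positions[] = { 42, 7, 19 };    bitmap i -> pack index pos
     *
     * Sorting table[] = { 0, 1, 2 } by commit_positions[table[j]] gives
     *
     *     table[]     = { 1, 2, 0 };    row j of the table holds bitmap table[j]
     *     table_inv[] = { 2, 0, 1 };    bitmap i lands in row table_inv[i]
     *
     * so translating "the xor base is bitmap k" into "the xor base is row
     * table_inv[k]" is a single array lookup while the rows are written out.
     */
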
@@ -683,7 +753,7 @@ static void write_hash_cache(struct hashfile *f,
        }
 }
 
-void bitmap_writer_set_checksum(unsigned char *sha1)
+void bitmap_writer_set_checksum(const unsigned char *sha1)
 {
        hashcpy(writer.pack_checksum, sha1);
 }
@@ -697,6 +767,9 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        static uint16_t flags = BITMAP_OPT_FULL_DAG;
        struct strbuf tmp_file = STRBUF_INIT;
        struct hashfile *f;
+       uint32_t *commit_positions = NULL;
+       off_t *offsets = NULL;
+       uint32_t i;
 
        struct bitmap_disk_header header;
 
@@ -715,7 +788,26 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        dump_bitmap(f, writer.trees);
        dump_bitmap(f, writer.blobs);
        dump_bitmap(f, writer.tags);
-       write_selected_commits_v1(f, index, index_nr);
+
+       if (options & BITMAP_OPT_LOOKUP_TABLE)
+               CALLOC_ARRAY(offsets, index_nr);
+
+       ALLOC_ARRAY(commit_positions, writer.selected_nr);
+
+       for (i = 0; i < writer.selected_nr; i++) {
+               struct bitmapped_commit *stored = &writer.selected[i];
+               int commit_pos = oid_pos(&stored->commit->object.oid, index, index_nr, oid_access);
+
+               if (commit_pos < 0)
+                       BUG(_("trying to write commit not in index"));
+
+               commit_positions[i] = commit_pos;
+       }
+
+       write_selected_commits_v1(f, commit_positions, offsets);
+
+       if (options & BITMAP_OPT_LOOKUP_TABLE)
+               write_lookup_table(f, commit_positions, offsets);
 
        if (options & BITMAP_OPT_HASH_CACHE)
                write_hash_cache(f, index, index_nr);
@@ -730,4 +822,6 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
                die_errno("unable to rename temporary bitmap file to '%s'", filename);
 
        strbuf_release(&tmp_file);
+       free(commit_positions);
+       free(offsets);
 }
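
A hedged sketch of how a writer opts into the new extension; `written_list`, `nr_written`, `bitmap_name` and `want_lookup_table` stand in for pack-objects' real bookkeeping and for whatever knob (for example a config option) enables the table:

    #include "pack.h"
    #include "pack-bitmap.h"

    static void finish_bitmap(struct pack_idx_entry **written_list,
                              uint32_t nr_written,
                              const char *bitmap_name,
                              int want_lookup_table)
    {
            uint16_t options = BITMAP_OPT_FULL_DAG | BITMAP_OPT_HASH_CACHE;

            if (want_lookup_table)
                    options |= BITMAP_OPT_LOOKUP_TABLE;

            /* triggers the write_lookup_table() path added above */
            bitmap_writer_finish(written_list, nr_written, bitmap_name, options);
    }
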
index 36134222d7a00acc1ebf5d52b3c3d817c10af8e9..9a208abc1fdd920926a1b8ee22e0a8bf5371743e 100644 (file)
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -1,5 +1,6 @@
 #include "cache.h"
 #include "commit.h"
+#include "strbuf.h"
 #include "tag.h"
 #include "diff.h"
 #include "revision.h"
@@ -82,6 +83,12 @@ struct bitmap_index {
        /* The checksum of the packfile or MIDX; points into map. */
        const unsigned char *checksum;
 
+       /*
+        * If not NULL, this points into the commit table extension
+        * (within the memory mapped region `map`).
+        */
+       unsigned char *table_lookup;
+
        /*
         * Extended index.
         *
@@ -138,7 +145,7 @@ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index)
                index->map_size - index->map_pos);
 
        if (bitmap_size < 0) {
-               error("Failed to load bitmap index (corrupted?)");
+               error(_("failed to load bitmap index (corrupted?)"));
                ewah_pool_free(b);
                return NULL;
        }
@@ -160,14 +167,14 @@ static int load_bitmap_header(struct bitmap_index *index)
        size_t header_size = sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz;
 
        if (index->map_size < header_size + the_hash_algo->rawsz)
-               return error("Corrupted bitmap index (too small)");
+               return error(_("corrupted bitmap index (too small)"));
 
        if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
-               return error("Corrupted bitmap index file (wrong header)");
+               return error(_("corrupted bitmap index file (wrong header)"));
 
        index->version = ntohs(header->version);
        if (index->version != 1)
-               return error("Unsupported version for bitmap index file (%d)", index->version);
+               return error(_("unsupported version '%d' for bitmap index file"), index->version);
 
        /* Parse known bitmap format options */
        {
@@ -176,15 +183,25 @@ static int load_bitmap_header(struct bitmap_index *index)
                unsigned char *index_end = index->map + index->map_size - the_hash_algo->rawsz;
 
                if ((flags & BITMAP_OPT_FULL_DAG) == 0)
-                       return error("Unsupported options for bitmap index file "
+                       BUG("unsupported options for bitmap index file "
                                "(Git requires BITMAP_OPT_FULL_DAG)");
 
                if (flags & BITMAP_OPT_HASH_CACHE) {
                        if (cache_size > index_end - index->map - header_size)
-                               return error("corrupted bitmap index file (too short to fit hash cache)");
+                               return error(_("corrupted bitmap index file (too short to fit hash cache)"));
                        index->hashes = (void *)(index_end - cache_size);
                        index_end -= cache_size;
                }
+
+               if (flags & BITMAP_OPT_LOOKUP_TABLE) {
+                       size_t table_size = st_mult(ntohl(header->entry_count),
+                                                   BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH);
+                       if (table_size > index_end - index->map - header_size)
+                               return error(_("corrupted bitmap index file (too short to fit lookup table)"));
+                       if (git_env_bool("GIT_TEST_READ_COMMIT_TABLE", 1))
+                               index->table_lookup = (void *)(index_end - table_size);
+                       index_end -= table_size;
+               }
        }
 
        index->entry_count = ntohl(header->entry_count);
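
For reference, a sketch of the trailing file layout that the parsing above assumes when both optional extensions are present:

    /*
     * header | ewah bitmap entries | lookup table | hash cache | checksum
     *
     * index_end starts just in front of the trailing checksum; the hash
     * cache is peeled off first, then the lookup table (entry_count
     * triplets of BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH bytes each), so the
     * table ends up between the bitmap entries and the hash cache.
     */
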
@@ -211,11 +228,13 @@ static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
 
        hash_pos = kh_put_oid_map(index->bitmaps, stored->oid, &ret);
 
-       /* a 0 return code means the insertion succeeded with no changes,
-        * because the SHA1 already existed on the map. this is bad, there
-        * shouldn't be duplicated commits in the index */
+       /*
+        * A 0 return code means the insertion succeeded with no changes,
+        * because the SHA1 already existed on the map. This is bad, there
+        * shouldn't be duplicated commits in the index.
+        */
        if (ret == 0) {
-               error("Duplicate entry in bitmap index: %s", oid_to_hex(oid));
+               error(_("duplicate entry in bitmap index: '%s'"), oid_to_hex(oid));
                return NULL;
        }
 
@@ -259,14 +278,14 @@ static int load_bitmap_entries_v1(struct bitmap_index *index)
                struct object_id oid;
 
                if (index->map_size - index->map_pos < 6)
-                       return error("corrupt ewah bitmap: truncated header for entry %d", i);
+                       return error(_("corrupt ewah bitmap: truncated header for entry %d"), i);
 
                commit_idx_pos = read_be32(index->map, &index->map_pos);
                xor_offset = read_u8(index->map, &index->map_pos);
                flags = read_u8(index->map, &index->map_pos);
 
                if (nth_bitmap_object_oid(index, &oid, commit_idx_pos) < 0)
-                       return error("corrupt ewah bitmap: commit index %u out of range",
+                       return error(_("corrupt ewah bitmap: commit index %u out of range"),
                                     (unsigned)commit_idx_pos);
 
                bitmap = read_bitmap_1(index);
@@ -274,13 +293,13 @@ static int load_bitmap_entries_v1(struct bitmap_index *index)
                        return -1;
 
                if (xor_offset > MAX_XOR_OFFSET || xor_offset > i)
-                       return error("Corrupted bitmap pack index");
+                       return error(_("corrupted bitmap pack index"));
 
                if (xor_offset > 0) {
                        xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET];
 
                        if (!xor_bitmap)
-                               return error("Invalid XOR offset in bitmap pack index");
+                               return error(_("invalid XOR offset in bitmap pack index"));
                }
 
                recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap(
@@ -313,17 +332,21 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
                              struct multi_pack_index *midx)
 {
        struct stat st;
-       char *idx_name = midx_bitmap_filename(midx);
-       int fd = git_open(idx_name);
+       char *bitmap_name = midx_bitmap_filename(midx);
+       int fd = git_open(bitmap_name);
        uint32_t i;
        struct packed_git *preferred;
 
-       free(idx_name);
-
-       if (fd < 0)
+       if (fd < 0) {
+               if (errno != ENOENT)
+                       warning_errno("cannot open '%s'", bitmap_name);
+               free(bitmap_name);
                return -1;
+       }
+       free(bitmap_name);
 
        if (fstat(fd, &st)) {
+               error_errno(_("cannot fstat bitmap file"));
                close(fd);
                return -1;
        }
@@ -332,7 +355,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
                struct strbuf buf = STRBUF_INIT;
                get_midx_filename(&buf, midx->object_dir);
                /* ignore extra bitmap file; we can only handle one */
-               warning("ignoring extra bitmap file: %s", buf.buf);
+               warning(_("ignoring extra bitmap file: '%s'"), buf.buf);
                close(fd);
                strbuf_release(&buf);
                return -1;
@@ -348,8 +371,10 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
        if (load_bitmap_header(bitmap_git) < 0)
                goto cleanup;
 
-       if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum))
+       if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum)) {
+               error(_("checksum doesn't match in MIDX and bitmap"));
                goto cleanup;
+       }
 
        if (load_midx_revindex(bitmap_git->midx) < 0) {
                warning(_("multi-pack bitmap is missing required reverse index"));
@@ -384,26 +409,31 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
 {
        int fd;
        struct stat st;
-       char *idx_name;
+       char *bitmap_name;
 
        if (open_pack_index(packfile))
                return -1;
 
-       idx_name = pack_bitmap_filename(packfile);
-       fd = git_open(idx_name);
-       free(idx_name);
+       bitmap_name = pack_bitmap_filename(packfile);
+       fd = git_open(bitmap_name);
 
-       if (fd < 0)
+       if (fd < 0) {
+               if (errno != ENOENT)
+                       warning_errno("cannot open '%s'", bitmap_name);
+               free(bitmap_name);
                return -1;
+       }
+       free(bitmap_name);
 
        if (fstat(fd, &st)) {
+               error_errno(_("cannot fstat bitmap file"));
                close(fd);
                return -1;
        }
 
        if (bitmap_git->pack || bitmap_git->midx) {
                /* ignore extra bitmap file; we can only handle one */
-               warning("ignoring extra bitmap file: %s", packfile->pack_name);
+               warning(_("ignoring extra bitmap file: '%s'"), packfile->pack_name);
                close(fd);
                return -1;
        }
@@ -470,7 +500,7 @@ static int load_bitmap(struct bitmap_index *bitmap_git)
                !(bitmap_git->tags = read_bitmap_1(bitmap_git)))
                goto failed;
 
-       if (load_bitmap_entries_v1(bitmap_git) < 0)
+       if (!bitmap_git->table_lookup && load_bitmap_entries_v1(bitmap_git) < 0)
                goto failed;
 
        return 0;
@@ -508,15 +538,16 @@ static int open_pack_bitmap(struct repository *r,
 static int open_midx_bitmap(struct repository *r,
                            struct bitmap_index *bitmap_git)
 {
+       int ret = -1;
        struct multi_pack_index *midx;
 
        assert(!bitmap_git->map);
 
        for (midx = get_multi_pack_index(r); midx; midx = midx->next) {
                if (!open_midx_bitmap_1(bitmap_git, midx))
-                       return 0;
+                       ret = 0;
        }
-       return -1;
+       return ret;
 }
 
 static int open_bitmap(struct repository *r,
@@ -557,13 +588,256 @@ struct include_data {
        struct bitmap *seen;
 };
 
+struct bitmap_lookup_table_triplet {
+       uint32_t commit_pos;
+       uint64_t offset;
+       uint32_t xor_row;
+};
+
+struct bitmap_lookup_table_xor_item {
+       struct object_id oid;
+       uint64_t offset;
+};
+
+/*
+ * Given a `triplet` struct pointer and pointer `p`, this
+ * function reads the triplet beginning at `p` into the struct.
+ * Note that this function assumes that there is enough memory
+ * left for filling the `triplet` struct from `p`.
+ */
+static int bitmap_lookup_table_get_triplet_by_pointer(struct bitmap_lookup_table_triplet *triplet,
+                                                     const unsigned char *p)
+{
+       if (!triplet)
+               return -1;
+
+       triplet->commit_pos = get_be32(p);
+       p += sizeof(uint32_t);
+       triplet->offset = get_be64(p);
+       p += sizeof(uint64_t);
+       triplet->xor_row = get_be32(p);
+       return 0;
+}
+
+/*
+ * This function gets the raw triplet from the `pos`'th row of the
+ * lookup table and fills the `triplet` struct with that data.
+ */
+static int bitmap_lookup_table_get_triplet(struct bitmap_index *bitmap_git,
+                                          uint32_t pos,
+                                          struct bitmap_lookup_table_triplet *triplet)
+{
+       unsigned char *p = NULL;
+       if (pos >= bitmap_git->entry_count)
+               return error(_("corrupt bitmap lookup table: triplet position out of index"));
+
+       p = bitmap_git->table_lookup + st_mult(pos, BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH);
+
+       return bitmap_lookup_table_get_triplet_by_pointer(triplet, p);
+}
+
+/*
+ * Searches for a matching triplet. `commit_pos` is a pointer
+ * to the wanted commit position value. `table_entry` points to
+ * a triplet in lookup table. The first 4 bytes of each
+ * triplet (pointed by `table_entry`) are compared with `*commit_pos`.
+ */
+static int triplet_cmp(const void *commit_pos, const void *table_entry)
+{
+
+       uint32_t a = *(uint32_t *)commit_pos;
+       uint32_t b = get_be32(table_entry);
+       if (a > b)
+               return 1;
+       else if (a < b)
+               return -1;
+
+       return 0;
+}
+
+static uint32_t bitmap_bsearch_pos(struct bitmap_index *bitmap_git,
+                           struct object_id *oid,
+                           uint32_t *result)
+{
+       int found;
+
+       if (bitmap_is_midx(bitmap_git))
+               found = bsearch_midx(oid, bitmap_git->midx, result);
+       else
+               found = bsearch_pack(oid, bitmap_git->pack, result);
+
+       return found;
+}
+
+/*
+ * `bitmap_bsearch_triplet_by_pos` searches for the raw triplet
+ * whose commit position equals `commit_pos` and fills the `triplet`
+ * object from it. Returns 0 on success and -1 if no matching
+ * triplet exists.
+ */
+static int bitmap_bsearch_triplet_by_pos(uint32_t commit_pos,
+                                 struct bitmap_index *bitmap_git,
+                                 struct bitmap_lookup_table_triplet *triplet)
+{
+       unsigned char *p = bsearch(&commit_pos, bitmap_git->table_lookup, bitmap_git->entry_count,
+                                  BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH, triplet_cmp);
+
+       if (!p)
+               return -1;
+
+       return bitmap_lookup_table_get_triplet_by_pointer(triplet, p);
+}
+
+static struct stored_bitmap *lazy_bitmap_for_commit(struct bitmap_index *bitmap_git,
+                                                   struct commit *commit)
+{
+       uint32_t commit_pos, xor_row;
+       uint64_t offset;
+       int flags;
+       struct bitmap_lookup_table_triplet triplet;
+       struct object_id *oid = &commit->object.oid;
+       struct ewah_bitmap *bitmap;
+       struct stored_bitmap *xor_bitmap = NULL;
+       const int bitmap_header_size = 6;
+       static struct bitmap_lookup_table_xor_item *xor_items = NULL;
+       static size_t xor_items_nr = 0, xor_items_alloc = 0;
+       static int is_corrupt = 0;
+       int xor_flags;
+       khiter_t hash_pos;
+       struct bitmap_lookup_table_xor_item *xor_item;
+
+       if (is_corrupt)
+               return NULL;
+
+       if (!bitmap_bsearch_pos(bitmap_git, oid, &commit_pos))
+               return NULL;
+
+       if (bitmap_bsearch_triplet_by_pos(commit_pos, bitmap_git, &triplet) < 0)
+               return NULL;
+
+       xor_items_nr = 0;
+       offset = triplet.offset;
+       xor_row = triplet.xor_row;
+
+       while (xor_row != 0xffffffff) {
+               ALLOC_GROW(xor_items, xor_items_nr + 1, xor_items_alloc);
+
+               if (xor_items_nr + 1 >= bitmap_git->entry_count) {
+                       error(_("corrupt bitmap lookup table: xor chain exceeds entry count"));
+                       goto corrupt;
+               }
+
+               if (bitmap_lookup_table_get_triplet(bitmap_git, xor_row, &triplet) < 0)
+                       goto corrupt;
+
+               xor_item = &xor_items[xor_items_nr];
+               xor_item->offset = triplet.offset;
+
+               if (nth_bitmap_object_oid(bitmap_git, &xor_item->oid, triplet.commit_pos) < 0) {
+                       error(_("corrupt bitmap lookup table: commit index %u out of range"),
+                               triplet.commit_pos);
+                       goto corrupt;
+               }
+
+               hash_pos = kh_get_oid_map(bitmap_git->bitmaps, xor_item->oid);
+
+               /*
+                * If the desired bitmap is already stored, we don't need
+                * to iterate further: every bitmap this one must be
+                * xor'd against has already been parsed and stored. So,
+                * assign that stored bitmap to xor_bitmap and stop
+                * walking the chain.
+                */
+               if (hash_pos < kh_end(bitmap_git->bitmaps) &&
+                       (xor_bitmap = kh_value(bitmap_git->bitmaps, hash_pos)))
+                       break;
+               xor_items_nr++;
+               xor_row = triplet.xor_row;
+       }
+
+       while (xor_items_nr) {
+               xor_item = &xor_items[xor_items_nr - 1];
+               bitmap_git->map_pos = xor_item->offset;
+               if (bitmap_git->map_size - bitmap_git->map_pos < bitmap_header_size) {
+                       error(_("corrupt ewah bitmap: truncated header for bitmap of commit \"%s\""),
+                               oid_to_hex(&xor_item->oid));
+                       goto corrupt;
+               }
+
+               bitmap_git->map_pos += sizeof(uint32_t) + sizeof(uint8_t);
+               xor_flags = read_u8(bitmap_git->map, &bitmap_git->map_pos);
+               bitmap = read_bitmap_1(bitmap_git);
+
+               if (!bitmap)
+                       goto corrupt;
+
+               xor_bitmap = store_bitmap(bitmap_git, bitmap, &xor_item->oid, xor_bitmap, xor_flags);
+               xor_items_nr--;
+       }
+
+       bitmap_git->map_pos = offset;
+       if (bitmap_git->map_size - bitmap_git->map_pos < bitmap_header_size) {
+               error(_("corrupt ewah bitmap: truncated header for bitmap of commit \"%s\""),
+                       oid_to_hex(oid));
+               goto corrupt;
+       }
+
+       /*
+        * Don't bother reading the commit's index position or its xor
+        * offset:
+        *
+        *   - The commit's index position is irrelevant to us, since
+        *     load_bitmap_entries_v1 only uses it to learn the object
+        *     id which is used to compute the hashmap's key. We already
+        *     have an object id, so no need to look it up again.
+        *
+        *   - The xor_offset is unusable for us, since it specifies how
+        *     many entries previous to ours we should look at. This
+        *     makes sense when reading the bitmaps sequentially (as in
+        *     load_bitmap_entries_v1()), since we can keep track of
+        *     each bitmap as we read them.
+        *
+        *     But it can't work for us, since the bitmaps don't have a
+        *     fixed size. So we learn the position of the xor'd bitmap
+        *     from the commit table (and resolve it to a bitmap in the
+        *     above if-statement).
+        *
+        * Instead, we can skip ahead and immediately read the flags and
+        * ewah bitmap.
+        */
+       bitmap_git->map_pos += sizeof(uint32_t) + sizeof(uint8_t);
+       flags = read_u8(bitmap_git->map, &bitmap_git->map_pos);
+       bitmap = read_bitmap_1(bitmap_git);
+
+       if (!bitmap)
+               goto corrupt;
+
+       return store_bitmap(bitmap_git, bitmap, oid, xor_bitmap, flags);
+
+corrupt:
+       free(xor_items);
+       is_corrupt = 1;
+       return NULL;
+}
+
 struct ewah_bitmap *bitmap_for_commit(struct bitmap_index *bitmap_git,
                                      struct commit *commit)
 {
        khiter_t hash_pos = kh_get_oid_map(bitmap_git->bitmaps,
                                           commit->object.oid);
-       if (hash_pos >= kh_end(bitmap_git->bitmaps))
-               return NULL;
+       if (hash_pos >= kh_end(bitmap_git->bitmaps)) {
+               struct stored_bitmap *bitmap = NULL;
+               if (!bitmap_git->table_lookup)
+                       return NULL;
+
+               trace2_region_enter("pack-bitmap", "reading_lookup_table", the_repository);
+               /* NEEDSWORK: cache misses aren't recorded */
+               bitmap = lazy_bitmap_for_commit(bitmap_git, commit);
+               trace2_region_leave("pack-bitmap", "reading_lookup_table", the_repository);
+               if (!bitmap)
+                       return NULL;
+               return lookup_stored_bitmap(bitmap);
+       }
        return lookup_stored_bitmap(kh_value(bitmap_git->bitmaps, hash_pos));
 }
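
A worked example of the xor chain walk in lazy_bitmap_for_commit() above, with made-up row numbers:

    /*
     * Suppose the requested commit's triplet is row 7 with xor_row == 3,
     * row 3 has xor_row == 1, and row 1 has xor_row == 0xffffffff.
     *
     * The first loop pushes rows 3 and 1 onto xor_items, stopping early
     * as soon as one of them is already present in bitmap_git->bitmaps.
     * The second loop then pops them in reverse: row 1's ewah data is
     * read and stored first, then row 3's on top of it, so that when
     * row 7's data is finally read its xor base is already resolved.
     */
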
 
@@ -831,7 +1105,7 @@ static struct bitmap *find_objects(struct bitmap_index *bitmap_git,
                revs->include_check_data = &incdata;
 
                if (prepare_revision_walk(revs))
-                       die("revision walk setup failed");
+                       die(_("revision walk setup failed"));
 
                show_data.bitmap_git = bitmap_git;
                show_data.base = base;
@@ -1640,15 +1914,15 @@ static void test_bitmap_type(struct bitmap_test_data *tdata,
        }
 
        if (bitmap_type == OBJ_NONE)
-               die("object %s not found in type bitmaps",
+               die(_("object '%s' not found in type bitmaps"),
                    oid_to_hex(&obj->oid));
 
        if (bitmaps_nr > 1)
-               die("object %s does not have a unique type",
+               die(_("object '%s' does not have a unique type"),
                    oid_to_hex(&obj->oid));
 
        if (bitmap_type != obj->type)
-               die("object %s: real type %s, expected: %s",
+               die(_("object '%s': real type '%s', expected: '%s'"),
                    oid_to_hex(&obj->oid),
                    type_name(obj->type),
                    type_name(bitmap_type));
@@ -1662,7 +1936,7 @@ static void test_show_object(struct object *object, const char *name,
 
        bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid);
        if (bitmap_pos < 0)
-               die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
+               die(_("object not in bitmap: '%s'"), oid_to_hex(&object->oid));
        test_bitmap_type(tdata, object, bitmap_pos);
 
        bitmap_set(tdata->base, bitmap_pos);
@@ -1677,7 +1951,7 @@ static void test_show_commit(struct commit *commit, void *data)
        bitmap_pos = bitmap_position(tdata->bitmap_git,
                                     &commit->object.oid);
        if (bitmap_pos < 0)
-               die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid));
+               die(_("object not in bitmap: '%s'"), oid_to_hex(&commit->object.oid));
        test_bitmap_type(tdata, &commit->object, bitmap_pos);
 
        bitmap_set(tdata->base, bitmap_pos);
@@ -1694,26 +1968,28 @@ void test_bitmap_walk(struct rev_info *revs)
        struct ewah_bitmap *bm;
 
        if (!(bitmap_git = prepare_bitmap_git(revs->repo)))
-               die("failed to load bitmap indexes");
+               die(_("failed to load bitmap indexes"));
 
        if (revs->pending.nr != 1)
-               die("you must specify exactly one commit to test");
+               die(_("you must specify exactly one commit to test"));
 
-       fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n",
-               bitmap_git->version, bitmap_git->entry_count);
+       fprintf_ln(stderr, "Bitmap v%d test (%d entries%s)",
+               bitmap_git->version,
+               bitmap_git->entry_count,
+               bitmap_git->table_lookup ? "" : " loaded");
 
        root = revs->pending.objects[0].item;
        bm = bitmap_for_commit(bitmap_git, (struct commit *)root);
 
        if (bm) {
-               fprintf(stderr, "Found bitmap for %s. %d bits / %08x checksum\n",
+               fprintf_ln(stderr, "Found bitmap for '%s'. %d bits / %08x checksum",
                        oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm));
 
                result = ewah_to_bitmap(bm);
        }
 
        if (!result)
-               die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid));
+               die(_("commit '%s' doesn't have an indexed bitmap"), oid_to_hex(&root->oid));
 
        revs->tag_objects = 1;
        revs->tree_objects = 1;
@@ -1722,7 +1998,7 @@ void test_bitmap_walk(struct rev_info *revs)
        result_popcnt = bitmap_popcount(result);
 
        if (prepare_revision_walk(revs))
-               die("revision walk setup failed");
+               die(_("revision walk setup failed"));
 
        tdata.bitmap_git = bitmap_git;
        tdata.base = bitmap_new();
@@ -1738,9 +2014,9 @@ void test_bitmap_walk(struct rev_info *revs)
        stop_progress(&tdata.prg);
 
        if (bitmap_equals(result, tdata.base))
-               fprintf(stderr, "OK!\n");
+               fprintf_ln(stderr, "OK!");
        else
-               die("mismatch in bitmap results");
+               die(_("mismatch in bitmap results"));
 
        bitmap_free(result);
        bitmap_free(tdata.base);
@@ -1753,15 +2029,24 @@ void test_bitmap_walk(struct rev_info *revs)
 
 int test_bitmap_commits(struct repository *r)
 {
-       struct bitmap_index *bitmap_git = prepare_bitmap_git(r);
        struct object_id oid;
        MAYBE_UNUSED void *value;
+       struct bitmap_index *bitmap_git = prepare_bitmap_git(r);
 
        if (!bitmap_git)
-               die("failed to load bitmap indexes");
+               die(_("failed to load bitmap indexes"));
+
+       /*
+        * As this function is only used to print bitmap selected
+        * commits, we don't have to read the commit table.
+        */
+       if (bitmap_git->table_lookup) {
+               if (load_bitmap_entries_v1(bitmap_git) < 0)
+                       die(_("failed to load bitmap indexes"));
+       }
 
        kh_foreach(bitmap_git->bitmaps, oid, value, {
-               printf("%s\n", oid_to_hex(&oid));
+               printf_ln("%s", oid_to_hex(&oid));
        });
 
        free_bitmap_index(bitmap_git);
@@ -1786,7 +2071,7 @@ int test_bitmap_hashes(struct repository *r)
 
                nth_bitmap_object_oid(bitmap_git, &oid, index_pos);
 
-               printf("%s %"PRIu32"\n",
+               printf_ln("%s %"PRIu32"",
                       oid_to_hex(&oid), get_be32(bitmap_git->hashes + index_pos));
        }
 
@@ -1948,7 +2233,7 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git,
                                        struct object_id oid;
                                        nth_midxed_object_oid(&oid, bitmap_git->midx, midx_pos);
 
-                                       die(_("could not find %s in pack %s at offset %"PRIuMAX),
+                                       die(_("could not find '%s' in pack '%s' at offset %"PRIuMAX),
                                            oid_to_hex(&oid),
                                            pack->pack_name,
                                            (uintmax_t)offset);
@@ -1984,7 +2269,7 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git)
                        continue;
 
                if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
-                       die(_("unable to get disk usage of %s"),
+                       die(_("unable to get disk usage of '%s'"),
                            oid_to_hex(&obj->oid));
 
                total += object_size;
index 3d3ddd77345002f3075047b1f0bf545a4e229157..f0180b5276b15d6567eda9984637333811fa851b 100644 (file)
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -23,9 +23,19 @@ struct bitmap_disk_header {
 
 #define NEEDS_BITMAP (1u<<22)
 
+/*
+ * The width in bytes of a single triplet in the lookup table
+ * extension:
+ *     (commit_pos, offset, xor_row)
+ *
+ * whose fields are 32, 64, and 32 bits wide, respectively.
+ */
+#define BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH (16)
+
 enum pack_bitmap_opts {
-       BITMAP_OPT_FULL_DAG = 1,
-       BITMAP_OPT_HASH_CACHE = 4,
+       BITMAP_OPT_FULL_DAG = 0x1,
+       BITMAP_OPT_HASH_CACHE = 0x4,
+       BITMAP_OPT_LOOKUP_TABLE = 0x10,
 };
 
 enum pack_bitmap_flags {
@@ -75,7 +85,7 @@ int bitmap_has_oid_in_uninteresting(struct bitmap_index *, const struct object_i
 off_t get_disk_usage_from_bitmap(struct bitmap_index *, struct rev_info *);
 
 void bitmap_writer_show_progress(int show);
-void bitmap_writer_set_checksum(unsigned char *sha1);
+void bitmap_writer_set_checksum(const unsigned char *sha1);
 void bitmap_writer_build_type_index(struct packing_data *to_pack,
                                    struct pack_idx_entry **index,
                                    uint32_t index_nr);
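
A quick sketch of what the extension costs on disk; the helper is illustrative only:

    #include "git-compat-util.h"   /* st_mult() */
    #include "pack-bitmap.h"

    /*
     * One 16-byte triplet per selected commit: 100,000 bitmapped commits
     * add 100,000 * 16 = 1,600,000 bytes (~1.5 MiB), independent of how
     * many objects the pack or MIDX contains.
     */
    static size_t lookup_table_bytes(uint32_t entry_count)
    {
            return st_mult(entry_count, BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH);
    }
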
index 74f4eae668d5555f99fa4d6af0f486ddc94b5447..4974e75eb4d0e9d002fc70a87e5bb4526e1e49f7 100644 (file)
@@ -22,7 +22,7 @@
  *
  *   - pack position refers to an object's position within a non-existent pack
  *     described by the MIDX. The pack structure is described in
- *     Documentation/technical/pack-format.txt.
+ *     gitformat-pack(5).
  *
 *     It is effectively a concatenation of all packs in the MIDX (ordered by
  *     their numeric ID within the MIDX) in their original order within each
index e5b1d0ed7670a72a664ea72dbd5d37e49f15b46e..c0d7dd93f46dfbc07ea0f17839d4a2d1a9d43622 100644 (file)
--- a/packfile.c
+++ b/packfile.c
@@ -941,20 +941,10 @@ unsigned long repo_approximate_object_count(struct repository *r)
        return r->objects->approximate_object_count;
 }
 
-static void *get_next_packed_git(const void *p)
-{
-       return ((const struct packed_git *)p)->next;
-}
-
-static void set_next_packed_git(void *p, void *next)
-{
-       ((struct packed_git *)p)->next = next;
-}
+DEFINE_LIST_SORT(static, sort_packs, struct packed_git, next);
 
-static int sort_pack(const void *a_, const void *b_)
+static int sort_pack(const struct packed_git *a, const struct packed_git *b)
 {
-       const struct packed_git *a = a_;
-       const struct packed_git *b = b_;
        int st;
 
        /*
@@ -981,9 +971,7 @@ static int sort_pack(const void *a_, const void *b_)
 
 static void rearrange_packed_git(struct repository *r)
 {
-       r->objects->packed_git = llist_mergesort(
-               r->objects->packed_git, get_next_packed_git,
-               set_next_packed_git, sort_pack);
+       sort_packs(&r->objects->packed_git, sort_pack);
 }
 
 static void prepare_packed_git_mru(struct repository *r)
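
The conversion above uses the typed list-sort macro (declared in mergesort.h); a minimal sketch with a hypothetical list type, where `struct line`, `sort_lines` and `line_cmp` are all made-up names:

    #include "mergesort.h"

    struct line {                    /* hypothetical singly-linked list */
            struct line *next;
            int nr;
    };

    /* Defines: static void sort_lines(struct line **head,
     *          int (*cmp)(const struct line *, const struct line *)); */
    DEFINE_LIST_SORT(static, sort_lines, struct line, next);

    static int line_cmp(const struct line *a, const struct line *b)
    {
            return a->nr - b->nr;
    }

    /* ... later: sort_lines(&head, line_cmp); */
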
@@ -1404,7 +1392,7 @@ static int delta_base_cache_key_eq(const struct delta_base_cache_key *a,
        return a->p == b->p && a->base_offset == b->base_offset;
 }
 
-static int delta_base_cache_hash_cmp(const void *unused_cmp_data,
+static int delta_base_cache_hash_cmp(const void *cmp_data UNUSED,
                                     const struct hashmap_entry *va,
                                     const struct hashmap_entry *vb,
                                     const void *vkey)
@@ -2229,7 +2217,17 @@ static int add_promisor_object(const struct object_id *oid,
                               void *set_)
 {
        struct oidset *set = set_;
-       struct object *obj = parse_object(the_repository, oid);
+       struct object *obj;
+       int we_parsed_object;
+
+       obj = lookup_object(the_repository, oid);
+       if (obj && obj->parsed) {
+               we_parsed_object = 0;
+       } else {
+               we_parsed_object = 1;
+               obj = parse_object(the_repository, oid);
+       }
+
        if (!obj)
                return 1;
 
@@ -2251,7 +2249,8 @@ static int add_promisor_object(const struct object_id *oid,
                        return 0;
                while (tree_entry_gently(&desc, &entry))
                        oidset_insert(set, &entry.oid);
-               free_tree_buffer(tree);
+               if (we_parsed_object)
+                       free_tree_buffer(tree);
        } else if (obj->type == OBJ_COMMIT) {
                struct commit *commit = (struct commit *) obj;
                struct commit_list *parents = commit->parents;
@@ -2275,7 +2274,8 @@ int is_promisor_object(const struct object_id *oid)
                if (has_promisor_remote()) {
                        for_each_packed_object(add_promisor_object,
                                               &promisor_objects,
-                                              FOR_EACH_OBJECT_PROMISOR_ONLY);
+                                              FOR_EACH_OBJECT_PROMISOR_ONLY |
+                                              FOR_EACH_OBJECT_PACK_ORDER);
                }
                promisor_objects_prepared = 1;
        }
diff --git a/pager.c b/pager.c
index 5cfe23b025c77b08715591c3893017111c2a697d..b66bbff2785cb6ed032edb4f0d9cd68f6a841c36 100644 (file)
--- a/pager.c
+++ b/pager.c
@@ -38,7 +38,8 @@ static void wait_for_pager_signal(int signo)
        raise(signo);
 }
 
-static int core_pager_config(const char *var, const char *value, void *data)
+static int core_pager_config(const char *var, const char *value,
+                            void *data UNUSED)
 {
        if (!strcmp(var, "core.pager"))
                return git_config_string(&pager_program, var, value);
index edf55d3ef5d7428b68e793d6d684c0299710c628..a1ec932f0f9ff32e82223735dfd927473051fd3b 100644 (file)
--- a/parse-options.c
+++ b/parse-options.c
@@ -324,6 +324,8 @@ static enum parse_opt_result parse_long_opt(
                const char *rest, *long_name = options->long_name;
                enum opt_parsed flags = OPT_LONG, opt_flags = OPT_LONG;
 
+               if (options->type == OPTION_SUBCOMMAND)
+                       continue;
                if (!long_name)
                        continue;
 
@@ -332,7 +334,7 @@ again:
                        rest = NULL;
                if (!rest) {
                        /* abbreviated? */
-                       if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) &&
+                       if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN_OPT) &&
                            !strncmp(long_name, arg, arg_end - arg)) {
 is_abbreviated:
                                if (abbrev_option &&
@@ -419,6 +421,19 @@ static enum parse_opt_result parse_nodash_opt(struct parse_opt_ctx_t *p,
        return PARSE_OPT_ERROR;
 }
 
+static enum parse_opt_result parse_subcommand(const char *arg,
+                                             const struct option *options)
+{
+       for (; options->type != OPTION_END; options++)
+               if (options->type == OPTION_SUBCOMMAND &&
+                   !strcmp(options->long_name, arg)) {
+                       *(parse_opt_subcommand_fn **)options->value = options->subcommand_fn;
+                       return PARSE_OPT_SUBCOMMAND;
+               }
+
+       return PARSE_OPT_UNKNOWN;
+}
+
 static void check_typos(const char *arg, const struct option *options)
 {
        if (strlen(arg) < 3)
@@ -442,6 +457,7 @@ static void check_typos(const char *arg, const struct option *options)
 static void parse_options_check(const struct option *opts)
 {
        char short_opts[128];
+       void *subcommand_value = NULL;
 
        memset(short_opts, '\0', sizeof(short_opts));
        for (; opts->type != OPTION_END; opts++) {
@@ -489,6 +505,14 @@ static void parse_options_check(const struct option *opts)
                               "Are you using parse_options_step() directly?\n"
                               "That case is not supported yet.");
                        break;
+               case OPTION_SUBCOMMAND:
+                       if (!opts->value || !opts->subcommand_fn)
+                               optbug(opts, "OPTION_SUBCOMMAND needs a value and a subcommand function");
+                       if (!subcommand_value)
+                               subcommand_value = opts->value;
+                       else if (subcommand_value != opts->value)
+                               optbug(opts, "all OPTION_SUBCOMMANDs need the same value");
+                       break;
                default:
                        ; /* ok. (usually accepts an argument) */
                }
@@ -499,6 +523,14 @@ static void parse_options_check(const struct option *opts)
        BUG_if_bug("invalid 'struct option'");
 }
 
+static int has_subcommands(const struct option *options)
+{
+       for (; options->type != OPTION_END; options++)
+               if (options->type == OPTION_SUBCOMMAND)
+                       return 1;
+       return 0;
+}
+
 static void parse_options_start_1(struct parse_opt_ctx_t *ctx,
                                  int argc, const char **argv, const char *prefix,
                                  const struct option *options,
@@ -515,7 +547,20 @@ static void parse_options_start_1(struct parse_opt_ctx_t *ctx,
        ctx->prefix = prefix;
        ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0);
        ctx->flags = flags;
-       if ((flags & PARSE_OPT_KEEP_UNKNOWN) &&
+       ctx->has_subcommands = has_subcommands(options);
+       if (!ctx->has_subcommands && (flags & PARSE_OPT_SUBCOMMAND_OPTIONAL))
+               BUG("Using PARSE_OPT_SUBCOMMAND_OPTIONAL without subcommands");
+       if (ctx->has_subcommands) {
+               if (flags & PARSE_OPT_STOP_AT_NON_OPTION)
+                       BUG("subcommands are incompatible with PARSE_OPT_STOP_AT_NON_OPTION");
+               if (!(flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)) {
+                       if (flags & PARSE_OPT_KEEP_UNKNOWN_OPT)
+                               BUG("subcommands are incompatible with PARSE_OPT_KEEP_UNKNOWN_OPT unless in combination with PARSE_OPT_SUBCOMMAND_OPTIONAL");
+                       if (flags & PARSE_OPT_KEEP_DASHDASH)
+                               BUG("subcommands are incompatible with PARSE_OPT_KEEP_DASHDASH unless in combination with PARSE_OPT_SUBCOMMAND_OPTIONAL");
+               }
+       }
+       if ((flags & PARSE_OPT_KEEP_UNKNOWN_OPT) &&
            (flags & PARSE_OPT_STOP_AT_NON_OPTION) &&
            !(flags & PARSE_OPT_ONE_SHOT))
                BUG("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
@@ -589,6 +634,7 @@ static int show_gitcomp(const struct option *opts, int show_all)
        int nr_noopts = 0;
 
        for (; opts->type != OPTION_END; opts++) {
+               const char *prefix = "--";
                const char *suffix = "";
 
                if (!opts->long_name)
@@ -598,6 +644,9 @@ static int show_gitcomp(const struct option *opts, int show_all)
                        continue;
 
                switch (opts->type) {
+               case OPTION_SUBCOMMAND:
+                       prefix = "";
+                       break;
                case OPTION_GROUP:
                        continue;
                case OPTION_STRING:
@@ -620,7 +669,8 @@ static int show_gitcomp(const struct option *opts, int show_all)
                        suffix = "=";
                if (starts_with(opts->long_name, "no-"))
                        nr_noopts++;
-               printf(" --%s%s", opts->long_name, suffix);
+               printf("%s%s%s%s", opts == original_opts ? "" : " ",
+                      prefix, opts->long_name, suffix);
        }
        show_negated_gitcomp(original_opts, show_all, -1);
        show_negated_gitcomp(original_opts, show_all, nr_noopts);
@@ -743,10 +793,38 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx,
                if (*arg != '-' || !arg[1]) {
                        if (parse_nodash_opt(ctx, arg, options) == 0)
                                continue;
-                       if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION)
-                               return PARSE_OPT_NON_OPTION;
-                       ctx->out[ctx->cpidx++] = ctx->argv[0];
-                       continue;
+                       if (!ctx->has_subcommands) {
+                               if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION)
+                                       return PARSE_OPT_NON_OPTION;
+                               ctx->out[ctx->cpidx++] = ctx->argv[0];
+                               continue;
+                       }
+                       switch (parse_subcommand(arg, options)) {
+                       case PARSE_OPT_SUBCOMMAND:
+                               return PARSE_OPT_SUBCOMMAND;
+                       case PARSE_OPT_UNKNOWN:
+                               if (ctx->flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)
+                                       /*
+                                        * arg is neither a short or long
+                                        * option nor a subcommand.  Since
+                                        * this command has a default
+                                        * operation mode, we have to treat
+                                        * this arg and all remaining args
+                                        * as args meant to that default
+                                        * operation mode.
+                                        * So we are done parsing.
+                                        */
+                                       return PARSE_OPT_DONE;
+                               error(_("unknown subcommand: `%s'"), arg);
+                               usage_with_options(usagestr, options);
+                       case PARSE_OPT_COMPLETE:
+                       case PARSE_OPT_HELP:
+                       case PARSE_OPT_ERROR:
+                       case PARSE_OPT_DONE:
+                       case PARSE_OPT_NON_OPTION:
+                               /* Impossible. */
+                               BUG("parse_subcommand() cannot return these");
+                       }
                }
 
                /* lone -h asks for help */
@@ -774,6 +852,7 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx,
                                        goto show_usage;
                                goto unknown;
                        case PARSE_OPT_NON_OPTION:
+                       case PARSE_OPT_SUBCOMMAND:
                        case PARSE_OPT_HELP:
                        case PARSE_OPT_COMPLETE:
                                BUG("parse_short_opt() cannot return these");
@@ -799,6 +878,7 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx,
                                        *(char *)ctx->argv[0] = '-';
                                        goto unknown;
                                case PARSE_OPT_NON_OPTION:
+                               case PARSE_OPT_SUBCOMMAND:
                                case PARSE_OPT_COMPLETE:
                                case PARSE_OPT_HELP:
                                        BUG("parse_short_opt() cannot return these");
@@ -830,6 +910,7 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx,
                case PARSE_OPT_HELP:
                        goto show_usage;
                case PARSE_OPT_NON_OPTION:
+               case PARSE_OPT_SUBCOMMAND:
                case PARSE_OPT_COMPLETE:
                        BUG("parse_long_opt() cannot return these");
                case PARSE_OPT_DONE:
@@ -839,7 +920,19 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx,
 unknown:
                if (ctx->flags & PARSE_OPT_ONE_SHOT)
                        break;
-               if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN))
+               if (ctx->has_subcommands &&
+                   (ctx->flags & PARSE_OPT_SUBCOMMAND_OPTIONAL) &&
+                   (ctx->flags & PARSE_OPT_KEEP_UNKNOWN_OPT)) {
+                       /*
+                        * Found an unknown option given to a command with
+                        * subcommands that has a default operation mode:
+                        * we treat this option and all remaining args as
+                        * arguments meant to that default operation mode.
+                        * So we are done parsing.
+                        */
+                       return PARSE_OPT_DONE;
+               }
+               if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN_OPT))
                        return PARSE_OPT_UNKNOWN;
                ctx->out[ctx->cpidx++] = ctx->argv[0];
                ctx->opt = NULL;
@@ -884,7 +977,14 @@ int parse_options(int argc, const char **argv,
        case PARSE_OPT_COMPLETE:
                exit(0);
        case PARSE_OPT_NON_OPTION:
+       case PARSE_OPT_SUBCOMMAND:
+               break;
        case PARSE_OPT_DONE:
+               if (ctx.has_subcommands &&
+                   !(flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)) {
+                       error(_("need a subcommand"));
+                       usage_with_options(usagestr, options);
+               }
                break;
        case PARSE_OPT_UNKNOWN:
                if (ctx.argv[0][1] == '-') {
@@ -1009,6 +1109,8 @@ static enum parse_opt_result usage_with_options_internal(struct parse_opt_ctx_t
                size_t pos;
                int pad;
 
+               if (opts->type == OPTION_SUBCOMMAND)
+                       continue;
                if (opts->type == OPTION_GROUP) {
                        fputc('\n', outfile);
                        need_newline = 0;
index 685fccac137fc23884f0e816d53b7be549506441..b6ef86e0d15e3dc36364977dd3515fa118dee165 100644 (file)
--- a/parse-options.h
+++ b/parse-options.h
@@ -11,6 +11,7 @@ enum parse_opt_type {
        OPTION_GROUP,
        OPTION_NUMBER,
        OPTION_ALIAS,
+       OPTION_SUBCOMMAND,
        /* options with no arguments */
        OPTION_BIT,
        OPTION_NEGBIT,
@@ -30,10 +31,11 @@ enum parse_opt_flags {
        PARSE_OPT_KEEP_DASHDASH = 1 << 0,
        PARSE_OPT_STOP_AT_NON_OPTION = 1 << 1,
        PARSE_OPT_KEEP_ARGV0 = 1 << 2,
-       PARSE_OPT_KEEP_UNKNOWN = 1 << 3,
+       PARSE_OPT_KEEP_UNKNOWN_OPT = 1 << 3,
        PARSE_OPT_NO_INTERNAL_HELP = 1 << 4,
        PARSE_OPT_ONE_SHOT = 1 << 5,
        PARSE_OPT_SHELL_EVAL = 1 << 6,
+       PARSE_OPT_SUBCOMMAND_OPTIONAL = 1 << 7,
 };
 
 enum parse_opt_option_flags {
@@ -56,6 +58,7 @@ enum parse_opt_result {
        PARSE_OPT_ERROR = -1,   /* must be the same as error() */
        PARSE_OPT_DONE = 0,     /* fixed so that "return 0" works */
        PARSE_OPT_NON_OPTION,
+       PARSE_OPT_SUBCOMMAND,
        PARSE_OPT_UNKNOWN
 };
 
@@ -67,6 +70,9 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
                                              const struct option *opt,
                                              const char *arg, int unset);
 
+typedef int parse_opt_subcommand_fn(int argc, const char **argv,
+                                   const char *prefix);
+
 /*
  * `type`::
  *   holds the type of the option, you must have an OPTION_END last in your
@@ -76,7 +82,8 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
  *   the character to use as a short option name, '\0' if none.
  *
  * `long_name`::
- *   the long option name, without the leading dashes, NULL if none.
+ *   the long option (without the leading dashes) or subcommand name,
+ *   NULL if none.
  *
  * `value`::
  *   stores pointers to the values to be filled.
@@ -93,7 +100,7 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
  *
  * `help`::
  *   the short help associated to what the option does.
- *   Must never be NULL (except for OPTION_END).
+ *   Must never be NULL (except for OPTION_END and OPTION_SUBCOMMAND).
  *   OPTION_GROUP uses this pointer to store the group header.
  *   Should be wrapped by N_() for translation.
  *
@@ -109,7 +116,8 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
  *                             is last on the command line. If the option is
  *                             not last it will require an argument.
  *                             Should not be used with PARSE_OPT_OPTARG.
- *   PARSE_OPT_NODASH: this option doesn't start with a dash.
+ *   PARSE_OPT_NODASH: this option doesn't start with a dash; can only be a
+ *                    short option and can't accept arguments.
  *   PARSE_OPT_LITERAL_ARGHELP: says that argh shouldn't be enclosed in brackets
  *                             (i.e. '<argh>') in the help message.
  *                             Useful for options with multiple parameters.
@@ -130,6 +138,9 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
  * `ll_callback`::
  *   pointer to the callback to use for OPTION_LOWLEVEL_CALLBACK
  *
+ * `subcommand_fn`::
+ *   pointer to a function to use for OPTION_SUBCOMMAND.
+ *   It will be put in value when the subcommand is given on the command line.
  */
 struct option {
        enum parse_opt_type type;
@@ -144,6 +155,7 @@ struct option {
        intptr_t defval;
        parse_opt_ll_cb *ll_callback;
        intptr_t extra;
+       parse_opt_subcommand_fn *subcommand_fn;
 };
 
 #define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \
@@ -205,6 +217,14 @@ struct option {
 #define OPT_ALIAS(s, l, source_long_name) \
        { OPTION_ALIAS, (s), (l), (source_long_name) }
 
+#define OPT_SUBCOMMAND_F(l, v, fn, f) { \
+       .type = OPTION_SUBCOMMAND, \
+       .long_name = (l), \
+       .value = (v), \
+       .flags = (f), \
+       .subcommand_fn = (fn) }
+#define OPT_SUBCOMMAND(l, v, fn)    OPT_SUBCOMMAND_F((l), (v), (fn), 0)
+
 /*
  * parse_options() will filter out the processed options and leave the
  * non-option arguments in argv[]. argv0 is assumed program name and
@@ -294,6 +314,7 @@ struct parse_opt_ctx_t {
        int argc, cpidx, total;
        const char *opt;
        enum parse_opt_flags flags;
+       unsigned has_subcommands;
        const char *prefix;
        const char **alias_groups; /* must be in groups of 3 elements! */
        struct option *updated_options;
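
Putting the new pieces together, a sketch of a hypothetical builtin with two subcommands (all names below are made up); with PARSE_OPT_SUBCOMMAND_OPTIONAL the same shape can additionally fall back to a default operation mode when `fn` stays NULL:

    #include "cache.h"
    #include "parse-options.h"

    static int cmd_foo_list(int argc, const char **argv, const char *prefix)
    {
            return 0; /* ... */
    }

    static int cmd_foo_add(int argc, const char **argv, const char *prefix)
    {
            return 0; /* ... */
    }

    int cmd_foo(int argc, const char **argv, const char *prefix)
    {
            const char * const foo_usage[] = {
                    N_("git foo <subcommand>"), NULL
            };
            parse_opt_subcommand_fn *fn = NULL;
            struct option options[] = {
                    OPT_SUBCOMMAND("list", &fn, cmd_foo_list),
                    OPT_SUBCOMMAND("add", &fn, cmd_foo_add),
                    OPT_END()
            };

            argc = parse_options(argc, argv, prefix, options, foo_usage, 0);
            return fn(argc, argv, prefix);
    }
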
index 8bf425555de252f12935b4fae68f375f3a815987..46c6a8f3eab519c88c0732677c9d09703f5882a3 100644 (file)
--- a/patch-ids.c
+++ b/patch-ids.c
@@ -38,7 +38,7 @@ int commit_patch_id(struct commit *commit, struct diff_options *options,
 static int patch_id_neq(const void *cmpfn_data,
                        const struct hashmap_entry *eptr,
                        const struct hashmap_entry *entry_or_key,
-                       const void *unused_keydata)
+                       const void *keydata UNUSED)
 {
        /* NEEDSWORK: const correctness? */
        struct diff_options *opt = (void *)cmpfn_data;
index 84ad9c73cfb50a9fd34eed3b5e68fae522c796fe..46e77a85fee9d86e6e16e9f466ecfc3db893fd16 100644 (file)
--- a/pathspec.c
+++ b/pathspec.c
@@ -759,3 +759,92 @@ int match_pathspec_attrs(struct index_state *istate,
 
        return 1;
 }
+
+int pathspec_needs_expanded_index(struct index_state *istate,
+                                 const struct pathspec *pathspec)
+{
+       unsigned int i, pos;
+       int res = 0;
+       char *skip_worktree_seen = NULL;
+
+       /*
+        * If index is not sparse, no index expansion is needed.
+        */
+       if (!istate->sparse_index)
+               return 0;
+
+       /*
+        * When using a magic pathspec, assume for the sake of simplicity that
+        * the index needs to be expanded to match all matchable files.
+        */
+       if (pathspec->magic)
+               return 1;
+
+       for (i = 0; i < pathspec->nr; i++) {
+               struct pathspec_item item = pathspec->items[i];
+
+               /*
+                * If the pathspec item has a wildcard, the index should be expanded
+                * if the pathspec has the possibility of matching a subset of entries inside
+                * of a sparse directory (but not the entire directory).
+                *
+                * If the pathspec item is a literal path, the index only needs to be expanded
+                * if a) the pathspec isn't in the sparse checkout cone (to make sure we don't
+                * expand for in-cone files) and b) it doesn't match any sparse directories
+                * (since we can reset whole sparse directories without expanding them).
+                */
+               if (item.nowildcard_len < item.len) {
+                       /*
+                        * Special case: if the pattern is a path inside the cone
+                        * followed by only wildcards, the pattern cannot match
+                        * partial sparse directories, so we know we don't need to
+                        * expand the index.
+                        *
+                        * Examples:
+                        * - in-cone/foo***: doesn't need expanded index
+                        * - not-in-cone/bar*: may need expanded index
+                        * - **.c: may need expanded index
+                        */
+                       if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len &&
+                           path_in_cone_mode_sparse_checkout(item.original, istate))
+                               continue;
+
+                       for (pos = 0; pos < istate->cache_nr; pos++) {
+                               struct cache_entry *ce = istate->cache[pos];
+
+                               if (!S_ISSPARSEDIR(ce->ce_mode))
+                                       continue;
+
+                               /*
+                                * If the pre-wildcard length is longer than the sparse
+                                * directory name and the sparse directory is the first
+                                * component of the pathspec, need to expand the index.
+                                */
+                               if (item.nowildcard_len > ce_namelen(ce) &&
+                                   !strncmp(item.original, ce->name, ce_namelen(ce))) {
+                                       res = 1;
+                                       break;
+                               }
+
+                               /*
+                                * If the pre-wildcard length is shorter than the sparse
+                                * directory and the pathspec does not match the whole
+                                * directory, need to expand the index.
+                                */
+                               if (!strncmp(item.original, ce->name, item.nowildcard_len) &&
+                                   wildmatch(item.original, ce->name, 0)) {
+                                       res = 1;
+                                       break;
+                               }
+                       }
+               } else if (!path_in_cone_mode_sparse_checkout(item.original, istate) &&
+                          !matches_skip_worktree(pathspec, i, &skip_worktree_seen))
+                       res = 1;
+
+               if (res > 0)
+                       break;
+       }
+
+       free(skip_worktree_seen);
+       return res;
+}
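The trailing-wildcard special case above boils down to a single strspn() test. A stand-alone sketch of that test (the helper name and raw-string interface are made up; git's version works on struct pathspec_item fields):

    #include <string.h>

    /*
     * Return 1 iff everything after the literal prefix of the pathspec
     * consists only of '*' characters.
     */
    static int only_trailing_wildcards(const char *pathspec, size_t nowildcard_len)
    {
            size_t len = strlen(pathspec);

            return strspn(pathspec + nowildcard_len, "*") == len - nowildcard_len;
    }

    /*
     * only_trailing_wildcards("in-cone/foo***", 11)   -> 1 (skip expansion if in cone)
     * only_trailing_wildcards("not-in-cone/bar*", 15) -> 1 (still subject to the cone check)
     * only_trailing_wildcards("dir/*.c", 4)           -> 0 (may need expansion)
     */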
index 402ebb808081e386da46786fe1d5a1521e3cc4ac..41f6adfbb421fee745e3717ffd57ff92acfc82b1 100644 (file)
@@ -171,4 +171,16 @@ int match_pathspec_attrs(struct index_state *istate,
                         const char *name, int namelen,
                         const struct pathspec_item *item);
 
+/*
+ * Determine whether a pathspec will match only entire index entries (non-sparse
+ * files and/or entire sparse directories). If the pathspec has the potential to
+ * match partial contents of a sparse directory, return 1 to indicate the index
+ * should be expanded to match the appropriate index entries.
+ *
+ * For the sake of simplicity, always return 1 if using a more complex "magic"
+ * pathspec.
+ */
+int pathspec_needs_expanded_index(struct index_state *istate,
+                                 const struct pathspec *pathspec);
+
 #endif /* PATHSPEC_H */
index 8e43c2def4ca4fa470b7a318ffa8a060b5c39838..ce4e73b6833a48119dfc5f36b64daa8e962dc9c4 100644 (file)
@@ -309,7 +309,8 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out)
        return err;
 }
 
-int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out)
+int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len,
+                                            int fd_out, int *packet_counter)
 {
        int err = 0;
        size_t bytes_written = 0;
@@ -324,6 +325,8 @@ int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_ou
                        break;
                err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write);
                bytes_written += bytes_to_write;
+               if (packet_counter)
+                       (*packet_counter)++;
        }
        return err;
 }
index 1f623de60a856944b8c1b3e844228e36c8c07162..79c538b99e477660fb8819d7fd0175daf4520cc5 100644 (file)
@@ -32,7 +32,13 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f
 int packet_flush_gently(int fd);
 int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
-int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out);
+int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len,
+                                            int fd_out, int *packet_counter);
+static inline int write_packetized_from_buf_no_flush(const char *src_in,
+                                                    size_t len, int fd_out)
+{
+       return write_packetized_from_buf_no_flush_count(src_in, len, fd_out, NULL);
+}
 
 /*
  * Stdio versions of packet_write functions. When mixing these with fd
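The pkt-line change above follows a common backward-compatible extension pattern: add a *_count() variant with an optional out-parameter and keep the old name as a thin inline wrapper that passes NULL, so existing callers compile unchanged. A hedged stand-alone sketch of the same pattern (names and the 64-byte chunking are invented):

    #include <stddef.h>

    /* New entry point: optionally reports how many chunks were emitted. */
    static int emit_chunks_count(const char *src, size_t len, int *chunk_counter)
    {
            size_t off;

            for (off = 0; off < len; off += 64) {
                    /* ... emit src[off .. off+63] here ... */
                    if (chunk_counter)
                            (*chunk_counter)++;   /* only counted when asked for */
            }
            return 0;
    }

    /* Old name survives as a wrapper; callers that don't care pass NULL. */
    static inline int emit_chunks(const char *src, size_t len)
    {
            return emit_chunks_count(src, len, NULL);
    }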
diff --git a/ppc/sha1.c b/ppc/sha1.c
deleted file mode 100644 (file)
index 1b705ce..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * SHA-1 implementation.
- *
- * Copyright (C) 2005 Paul Mackerras <paulus@samba.org>
- *
- * This version assumes we are running on a big-endian machine.
- * It calls an external sha1_core() to process blocks of 64 bytes.
- */
-#include <stdio.h>
-#include <string.h>
-#include "sha1.h"
-
-void ppc_sha1_core(uint32_t *hash, const unsigned char *p,
-                  unsigned int nblocks);
-
-int ppc_SHA1_Init(ppc_SHA_CTX *c)
-{
-       c->hash[0] = 0x67452301;
-       c->hash[1] = 0xEFCDAB89;
-       c->hash[2] = 0x98BADCFE;
-       c->hash[3] = 0x10325476;
-       c->hash[4] = 0xC3D2E1F0;
-       c->len = 0;
-       c->cnt = 0;
-       return 0;
-}
-
-int ppc_SHA1_Update(ppc_SHA_CTX *c, const void *ptr, unsigned long n)
-{
-       unsigned long nb;
-       const unsigned char *p = ptr;
-
-       c->len += (uint64_t) n << 3;
-       while (n != 0) {
-               if (c->cnt || n < 64) {
-                       nb = 64 - c->cnt;
-                       if (nb > n)
-                               nb = n;
-                       memcpy(&c->buf.b[c->cnt], p, nb);
-                       if ((c->cnt += nb) == 64) {
-                               ppc_sha1_core(c->hash, c->buf.b, 1);
-                               c->cnt = 0;
-                       }
-               } else {
-                       nb = n >> 6;
-                       ppc_sha1_core(c->hash, p, nb);
-                       nb <<= 6;
-               }
-               n -= nb;
-               p += nb;
-       }
-       return 0;
-}
-
-int ppc_SHA1_Final(unsigned char *hash, ppc_SHA_CTX *c)
-{
-       unsigned int cnt = c->cnt;
-
-       c->buf.b[cnt++] = 0x80;
-       if (cnt > 56) {
-               if (cnt < 64)
-                       memset(&c->buf.b[cnt], 0, 64 - cnt);
-               ppc_sha1_core(c->hash, c->buf.b, 1);
-               cnt = 0;
-       }
-       if (cnt < 56)
-               memset(&c->buf.b[cnt], 0, 56 - cnt);
-       c->buf.l[7] = c->len;
-       ppc_sha1_core(c->hash, c->buf.b, 1);
-       memcpy(hash, c->hash, 20);
-       return 0;
-}
diff --git a/ppc/sha1.h b/ppc/sha1.h
deleted file mode 100644 (file)
index 9b24b32..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * SHA-1 implementation.
- *
- * Copyright (C) 2005 Paul Mackerras <paulus@samba.org>
- */
-#include <stdint.h>
-
-typedef struct {
-       uint32_t hash[5];
-       uint32_t cnt;
-       uint64_t len;
-       union {
-               unsigned char b[64];
-               uint64_t l[8];
-       } buf;
-} ppc_SHA_CTX;
-
-int ppc_SHA1_Init(ppc_SHA_CTX *c);
-int ppc_SHA1_Update(ppc_SHA_CTX *c, const void *p, unsigned long n);
-int ppc_SHA1_Final(unsigned char *hash, ppc_SHA_CTX *c);
-
-#define platform_SHA_CTX       ppc_SHA_CTX
-#define platform_SHA1_Init     ppc_SHA1_Init
-#define platform_SHA1_Update   ppc_SHA1_Update
-#define platform_SHA1_Final    ppc_SHA1_Final
diff --git a/ppc/sha1ppc.S b/ppc/sha1ppc.S
deleted file mode 100644 (file)
index 1711eef..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * SHA-1 implementation for PowerPC.
- *
- * Copyright (C) 2005 Paul Mackerras <paulus@samba.org>
- */
-
-/*
- * PowerPC calling convention:
- * %r0 - volatile temp
- * %r1 - stack pointer.
- * %r2 - reserved
- * %r3-%r12 - Incoming arguments & return values; volatile.
- * %r13-%r31 - Callee-save registers
- * %lr - Return address, volatile
- * %ctr - volatile
- *
- * Register usage in this routine:
- * %r0 - temp
- * %r3 - argument (pointer to 5 words of SHA state)
- * %r4 - argument (pointer to data to hash)
- * %r5 - Constant K in SHA round (initially number of blocks to hash)
- * %r6-%r10 - Working copies of SHA variables A..E (actually E..A order)
- * %r11-%r26 - Data being hashed W[].
- * %r27-%r31 - Previous copies of A..E, for final add back.
- * %ctr - loop count
- */
-
-
-/*
- * We roll the registers for A, B, C, D, E around on each
- * iteration; E on iteration t is D on iteration t+1, and so on.
- * We use registers 6 - 10 for this.  (Registers 27 - 31 hold
- * the previous values.)
- */
-#define RA(t)  (((t)+4)%5+6)
-#define RB(t)  (((t)+3)%5+6)
-#define RC(t)  (((t)+2)%5+6)
-#define RD(t)  (((t)+1)%5+6)
-#define RE(t)  (((t)+0)%5+6)
-
-/* We use registers 11 - 26 for the W values */
-#define W(t)   ((t)%16+11)
-
-/* Register 5 is used for the constant k */
-
-/*
- * The basic SHA-1 round function is:
- * E += ROTL(A,5) + F(B,C,D) + W[i] + K;  B = ROTL(B,30)
- * Then the variables are renamed: (A,B,C,D,E) = (E,A,B,C,D).
- *
- * Every 20 rounds, the function F() and the constant K changes:
- * - 20 rounds of f0(b,c,d) = "bit wise b ? c : d" =  (^b & d) + (b & c)
- * - 20 rounds of f1(b,c,d) = b^c^d = (b^d)^c
- * - 20 rounds of f2(b,c,d) = majority(b,c,d) = (b&d) + ((b^d)&c)
- * - 20 more rounds of f1(b,c,d)
- *
- * These are all scheduled for near-optimal performance on a G4.
- * The G4 is a 3-issue out-of-order machine with 3 ALUs, but it can only
- * *consider* starting the oldest 3 instructions per cycle.  So to get
- * maximum performance out of it, you have to treat it as an in-order
- * machine.  Which means interleaving the computation round t with the
- * computation of W[t+4].
- *
- * The first 16 rounds use W values loaded directly from memory, while the
- * remaining 64 use values computed from those first 16.  We preload
- * 4 values before starting, so there are three kinds of rounds:
- * - The first 12 (all f0) also load the W values from memory.
- * - The next 64 compute W(i+4) in parallel. 8*f0, 20*f1, 20*f2, 16*f1.
- * - The last 4 (all f1) do not do anything with W.
- *
- * Therefore, we have 6 different round functions:
- * STEPD0_LOAD(t,s) - Perform round t and load W(s).  s < 16
- * STEPD0_UPDATE(t,s) - Perform round t and compute W(s).  s >= 16.
- * STEPD1_UPDATE(t,s)
- * STEPD2_UPDATE(t,s)
- * STEPD1(t) - Perform round t with no load or update.
- *
- * The G5 is more fully out-of-order, and can find the parallelism
- * by itself.  The big limit is that it has a 2-cycle ALU latency, so
- * even though it's 2-way, the code has to be scheduled as if it's
- * 4-way, which can be a limit.  To help it, we try to schedule the
- * read of RA(t) as late as possible so it doesn't stall waiting for
- * the previous round's RE(t-1), and we try to rotate RB(t) as early
- * as possible while reading RC(t) (= RB(t-1)) as late as possible.
- */
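As a plain-C reference for the round structure the comment above describes (illustration only; the deleted assembly interleaves this computation with the W[] schedule for scheduling reasons):

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, int n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* The three round functions f0/f1/f2 named above. */
    static uint32_t f0(uint32_t b, uint32_t c, uint32_t d) { return (b & c) | (~b & d); }
    static uint32_t f1(uint32_t b, uint32_t c, uint32_t d) { return b ^ c ^ d; }
    static uint32_t f2(uint32_t b, uint32_t c, uint32_t d) { return (b & d) | ((b ^ d) & c); }

    /* One round: E += ROTL(A,5) + F(B,C,D) + W[i] + K; B = ROTL(B,30); then rename. */
    static void sha1_round(uint32_t v[5], uint32_t w, uint32_t k,
                           uint32_t (*f)(uint32_t, uint32_t, uint32_t))
    {
            uint32_t t = rotl32(v[0], 5) + f(v[1], v[2], v[3]) + v[4] + w + k;

            v[4] = v[3];
            v[3] = v[2];
            v[2] = rotl32(v[1], 30);
            v[1] = v[0];
            v[0] = t;
    }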
-
-/* the initial loads. */
-#define LOADW(s) \
-       lwz     W(s),(s)*4(%r4)
-
-/*
- * Perform a step with F0, and load W(s).  Uses W(s) as a temporary
- * before loading it.
- * This is actually 10 instructions, which is an awkward fit.
- * It can execute grouped as listed, or delayed one instruction.
- * (If delayed two instructions, there is a stall before the start of the
- * second line.)  Thus, two iterations take 7 cycles, 3.5 cycles per round.
- */
-#define STEPD0_LOAD(t,s) \
-add RE(t),RE(t),W(t); andc   %r0,RD(t),RB(t);  and    W(s),RC(t),RB(t); \
-add RE(t),RE(t),%r0;  rotlwi %r0,RA(t),5;      rotlwi RB(t),RB(t),30;   \
-add RE(t),RE(t),W(s); add    %r0,%r0,%r5;      lwz    W(s),(s)*4(%r4);  \
-add RE(t),RE(t),%r0
-
-/*
- * This is likewise awkward, 13 instructions.  However, it can also
- * execute starting with 2 out of 3 possible moduli, so it does 2 rounds
- * in 9 cycles, 4.5 cycles/round.
- */
-#define STEPD0_UPDATE(t,s,loadk...) \
-add RE(t),RE(t),W(t); andc   %r0,RD(t),RB(t); xor    W(s),W((s)-16),W((s)-3); \
-add RE(t),RE(t),%r0;  and    %r0,RC(t),RB(t); xor    W(s),W(s),W((s)-8);      \
-add RE(t),RE(t),%r0;  rotlwi %r0,RA(t),5;     xor    W(s),W(s),W((s)-14);     \
-add RE(t),RE(t),%r5;  loadk; rotlwi RB(t),RB(t),30;  rotlwi W(s),W(s),1;     \
-add RE(t),RE(t),%r0
-
-/* Nicely optimal.  Conveniently, also the most common. */
-#define STEPD1_UPDATE(t,s,loadk...) \
-add RE(t),RE(t),W(t); xor    %r0,RD(t),RB(t); xor    W(s),W((s)-16),W((s)-3); \
-add RE(t),RE(t),%r5;  loadk; xor %r0,%r0,RC(t);  xor W(s),W(s),W((s)-8);      \
-add RE(t),RE(t),%r0;  rotlwi %r0,RA(t),5;     xor    W(s),W(s),W((s)-14);     \
-add RE(t),RE(t),%r0;  rotlwi RB(t),RB(t),30;  rotlwi W(s),W(s),1
-
-/*
- * The naked version, no UPDATE, for the last 4 rounds.  3 cycles per.
- * We could use W(s) as a temp register, but we don't need it.
- */
-#define STEPD1(t) \
-                        add   RE(t),RE(t),W(t); xor    %r0,RD(t),RB(t); \
-rotlwi RB(t),RB(t),30;  add   RE(t),RE(t),%r5;  xor    %r0,%r0,RC(t);   \
-add    RE(t),RE(t),%r0; rotlwi %r0,RA(t),5;     /* spare slot */        \
-add    RE(t),RE(t),%r0
-
-/*
- * 14 instructions, 5 cycles per.  The majority function is a bit
- * awkward to compute.  This can execute with a 1-instruction delay,
- * but it causes a 2-instruction delay, which triggers a stall.
- */
-#define STEPD2_UPDATE(t,s,loadk...) \
-add RE(t),RE(t),W(t); and    %r0,RD(t),RB(t); xor    W(s),W((s)-16),W((s)-3); \
-add RE(t),RE(t),%r0;  xor    %r0,RD(t),RB(t); xor    W(s),W(s),W((s)-8);      \
-add RE(t),RE(t),%r5;  loadk; and %r0,%r0,RC(t);  xor W(s),W(s),W((s)-14);     \
-add RE(t),RE(t),%r0;  rotlwi %r0,RA(t),5;     rotlwi W(s),W(s),1;             \
-add RE(t),RE(t),%r0;  rotlwi RB(t),RB(t),30
-
-#define STEP0_LOAD4(t,s)               \
-       STEPD0_LOAD(t,s);               \
-       STEPD0_LOAD((t+1),(s)+1);       \
-       STEPD0_LOAD((t)+2,(s)+2);       \
-       STEPD0_LOAD((t)+3,(s)+3)
-
-#define STEPUP4(fn, t, s, loadk...)            \
-       STEP##fn##_UPDATE(t,s,);                \
-       STEP##fn##_UPDATE((t)+1,(s)+1,);        \
-       STEP##fn##_UPDATE((t)+2,(s)+2,);        \
-       STEP##fn##_UPDATE((t)+3,(s)+3,loadk)
-
-#define STEPUP20(fn, t, s, loadk...)   \
-       STEPUP4(fn, t, s,);             \
-       STEPUP4(fn, (t)+4, (s)+4,);     \
-       STEPUP4(fn, (t)+8, (s)+8,);     \
-       STEPUP4(fn, (t)+12, (s)+12,);   \
-       STEPUP4(fn, (t)+16, (s)+16, loadk)
-
-       .globl  ppc_sha1_core
-ppc_sha1_core:
-       stwu    %r1,-80(%r1)
-       stmw    %r13,4(%r1)
-
-       /* Load up A - E */
-       lmw     %r27,0(%r3)
-
-       mtctr   %r5
-
-1:
-       LOADW(0)
-       lis     %r5,0x5a82
-       mr      RE(0),%r31
-       LOADW(1)
-       mr      RD(0),%r30
-       mr      RC(0),%r29
-       LOADW(2)
-       ori     %r5,%r5,0x7999  /* K0-19 */
-       mr      RB(0),%r28
-       LOADW(3)
-       mr      RA(0),%r27
-
-       STEP0_LOAD4(0, 4)
-       STEP0_LOAD4(4, 8)
-       STEP0_LOAD4(8, 12)
-       STEPUP4(D0, 12, 16,)
-       STEPUP4(D0, 16, 20, lis %r5,0x6ed9)
-
-       ori     %r5,%r5,0xeba1  /* K20-39 */
-       STEPUP20(D1, 20, 24, lis %r5,0x8f1b)
-
-       ori     %r5,%r5,0xbcdc  /* K40-59 */
-       STEPUP20(D2, 40, 44, lis %r5,0xca62)
-
-       ori     %r5,%r5,0xc1d6  /* K60-79 */
-       STEPUP4(D1, 60, 64,)
-       STEPUP4(D1, 64, 68,)
-       STEPUP4(D1, 68, 72,)
-       STEPUP4(D1, 72, 76,)
-       addi    %r4,%r4,64
-       STEPD1(76)
-       STEPD1(77)
-       STEPD1(78)
-       STEPD1(79)
-
-       /* Add results to original values */
-       add     %r31,%r31,RE(0)
-       add     %r30,%r30,RD(0)
-       add     %r29,%r29,RC(0)
-       add     %r28,%r28,RB(0)
-       add     %r27,%r27,RA(0)
-
-       bdnz    1b
-
-       /* Save final hash, restore registers, and return */
-       stmw    %r27,0(%r3)
-       lmw     %r13,4(%r1)
-       addi    %r1,%r1,80
-       blr
index e5529a586366d4031a5300d7ab0dfa6ec97489b8..100f7a374dca1b8ffaae1ae9a2c00011d05a208a 100644 (file)
@@ -151,6 +151,12 @@ void preload_index(struct index_state *index,
        }
        stop_progress(&pd.progress);
 
+       if (pathspec) {
+               /* earlier we made deep copies for each thread to work with */
+               for (i = 0; i < threads; i++)
+                       clear_pathspec(&data[i].pathspec);
+       }
+
        trace_performance_leave("preload index");
 
        trace2_data_intmax("index", NULL, "preload/sum_lstat", t2_sum_lstat);
index ee6114e3f0aa1dcf8737794bbe8dc8f6e23f5f7b..6cb363ae1c9e90e47e2002fb7e89a1f03a12f23c 100644 (file)
--- a/pretty.c
+++ b/pretty.c
@@ -43,7 +43,8 @@ static void save_user_format(struct rev_info *rev, const char *cp, int is_tforma
        rev->commit_format = CMIT_FMT_USERFORMAT;
 }
 
-static int git_pretty_formats_config(const char *var, const char *value, void *cb)
+static int git_pretty_formats_config(const char *var, const char *value,
+                                    void *cb UNUSED)
 {
        struct cmt_fmt_map *commit_format = NULL;
        const char *name;
@@ -477,6 +478,16 @@ end:
        }
 }
 
+static int use_in_body_from(const struct pretty_print_context *pp,
+                           const struct ident_split *ident)
+{
+       if (pp->rev && pp->rev->force_in_body_from)
+               return 1;
+       if (ident_cmp(pp->from_ident, ident))
+               return 1;
+       return 0;
+}
+
 void pp_user_info(struct pretty_print_context *pp,
                  const char *what, struct strbuf *sb,
                  const char *line, const char *encoding)
@@ -503,7 +514,7 @@ void pp_user_info(struct pretty_print_context *pp,
                map_user(pp->mailmap, &mailbuf, &maillen, &namebuf, &namelen);
 
        if (cmit_fmt_is_mail(pp->fmt)) {
-               if (pp->from_ident && ident_cmp(pp->from_ident, &ident)) {
+               if (pp->from_ident && use_in_body_from(pp, &ident)) {
                        struct strbuf buf = STRBUF_INIT;
 
                        strbuf_addstr(&buf, "From: ");
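For context, when use_in_body_from() returns 1 (the new force_in_body_from flag, or a from_ident that differs from the author), the message body gains an in-body header so the mail can be sent from one address while preserving authorship. A hypothetical body (name and address invented):

    From: A U Thor <author@example.com>

    The rest of the commit message follows here; applying the patch
    attributes it to the in-body author rather than the mail sender.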
@@ -1575,23 +1586,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                                strbuf_addstr(sb, c->signature_check.primary_key_fingerprint);
                        break;
                case 'T':
-                       switch (c->signature_check.trust_level) {
-                       case TRUST_UNDEFINED:
-                               strbuf_addstr(sb, "undefined");
-                               break;
-                       case TRUST_NEVER:
-                               strbuf_addstr(sb, "never");
-                               break;
-                       case TRUST_MARGINAL:
-                               strbuf_addstr(sb, "marginal");
-                               break;
-                       case TRUST_FULLY:
-                               strbuf_addstr(sb, "fully");
-                               break;
-                       case TRUST_ULTIMATE:
-                               strbuf_addstr(sb, "ultimate");
-                               break;
-                       }
+                       strbuf_addstr(sb, gpg_trust_level_to_str(c->signature_check.trust_level));
                        break;
                default:
                        return 0;
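The removed switch is now a single call to gpg_trust_level_to_str(); its definition is not part of this hunk, but a minimal sketch of such an enum-to-string mapping, reusing the strings the old switch emitted, might look like this (local stand-in enum, not git's own type):

    enum trust_level_sketch {
            TRUST_SKETCH_UNDEFINED,
            TRUST_SKETCH_NEVER,
            TRUST_SKETCH_MARGINAL,
            TRUST_SKETCH_FULLY,
            TRUST_SKETCH_ULTIMATE,
    };

    static const char *trust_level_to_str_sketch(enum trust_level_sketch level)
    {
            static const char * const names[] = {
                    "undefined", "never", "marginal", "fully", "ultimate",
            };

            return names[level];
    }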
index 5b33f88bca17a29921d459663115ec9bc683c8a1..68f46f5ec70b93b28fa7c75988d671a4046e8fee 100644 (file)
@@ -146,7 +146,7 @@ static void promisor_remote_init(struct repository *r)
        if (r->promisor_remote_config)
                return;
        config = r->promisor_remote_config =
-               xcalloc(sizeof(*r->promisor_remote_config), 1);
+               xcalloc(1, sizeof(*r->promisor_remote_config));
        config->promisors_tail = &config->promisors;
 
        repo_config(r, promisor_remote_config, config);
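The only change above is argument order: calloc-style allocators take the element count first and the element size second, so xcalloc(1, sizeof(*config)) allocates one zeroed struct. The same shape with plain calloc(3) (stand-in struct name):

    #include <stdlib.h>

    struct config_sketch { int a; char *b; };

    static struct config_sketch *alloc_one_zeroed(void)
    {
            /* nmemb = 1, size = sizeof the element: count before size */
            return calloc(1, sizeof(struct config_sketch));
    }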
index f63b3ffc200a8b71df3d7287322321ac70be2595..8b7d81adc1be09d91a1551c370c08058766f76f6 100644 (file)
@@ -57,9 +57,9 @@ static int read_patches(const char *range, struct string_list *list,
                     "--pretty=medium",
                     "--notes",
                     NULL);
+       strvec_push(&cp.args, range);
        if (other_arg)
                strvec_pushv(&cp.args, other_arg->v);
-       strvec_push(&cp.args, range);
        cp.out = -1;
        cp.no_stdin = 1;
        cp.git_cmd = 1;
@@ -224,8 +224,10 @@ cleanup:
        return ret;
 }
 
-static int patch_util_cmp(const void *dummy, const struct patch_util *a,
-                         const struct patch_util *b, const char *keydata)
+static int patch_util_cmp(const void *cmp_data UNUSED,
+                         const struct patch_util *a,
+                         const struct patch_util *b,
+                         const char *keydata)
 {
        return strcmp(a->diff, keydata ? keydata : b->diff);
 }
index 87649d0c016a371f83710a04140909d54a553aa7..7407c593191679c757573e567a28f35c65c9cb33 100644 (file)
@@ -54,9 +54,12 @@ void append_todo_help(int command_count,
 "l, label <label> = label current HEAD with a name\n"
 "t, reset <label> = reset HEAD to a label\n"
 "m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n"
-".       create a merge commit using the original merge commit's\n"
-".       message (or the oneline, if no original merge commit was\n"
-".       specified); use -c <commit> to reword the commit message\n"
+"        create a merge commit using the original merge commit's\n"
+"        message (or the oneline, if no original merge commit was\n"
+"        specified); use -c <commit> to reword the commit message\n"
+"u, update-ref <ref> = track a placeholder for the <ref> to be updated\n"
+"                      to this position in the new commits. The <ref> is\n"
+"                      updated at the end of the rebase\n"
 "\n"
 "These lines can be re-ordered; they are executed from top to bottom.\n");
        unsigned edit_todo = !(shortrevisions && shortonto);
@@ -143,6 +146,12 @@ int edit_todo_list(struct repository *r, struct todo_list *todo_list,
                return -4;
        }
 
+       /*
+        * See if branches need to be added or removed from the update-refs
+        * file based on the new todo list.
+        */
+       todo_list_filter_update_refs(r, new_todo);
+
        return 0;
 }
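Putting the two hunks together: the new 'update-ref' todo command records a placeholder in the list, and todo_list_filter_update_refs() reconciles the tracked refs after the user edits the todo file. A hypothetical edited todo list using it (hashes and branch names invented) could look like:

    pick 1111111 refactor parser
    pick 2222222 add parser tests
    update-ref refs/heads/parser-prep
    pick 3333333 use the new parser in the builtin
    update-ref refs/heads/parser-use

Each listed ref is then moved to its recorded position once the rebase finishes, as the help text above says.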
 
index d3c90e5dbe2fdb4cdbad6b93290e27b46e205fbb..fd1cb14b0f131de522cd60c71127c9369fe84648 100644 (file)
@@ -89,7 +89,7 @@ struct ref_to_worktree_entry {
        struct worktree *wt; /* key is wt->head_ref */
 };
 
-static int ref_to_worktree_map_cmpfnc(const void *unused_lookupdata,
+static int ref_to_worktree_map_cmpfnc(const void *lookupdata UNUSED,
                                      const struct hashmap_entry *eptr,
                                      const struct hashmap_entry *kptr,
                                      const void *keydata_aka_refname)
@@ -2405,6 +2405,7 @@ static void reach_filter(struct ref_array *array,
 int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int type)
 {
        struct ref_filter_cbdata ref_cbdata;
+       int save_commit_buffer_orig;
        int ret = 0;
 
        ref_cbdata.array = array;
@@ -2412,6 +2413,9 @@ int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int
 
        filter->kind = type & FILTER_REFS_KIND_MASK;
 
+       save_commit_buffer_orig = save_commit_buffer;
+       save_commit_buffer = 0;
+
        init_contains_cache(&ref_cbdata.contains_cache);
        init_contains_cache(&ref_cbdata.no_contains_cache);
 
@@ -2444,6 +2448,7 @@ int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int
        reach_filter(array, filter->reachable_from, INCLUDE_REACHED);
        reach_filter(array, filter->unreachable_from, EXCLUDE_REACHED);
 
+       save_commit_buffer = save_commit_buffer_orig;
        return ret;
 }
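The filter_refs() change is the usual save-and-restore dance around a global toggle: remember the caller's save_commit_buffer, disable it while walking a potentially huge number of commits, and put it back before returning. A generic sketch of the pattern (the global name is a stand-in for git's save_commit_buffer):

    static int global_toggle = 1;

    static void with_toggle_disabled(void (*body)(void))
    {
            int saved = global_toggle;   /* remember the caller's setting */

            global_toggle = 0;           /* turn it off for the expensive work */
            body();
            global_toggle = saved;       /* restore it on the way out */
    }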
 
index 135a1a6e41cf51afaf2ffc730a225409147f81b0..d258fd31995fbb4f8a198ba0bd275564182fe43c 100644 (file)
--- a/reflog.c
+++ b/reflog.c
@@ -240,8 +240,9 @@ static int unreachable(struct expire_reflog_policy_cb *cb, struct commit *commit
  * Return true iff the specified reflog entry should be expired.
  */
 int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
-                            const char *email, timestamp_t timestamp, int tz,
-                            const char *message, void *cb_data)
+                            const char *email UNUSED,
+                            timestamp_t timestamp, int tz UNUSED,
+                            const char *message UNUSED, void *cb_data)
 {
        struct expire_reflog_policy_cb *cb = cb_data;
        struct commit *old_commit, *new_commit;
@@ -294,7 +295,8 @@ int should_expire_reflog_ent_verbose(struct object_id *ooid,
        return expire;
 }
 
-static int push_tip_to_list(const char *refname, const struct object_id *oid,
+static int push_tip_to_list(const char *refname UNUSED,
+                           const struct object_id *oid,
                            int flags, void *cb_data)
 {
        struct commit_list **list = cb_data;
@@ -378,9 +380,11 @@ void reflog_expiry_cleanup(void *cb_data)
        }
 }
 
-int count_reflog_ent(struct object_id *ooid, struct object_id *noid,
-                    const char *email, timestamp_t timestamp, int tz,
-                    const char *message, void *cb_data)
+int count_reflog_ent(struct object_id *ooid UNUSED,
+                    struct object_id *noid UNUSED,
+                    const char *email UNUSED,
+                    timestamp_t timestamp, int tz UNUSED,
+                    const char *message UNUSED, void *cb_data)
 {
        struct cmd_reflog_expire_cb *cb = cb_data;
        if (!cb->expire_total || timestamp < cb->expire_total)
diff --git a/refs.c b/refs.c
index 90bcb2716873592864e2496951f913618521cb45..c89d558892569b6326d092eb61a7bba28ef4b3d4 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -20,6 +20,7 @@
 #include "repository.h"
 #include "sigchain.h"
 #include "date.h"
+#include "commit.h"
 
 /*
  * List of all available backends
@@ -56,6 +57,88 @@ static unsigned char refname_disposition[256] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 4, 4
 };
 
+struct ref_namespace_info ref_namespace[] = {
+       [NAMESPACE_HEAD] = {
+               .ref = "HEAD",
+               .decoration = DECORATION_REF_HEAD,
+               .exact = 1,
+       },
+       [NAMESPACE_BRANCHES] = {
+               .ref = "refs/heads/",
+               .decoration = DECORATION_REF_LOCAL,
+       },
+       [NAMESPACE_TAGS] = {
+               .ref = "refs/tags/",
+               .decoration = DECORATION_REF_TAG,
+       },
+       [NAMESPACE_REMOTE_REFS] = {
+               /*
+                * The default refspec for new remotes copies refs from
+                * refs/heads/ on the remote into refs/remotes/<remote>/.
+                * As such, "refs/remotes/" has special handling.
+                */
+               .ref = "refs/remotes/",
+               .decoration = DECORATION_REF_REMOTE,
+       },
+       [NAMESPACE_STASH] = {
+               /*
+                * The single ref "refs/stash" stores the latest stash.
+                * Older stashes can be found in the reflog.
+                */
+               .ref = "refs/stash",
+               .exact = 1,
+               .decoration = DECORATION_REF_STASH,
+       },
+       [NAMESPACE_REPLACE] = {
+               /*
+                * This namespace allows Git to act as if one object ID
+                * points to the content of another. Unlike the other
+                * ref namespaces, this one can be changed by the
+                * GIT_REPLACE_REF_BASE environment variable. This
+                * .ref value will be overwritten in setup_git_env().
+                */
+               .ref = "refs/replace/",
+               .decoration = DECORATION_GRAFTED,
+       },
+       [NAMESPACE_NOTES] = {
+               /*
+                * The refs/notes/commit ref points to the tip of a
+                * parallel commit history that adds metadata to commits
+                * in the normal history. This ref can be overwritten
+                * by the core.notesRef config variable or the
+                * GIT_NOTES_REF environment variable.
+                */
+               .ref = "refs/notes/commit",
+               .exact = 1,
+       },
+       [NAMESPACE_PREFETCH] = {
+               /*
+                * Prefetch refs are written by the background 'fetch'
+                * maintenance task. It allows faster foreground fetches
+                * by advertising these previously-downloaded tips, without
+                * updating refs/remotes/ or requiring user intervention.
+                */
+               .ref = "refs/prefetch/",
+       },
+       [NAMESPACE_REWRITTEN] = {
+               /*
+                * Rewritten refs are used by the 'label' command in the
+                * sequencer. These are particularly useful during an
+                * interactive rebase that uses the 'merge' command.
+                */
+               .ref = "refs/rewritten/",
+       },
+};
+
+void update_ref_namespace(enum ref_namespace namespace, char *ref)
+{
+       struct ref_namespace_info *info = &ref_namespace[namespace];
+       if (info->ref_updated)
+               free(info->ref);
+       info->ref = ref;
+       info->ref_updated = 1;
+}
+
 /*
  * Try to read one refname component from the front of refname.
  * Return the length of the component found, or -1 if the component is
@@ -358,7 +441,8 @@ struct warn_if_dangling_data {
        const char *msg_fmt;
 };
 
-static int warn_if_dangling_symref(const char *refname, const struct object_id *oid,
+static int warn_if_dangling_symref(const char *refname,
+                                  const struct object_id *oid UNUSED,
                                   int flags, void *cb_data)
 {
        struct warn_if_dangling_data *d = cb_data;
@@ -455,11 +539,16 @@ void normalize_glob_ref(struct string_list_item *item, const char *prefix,
        if (*pattern == '/')
                BUG("pattern must not start with '/'");
 
-       if (prefix) {
+       if (prefix)
                strbuf_addstr(&normalized_pattern, prefix);
-       }
-       else if (!starts_with(pattern, "refs/"))
+       else if (!starts_with(pattern, "refs/") &&
+                  strcmp(pattern, "HEAD"))
                strbuf_addstr(&normalized_pattern, "refs/");
+       /*
+        * NEEDSWORK: Special case other symrefs such as REBASE_HEAD,
+        * MERGE_HEAD, etc.
+        */
+
        strbuf_addstr(&normalized_pattern, pattern);
        strbuf_strip_suffix(&normalized_pattern, "/");
 
@@ -893,8 +982,9 @@ static void set_read_ref_cutoffs(struct read_ref_at_cb *cb,
 }
 
 static int read_ref_at_ent(struct object_id *ooid, struct object_id *noid,
-               const char *email, timestamp_t timestamp, int tz,
-               const char *message, void *cb_data)
+                          const char *email UNUSED,
+                          timestamp_t timestamp, int tz,
+                          const char *message, void *cb_data)
 {
        struct read_ref_at_cb *cb = cb_data;
        int reached_count;
@@ -934,9 +1024,11 @@ static int read_ref_at_ent(struct object_id *ooid, struct object_id *noid,
        return cb->found_it;
 }
 
-static int read_ref_at_ent_newest(struct object_id *ooid, struct object_id *noid,
-                                 const char *email, timestamp_t timestamp,
-                                 int tz, const char *message, void *cb_data)
+static int read_ref_at_ent_newest(struct object_id *ooid UNUSED,
+                                 struct object_id *noid,
+                                 const char *email UNUSED,
+                                 timestamp_t timestamp, int tz,
+                                 const char *message, void *cb_data)
 {
        struct read_ref_at_cb *cb = cb_data;
 
@@ -947,8 +1039,9 @@ static int read_ref_at_ent_newest(struct object_id *ooid, struct object_id *noid
 }
 
 static int read_ref_at_ent_oldest(struct object_id *ooid, struct object_id *noid,
-                                 const char *email, timestamp_t timestamp,
-                                 int tz, const char *message, void *cb_data)
+                                 const char *email UNUSED,
+                                 timestamp_t timestamp, int tz,
+                                 const char *message, void *cb_data)
 {
        struct read_ref_at_cb *cb = cb_data;
 
@@ -1524,6 +1617,7 @@ int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
 
 int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data)
 {
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
        return do_for_each_repo_ref(r, git_replace_ref_base, fn,
                                    strlen(git_replace_ref_base),
                                    DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
@@ -1810,7 +1904,7 @@ struct ref_store_hash_entry
        char name[FLEX_ARRAY];
 };
 
-static int ref_store_hash_cmp(const void *unused_cmp_data,
+static int ref_store_hash_cmp(const void *cmp_data UNUSED,
                              const struct hashmap_entry *eptr,
                              const struct hashmap_entry *entry_or_key,
                              const void *keydata)
diff --git a/refs.h b/refs.h
index 47cb9edbaa8913c3af721d347744beefacb1e754..d6575b8c2bdf0d3e9021f7000b42308ff33203af 100644 (file)
--- a/refs.h
+++ b/refs.h
@@ -2,6 +2,7 @@
 #define REFS_H
 
 #include "cache.h"
+#include "commit.h"
 
 struct object_id;
 struct ref_store;
@@ -930,4 +931,49 @@ struct ref_store *get_main_ref_store(struct repository *r);
 struct ref_store *get_submodule_ref_store(const char *submodule);
 struct ref_store *get_worktree_ref_store(const struct worktree *wt);
 
+/*
+ * Some of the names specified by refs have special meaning to Git.
+ * Organize these namespaces in a common 'ref_namespace' array for
+ * reference from multiple places in the codebase.
+ */
+
+struct ref_namespace_info {
+       char *ref;
+       enum decoration_type decoration;
+
+       /*
+        * If 'exact' is true, then we must match the 'ref' exactly.
+        * Otherwise, use a prefix match.
+        *
+        * 'ref_updated' is for internal use. It represents whether the
+        * 'ref' value was replaced from its original literal version.
+        */
+       unsigned exact:1,
+                ref_updated:1;
+};
+
+enum ref_namespace {
+       NAMESPACE_HEAD,
+       NAMESPACE_BRANCHES,
+       NAMESPACE_TAGS,
+       NAMESPACE_REMOTE_REFS,
+       NAMESPACE_STASH,
+       NAMESPACE_REPLACE,
+       NAMESPACE_NOTES,
+       NAMESPACE_PREFETCH,
+       NAMESPACE_REWRITTEN,
+
+       /* Must be last */
+       NAMESPACE__COUNT
+};
+
+/* See refs.c for the contents of this array. */
+extern struct ref_namespace_info ref_namespace[NAMESPACE__COUNT];
+
+/*
+ * Some ref namespaces can be modified by config values or environment
+ * variables. Modify a namespace as specified by its ref_namespace key.
+ */
+void update_ref_namespace(enum ref_namespace namespace, char *ref);
+
 #endif /* REFS_H */
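As a usage sketch of the exact-versus-prefix semantics documented above (stand-in types; git's callers use the real ref_namespace array and its helpers):

    #include <stdio.h>
    #include <string.h>

    struct ns_sketch {
            const char *ref;
            unsigned exact;   /* 1: whole-string match, 0: prefix match */
    };

    static const struct ns_sketch table[] = {
            { "HEAD",        1 },
            { "refs/heads/", 0 },
            { "refs/tags/",  0 },
            { "refs/stash",  1 },
    };

    static int in_namespace(const struct ns_sketch *ns, const char *refname)
    {
            if (ns->exact)
                    return !strcmp(refname, ns->ref);
            return !strncmp(refname, ns->ref, strlen(ns->ref));
    }

    int main(void)
    {
            size_t i;

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (in_namespace(&table[i], "refs/heads/topic"))
                            printf("matched %s\n", table[i].ref);
            return 0;
    }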
index 8db7882aacb533ebf6635a0457c8b2ac7855c392..e4009b3c421f5bddf9b3480a9fc881cee0d2307b 100644 (file)
@@ -2202,8 +2202,8 @@ static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
        return ok;
 }
 
-static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
-                                     struct object_id *peeled)
+static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
+                                     struct object_id *peeled UNUSED)
 {
        BUG("ref_iterator_peel() called for reflog_iterator");
 }
@@ -2257,7 +2257,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
 static enum iterator_selection reflog_iterator_select(
        struct ref_iterator *iter_worktree,
        struct ref_iterator *iter_common,
-       void *cb_data)
+       void *cb_data UNUSED)
 {
        if (iter_worktree) {
                /*
@@ -2985,7 +2985,7 @@ cleanup:
 
 static int files_transaction_abort(struct ref_store *ref_store,
                                   struct ref_transaction *transaction,
-                                  struct strbuf *err)
+                                  struct strbuf *err UNUSED)
 {
        struct files_ref_store *refs =
                files_downcast(ref_store, 0, "ref_transaction_abort");
@@ -2995,7 +2995,9 @@ static int files_transaction_abort(struct ref_store *ref_store,
 }
 
 static int ref_present(const char *refname,
-                      const struct object_id *oid, int flags, void *cb_data)
+                      const struct object_id *oid UNUSED,
+                      int flags UNUSED,
+                      void *cb_data)
 {
        struct string_list *affected_refnames = cb_data;
 
@@ -3259,7 +3261,7 @@ static int files_reflog_expire(struct ref_store *ref_store,
        return -1;
 }
 
-static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
+static int files_init_db(struct ref_store *ref_store, struct strbuf *err UNUSED)
 {
        struct files_ref_store *refs =
                files_downcast(ref_store, REF_STORE_WRITE, "init_db");
index b2e56bae1c6408bccf66729b8b09f76907b984d9..c9fd0bcaf90c753031a1624f593c607a4549bf5a 100644 (file)
@@ -51,8 +51,8 @@ static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator)
        return ref_iterator_abort(ref_iterator);
 }
 
-static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator,
-                                  struct object_id *peeled)
+static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
+                                  struct object_id *peeled UNUSED)
 {
        BUG("peel called for empty iterator");
 }
@@ -238,7 +238,7 @@ struct ref_iterator *merge_ref_iterator_begin(
  */
 static enum iterator_selection overlay_iterator_select(
                struct ref_iterator *front, struct ref_iterator *back,
-               void *cb_data)
+               void *cb_data UNUSED)
 {
        int cmp;
 
index 97b68377673067d3a28d60e01f2b4532433750f2..43cdb97f8b37756ee2f62fcf2dc285ec3116f5b4 100644 (file)
@@ -726,7 +726,7 @@ static struct snapshot *get_snapshot(struct packed_ref_store *refs)
 }
 
 static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
-                              struct object_id *oid, struct strbuf *referent,
+                              struct object_id *oid, struct strbuf *referent UNUSED,
                               unsigned int *type, int *failure_errno)
 {
        struct packed_ref_store *refs =
@@ -1078,7 +1078,8 @@ int packed_refs_is_locked(struct ref_store *ref_store)
 static const char PACKED_REFS_HEADER[] =
        "# pack-refs with: peeled fully-peeled sorted \n";
 
-static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
+static int packed_init_db(struct ref_store *ref_store UNUSED,
+                         struct strbuf *err UNUSED)
 {
        /* Nothing to do. */
        return 0;
@@ -1473,7 +1474,7 @@ failure:
 
 static int packed_transaction_abort(struct ref_store *ref_store,
                                    struct ref_transaction *transaction,
-                                   struct strbuf *err)
+                                   struct strbuf *err UNUSED)
 {
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
@@ -1512,7 +1513,7 @@ cleanup:
        return ret;
 }
 
-static int packed_initial_transaction_commit(struct ref_store *ref_store,
+static int packed_initial_transaction_commit(struct ref_store *ref_store UNUSED,
                                            struct ref_transaction *transaction,
                                            struct strbuf *err)
 {
@@ -1568,7 +1569,8 @@ static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
        return ret;
 }
 
-static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
+static int packed_pack_refs(struct ref_store *ref_store UNUSED,
+                           unsigned int flags UNUSED)
 {
        /*
         * Packed refs are already packed. It might be that loose refs
@@ -1578,7 +1580,7 @@ static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
        return 0;
 }
 
-static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
+static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store UNUSED)
 {
        return empty_ref_iterator_begin();
 }
index 8b79891d3218d526ef1216bc1525b6040730185a..8c0c44699335e65af07ee2ea3ebd4fb266db32a5 100644 (file)
--- a/refspec.h
+++ b/refspec.h
@@ -69,7 +69,7 @@ int valid_remote_name(const char *name);
 struct strvec;
 /*
  * Determine what <prefix> values to pass to the peer in ref-prefix lines
- * (see Documentation/technical/protocol-v2.txt).
+ * (see linkgit:gitprotocol-v2[5]).
  */
 void refspec_ref_prefixes(const struct refspec *rs,
                          struct strvec *ref_prefixes);
index 54b4025105cfd6dc53821d366a1dd20bc8d11004..b4db23ce1880794a937b841e28ffa27e8869d2c7 100644 (file)
@@ -443,7 +443,7 @@ static int reader_start(struct reftable_reader *r, struct table_iter *ti,
        return reader_table_iter_at(r, ti, off, typ);
 }
 
-static int reader_seek_linear(struct reftable_reader *r, struct table_iter *ti,
+static int reader_seek_linear(struct table_iter *ti,
                              struct reftable_record *want)
 {
        struct reftable_record rec =
@@ -510,7 +510,7 @@ static int reader_seek_indexed(struct reftable_reader *r,
        if (err < 0)
                goto done;
 
-       err = reader_seek_linear(r, &index_iter, &want_index);
+       err = reader_seek_linear(&index_iter, &want_index);
        while (1) {
                err = table_iter_next(&index_iter, &index_result);
                table_iter_block_done(&index_iter);
@@ -570,7 +570,7 @@ static int reader_seek_internal(struct reftable_reader *r,
        err = reader_start(r, &ti, reftable_record_type(rec), 0);
        if (err < 0)
                return err;
-       err = reader_seek_linear(r, &ti, rec);
+       err = reader_seek_linear(&ti, rec);
        if (err < 0)
                return err;
        else {
index 67f178b1120bd9167cef21382141ab96b8ff3514..72dfb8fb86aab0e93780416049cf8f8bfae157bc 100644 (file)
@@ -580,6 +580,7 @@ struct rpc_state {
        char *service_url;
        char *hdr_content_type;
        char *hdr_accept;
+       char *hdr_accept_language;
        char *protocol_header;
        char *buf;
        size_t alloc;
@@ -607,6 +608,8 @@ struct rpc_state {
        unsigned flush_read_but_not_sent : 1;
 };
 
+#define RPC_STATE_INIT { 0 }
+
 /*
  * Appends the result of reading from rpc->out to the string represented by
  * rpc->buf and rpc->len if there is enough space. Returns 1 if there was
@@ -932,6 +935,10 @@ static int post_rpc(struct rpc_state *rpc, int stateless_connect, int flush_rece
        headers = curl_slist_append(headers, needs_100_continue ?
                "Expect: 100-continue" : "Expect:");
 
+       /* Add Accept-Language header */
+       if (rpc->hdr_accept_language)
+               headers = curl_slist_append(headers, rpc->hdr_accept_language);
+
        /* Add the extra Git-Protocol header */
        if (rpc->protocol_header)
                headers = curl_slist_append(headers, rpc->protocol_header);
@@ -1080,6 +1087,8 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads,
        strbuf_addf(&buf, "%s%s", url.buf, svc);
        rpc->service_url = strbuf_detach(&buf, NULL);
 
+       rpc->hdr_accept_language = xstrdup_or_null(http_get_accept_language_header());
+
        strbuf_addf(&buf, "Content-Type: application/x-%s-request", svc);
        rpc->hdr_content_type = strbuf_detach(&buf, NULL);
 
@@ -1118,6 +1127,7 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads,
        free(rpc->service_url);
        free(rpc->hdr_content_type);
        free(rpc->hdr_accept);
+       free(rpc->hdr_accept_language);
        free(rpc->protocol_header);
        free(rpc->buf);
        strbuf_release(&buf);
@@ -1153,7 +1163,7 @@ static int fetch_dumb(int nr_heads, struct ref **to_fetch)
 static int fetch_git(struct discovery *heads,
        int nr_heads, struct ref **to_fetch)
 {
-       struct rpc_state rpc;
+       struct rpc_state rpc = RPC_STATE_INIT;
        struct strbuf preamble = STRBUF_INIT;
        int i, err;
        struct strvec args = STRVEC_INIT;
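Two related details above: rpc_state now owns an optional hdr_accept_language string, and the struct is zero-initialized via RPC_STATE_INIT so the unconditional free() in the cleanup paths is safe even when the header was never set. A stand-alone sketch of the idiom (struct and header value are examples only):

    #include <stdlib.h>
    #include <string.h>

    struct rpc_sketch {
            char *hdr_accept_language;
            /* ... more optional headers ... */
    };

    #define RPC_SKETCH_INIT { 0 }   /* every pointer starts out NULL */

    int main(void)
    {
            struct rpc_sketch rpc = RPC_SKETCH_INIT;

            /* may or may not be set, depending on the user's locale */
            rpc.hdr_accept_language = strdup("Accept-Language: en-US, *;q=0.9");

            free(rpc.hdr_accept_language);   /* freeing NULL would be fine too */
            return 0;
    }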
@@ -1276,6 +1286,29 @@ static void parse_fetch(struct strbuf *buf)
        strbuf_reset(buf);
 }
 
+static void parse_get(const char *arg)
+{
+       struct strbuf url = STRBUF_INIT;
+       struct strbuf path = STRBUF_INIT;
+       const char *space;
+
+       space = strchr(arg, ' ');
+
+       if (!space)
+               die(_("protocol error: expected '<url> <path>', missing space"));
+
+       strbuf_add(&url, arg, space - arg);
+       strbuf_addstr(&path, space + 1);
+
+       if (http_get_file(url.buf, path.buf, NULL))
+               die(_("failed to download file at URL '%s'"), url.buf);
+
+       strbuf_release(&url);
+       strbuf_release(&path);
+       printf("\n");
+       fflush(stdout);
+}
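parse_get() implements the helper side of the new 'get' capability advertised further down in cmd_main(): git sends one line naming a URL and a local path, the helper downloads the file, and it answers with a blank line when done. A hypothetical exchange (URL and path invented; capability list abbreviated):

    git>    capabilities
    helper> stateless-connect
    helper> fetch
    helper> get
    helper> ...
    helper>
    git>    get https://example.com/repo.bundle /tmp/repo.bundle
    helper>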
+
 static int push_dav(int nr_spec, const char **specs)
 {
        struct child_process child = CHILD_PROCESS_INIT;
@@ -1299,7 +1332,7 @@ static int push_dav(int nr_spec, const char **specs)
 
 static int push_git(struct discovery *heads, int nr_spec, const char **specs)
 {
-       struct rpc_state rpc;
+       struct rpc_state rpc = RPC_STATE_INIT;
        int i, err;
        struct strvec args;
        struct string_list_item *cas_option;
@@ -1398,8 +1431,9 @@ free_specs:
 static int stateless_connect(const char *service_name)
 {
        struct discovery *discover;
-       struct rpc_state rpc;
+       struct rpc_state rpc = RPC_STATE_INIT;
        struct strbuf buf = STRBUF_INIT;
+       const char *accept_language;
 
        /*
         * Run the info/refs request and see if the server supports protocol
@@ -1418,6 +1452,9 @@ static int stateless_connect(const char *service_name)
                printf("\n");
                fflush(stdout);
        }
+       accept_language = http_get_accept_language_header();
+       if (accept_language)
+               rpc.hdr_accept_language = xstrfmt("%s", accept_language);
 
        rpc.service_name = service_name;
        rpc.service_url = xstrfmt("%s%s", url.buf, rpc.service_name);
@@ -1467,6 +1504,7 @@ static int stateless_connect(const char *service_name)
        free(rpc.service_url);
        free(rpc.hdr_content_type);
        free(rpc.hdr_accept);
+       free(rpc.hdr_accept_language);
        free(rpc.protocol_header);
        free(rpc.buf);
        strbuf_release(&buf);
@@ -1549,9 +1587,14 @@ int cmd_main(int argc, const char **argv)
                                printf("unsupported\n");
                        fflush(stdout);
 
+               } else if (skip_prefix(buf.buf, "get ", &arg)) {
+                       parse_get(arg);
+                       fflush(stdout);
+
                } else if (!strcmp(buf.buf, "capabilities")) {
                        printf("stateless-connect\n");
                        printf("fetch\n");
+                       printf("get\n");
                        printf("option\n");
                        printf("push\n");
                        printf("check-connectivity\n");
index b19e3a2f015a7215df4227ae85368e11d8b1e962..60869beebe7364a594cd45938d4ed97dcdd28840 100644 (file)
--- a/remote.c
+++ b/remote.c
@@ -11,7 +11,6 @@
 #include "dir.h"
 #include "tag.h"
 #include "string-list.h"
-#include "mergesort.h"
 #include "strvec.h"
 #include "commit-reach.h"
 #include "advice.h"
@@ -87,7 +86,7 @@ struct remotes_hash_key {
        int len;
 };
 
-static int remotes_hash_cmp(const void *unused_cmp_data,
+static int remotes_hash_cmp(const void *cmp_data UNUSED,
                            const struct hashmap_entry *eptr,
                            const struct hashmap_entry *entry_or_key,
                            const void *keydata)
@@ -171,7 +170,7 @@ struct branches_hash_key {
        int len;
 };
 
-static int branches_hash_cmp(const void *unused_cmp_data,
+static int branches_hash_cmp(const void *cmp_data UNUSED,
                             const struct hashmap_entry *eptr,
                             const struct hashmap_entry *entry_or_key,
                             const void *keydata)
@@ -850,7 +849,7 @@ static int refspec_match(const struct refspec_item *refspec,
        return !strcmp(refspec->src, name);
 }
 
-static int omit_name_by_refspec(const char *name, struct refspec *rs)
+int omit_name_by_refspec(const char *name, struct refspec *rs)
 {
        int i;
 
@@ -1082,27 +1081,6 @@ void free_refs(struct ref *ref)
        }
 }
 
-int ref_compare_name(const void *va, const void *vb)
-{
-       const struct ref *a = va, *b = vb;
-       return strcmp(a->name, b->name);
-}
-
-static void *ref_list_get_next(const void *a)
-{
-       return ((const struct ref *)a)->next;
-}
-
-static void ref_list_set_next(void *a, void *next)
-{
-       ((struct ref *)a)->next = next;
-}
-
-void sort_ref_list(struct ref **l, int (*cmp)(const void *, const void *))
-{
-       *l = llist_mergesort(*l, ref_list_get_next, ref_list_set_next, cmp);
-}
-
 int count_refspec_match(const char *pattern,
                        struct ref *refs,
                        struct ref **matched_ref)
@@ -2169,6 +2147,9 @@ static int stat_branch_pair(const char *branch_name, const char *base,
        struct object_id oid;
        struct commit *ours, *theirs;
        struct rev_info revs;
+       struct setup_revision_opt opt = {
+               .free_removed_argv_elements = 1,
+       };
        struct strvec argv = STRVEC_INIT;
 
        /* Cannot stat if what we used to build on no longer exists */
@@ -2203,7 +2184,7 @@ static int stat_branch_pair(const char *branch_name, const char *base,
        strvec_push(&argv, "--");
 
        repo_init_revisions(the_repository, &revs, NULL);
-       setup_revisions(argv.nr, argv.v, &revs, NULL);
+       setup_revisions(argv.nr, argv.v, &revs, &opt);
        if (prepare_revision_walk(&revs))
                die(_("revision walk setup failed"));
 
@@ -2339,7 +2320,8 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb,
 }
 
 static int one_local_ref(const char *refname, const struct object_id *oid,
-                        int flag, void *cb_data)
+                        int flag UNUSED,
+                        void *cb_data)
 {
        struct ref ***local_tail = cb_data;
        struct ref *ref;
@@ -2595,19 +2577,22 @@ struct check_and_collect_until_cb_data {
 };
 
 /* Get the timestamp of the latest entry. */
-static int peek_reflog(struct object_id *o_oid, struct object_id *n_oid,
-                      const char *ident, timestamp_t timestamp,
-                      int tz, const char *message, void *cb_data)
+static int peek_reflog(struct object_id *o_oid UNUSED,
+                      struct object_id *n_oid UNUSED,
+                      const char *ident UNUSED,
+                      timestamp_t timestamp, int tz UNUSED,
+                      const char *message UNUSED, void *cb_data)
 {
        timestamp_t *ts = cb_data;
        *ts = timestamp;
        return 1;
 }
 
-static int check_and_collect_until(struct object_id *o_oid,
+static int check_and_collect_until(struct object_id *o_oid UNUSED,
                                   struct object_id *n_oid,
-                                  const char *ident, timestamp_t timestamp,
-                                  int tz, const char *message, void *cb_data)
+                                  const char *ident UNUSED,
+                                  timestamp_t timestamp, int tz UNUSED,
+                                  const char *message UNUSED, void *cb_data)
 {
        struct commit *commit;
        struct check_and_collect_until_cb_data *cb = cb_data;
index dd4402436f1f2e1bb06d37c932ca8f9e3afcf48a..1c4621b414bdc0372613e58c3b2133b84004d1e7 100644 (file)
--- a/remote.h
+++ b/remote.h
@@ -207,9 +207,7 @@ struct ref *find_ref_by_name(const struct ref *list, const char *name);
 struct ref *alloc_ref(const char *name);
 struct ref *copy_ref(const struct ref *ref);
 struct ref *copy_ref_list(const struct ref *ref);
-void sort_ref_list(struct ref **, int (*cmp)(const void *, const void *));
 int count_refspec_match(const char *, struct ref *refs, struct ref **matched_ref);
-int ref_compare_name(const void *, const void *);
 
 int check_ref_type(const struct ref *ref, int flags);
 
@@ -247,6 +245,12 @@ int resolve_remote_symref(struct ref *ref, struct ref *list);
  */
 struct ref *ref_remove_duplicates(struct ref *ref_map);
 
+/*
+ * Check whether a name matches any negative refspec in rs. Returns 1 if the
+ * name matches at least one negative refspec, and 0 otherwise.
+ */
+int omit_name_by_refspec(const char *name, struct refspec *rs);
+
 /*
  * Remove all entries in the input list which match any negative refspec in
  * the refspec list.
index 7bd9aba6ee6c339e02c6fe25262a75a19bfc6e68..320be2522d80a87fdb1569a97c5e24b77fd4a525 100644 (file)
@@ -9,7 +9,8 @@
 static int register_replace_ref(struct repository *r,
                                const char *refname,
                                const struct object_id *oid,
-                               int flag, void *cb_data)
+                               int flag UNUSED,
+                               void *cb_data UNUSED)
 {
        /* Get sha1 from refname */
        const char *slash = strrchr(refname, '/');
index 2dfcb2b6542f2cbe75d4575e8b707cf1676679a3..e8b58151bc4a01d0a90ba49d89ddf52d3d780995 100644 (file)
@@ -11,11 +11,18 @@ static void repo_cfg_bool(struct repository *r, const char *key, int *dest,
                *dest = def;
 }
 
+static void repo_cfg_int(struct repository *r, const char *key, int *dest,
+                        int def)
+{
+       if (repo_config_get_int(r, key, dest))
+               *dest = def;
+}
+
 void prepare_repo_settings(struct repository *r)
 {
        int experimental;
        int value;
-       char *strval;
+       const char *strval;
        int manyfiles;
 
        if (!r->gitdir)
@@ -42,11 +49,14 @@ void prepare_repo_settings(struct repository *r)
                r->settings.core_untracked_cache = UNTRACKED_CACHE_WRITE;
        }
 
-       /* Boolean config or default, does not cascade (simple)  */
+       /* Commit graph config or default, does not cascade (simple) */
        repo_cfg_bool(r, "core.commitgraph", &r->settings.core_commit_graph, 1);
+       repo_cfg_int(r, "commitgraph.generationversion", &r->settings.commit_graph_generation_version, 2);
        repo_cfg_bool(r, "commitgraph.readchangedpaths", &r->settings.commit_graph_read_changed_paths, 1);
        repo_cfg_bool(r, "gc.writecommitgraph", &r->settings.gc_write_commit_graph, 1);
        repo_cfg_bool(r, "fetch.writecommitgraph", &r->settings.fetch_write_commit_graph, 0);
+
+       /* Boolean config or default, does not cascade (simple)  */
        repo_cfg_bool(r, "pack.usesparse", &r->settings.pack_use_sparse, 1);
        repo_cfg_bool(r, "core.multipackindex", &r->settings.core_multi_pack_index, 1);
        repo_cfg_bool(r, "index.sparse", &r->settings.sparse_index, 0);
@@ -67,7 +77,7 @@ void prepare_repo_settings(struct repository *r)
        if (!repo_config_get_int(r, "index.version", &value))
                r->settings.index_version = value;
 
-       if (!repo_config_get_string(r, "core.untrackedcache", &strval)) {
+       if (!repo_config_get_string_tmp(r, "core.untrackedcache", &strval)) {
                int v = git_parse_maybe_bool(strval);
 
                /*
@@ -78,10 +88,9 @@ void prepare_repo_settings(struct repository *r)
                if (v >= 0)
                        r->settings.core_untracked_cache = v ?
                                UNTRACKED_CACHE_WRITE : UNTRACKED_CACHE_REMOVE;
-               free(strval);
        }
 
-       if (!repo_config_get_string(r, "fetch.negotiationalgorithm", &strval)) {
+       if (!repo_config_get_string_tmp(r, "fetch.negotiationalgorithm", &strval)) {
                int fetch_default = r->settings.fetch_negotiation_algorithm;
                if (!strcasecmp(strval, "skipping"))
                        r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING;
index 6cc661e5a43b82022ca171a43c015db2f278116e..24316ac944edcd5827de279d14fef7dea9334c74 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef REPOSITORY_H
 #define REPOSITORY_H
 
+#include "git-compat-util.h"
 #include "path.h"
 
 struct config_set;
@@ -30,6 +31,7 @@ struct repo_settings {
        int initialized;
 
        int core_commit_graph;
+       int commit_graph_generation_version;
        int commit_graph_read_changed_paths;
        int gc_write_commit_graph;
        int fetch_write_commit_graph;
@@ -185,6 +187,7 @@ void repo_set_gitdir(struct repository *repo, const char *root,
 void repo_set_worktree(struct repository *repo, const char *path);
 void repo_set_hash_algo(struct repository *repo, int algo);
 void initialize_the_repository(void);
+RESULT_MUST_BE_USED
 int repo_init(struct repository *r, const char *gitdir, const char *worktree);
 
 /*
@@ -196,6 +199,7 @@ int repo_init(struct repository *r, const char *gitdir, const char *worktree);
  * Return 0 upon success and a non-zero value upon failure.
  */
 struct object_id;
+RESULT_MUST_BE_USED
 int repo_submodule_init(struct repository *subrepo,
                        struct repository *superproject,
                        const char *path,
index 0c6e26cd9c8ff8d4f6fdb6050d7df57be460256e..d5f4463cb6b789c28c86c68e39aa621cd50d26ed 100644 (file)
@@ -119,10 +119,10 @@ struct path_and_oids_entry {
        struct oidset trees;
 };
 
-static int path_and_oids_cmp(const void *hashmap_cmp_fn_data,
+static int path_and_oids_cmp(const void *hashmap_cmp_fn_data UNUSED,
                             const struct hashmap_entry *eptr,
                             const struct hashmap_entry *entry_or_key,
-                            const void *keydata)
+                            const void *keydata UNUSED)
 {
        const struct path_and_oids_entry *e1, *e2;
 
@@ -373,18 +373,10 @@ static struct object *get_reference(struct rev_info *revs, const char *name,
                                    unsigned int flags)
 {
        struct object *object;
-       struct commit *commit;
 
-       /*
-        * If the repository has commit graphs, we try to opportunistically
-        * look up the object ID in those graphs. Like this, we can avoid
-        * parsing commit data from disk.
-        */
-       commit = lookup_commit_in_graph(revs->repo, oid);
-       if (commit)
-               object = &commit->object;
-       else
-               object = parse_object(revs->repo, oid);
+       object = parse_object_with_flags(revs->repo, oid,
+                                        revs->verify_objects ? 0 :
+                                        PARSE_OBJECT_SKIP_HASH_CHECK);
 
        if (!object) {
                if (revs->ignore_missing)
@@ -1105,7 +1097,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
                           struct commit_list **list, struct prio_queue *queue)
 {
        struct commit_list *parent = commit->parents;
-       unsigned left_flag;
+       unsigned pass_flags;
 
        if (commit->object.flags & ADDED)
                return 0;
@@ -1160,7 +1152,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
        if (revs->no_walk)
                return 0;
 
-       left_flag = (commit->object.flags & SYMMETRIC_LEFT);
+       pass_flags = (commit->object.flags & (SYMMETRIC_LEFT | ANCESTRY_PATH));
 
        for (parent = commit->parents; parent; parent = parent->next) {
                struct commit *p = parent->item;
@@ -1181,7 +1173,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
                        if (!*slot)
                                *slot = *revision_sources_at(revs->sources, commit);
                }
-               p->object.flags |= left_flag;
+               p->object.flags |= pass_flags;
                if (!(p->object.flags & SEEN)) {
                        p->object.flags |= (SEEN | NOT_USER_GIVEN);
                        if (list)
@@ -1304,13 +1296,24 @@ static int still_interesting(struct commit_list *src, timestamp_t date, int slop
 }
 
 /*
- * "rev-list --ancestry-path A..B" computes commits that are ancestors
- * of B but not ancestors of A but further limits the result to those
- * that are descendants of A.  This takes the list of bottom commits and
- * the result of "A..B" without --ancestry-path, and limits the latter
- * further to the ones that can reach one of the commits in "bottom".
+ * "rev-list --ancestry-path=C_0 [--ancestry-path=C_1 ...] A..B"
+ * computes commits that are ancestors of B but not ancestors of A but
+ * further limits the result to those that have any of C in their
+ * ancestry path (i.e. are either ancestors of any of C, descendants
+ * of any of C, or are any of C). If --ancestry-path is specified with
+ * no commit, we use all bottom commits for C.
+ *
+ * Before this function is called, ancestors of C will have already
+ * been marked with ANCESTRY_PATH previously.
+ *
+ * This takes the list of bottom commits and the result of "A..B"
+ * without --ancestry-path, and limits the latter further to the ones
+ * that have any of C in their ancestry path. Since the ancestors of C
+ * have already been marked (a prerequisite of this function), we just
+ * need to mark the descendants, then exclude any commit that does not
+ * have any of these marks.
  */
-static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *list)
+static void limit_to_ancestry(struct commit_list *bottoms, struct commit_list *list)
 {
        struct commit_list *p;
        struct commit_list *rlist = NULL;
@@ -1323,7 +1326,7 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li
        for (p = list; p; p = p->next)
                commit_list_insert(p->item, &rlist);
 
-       for (p = bottom; p; p = p->next)
+       for (p = bottoms; p; p = p->next)
                p->item->object.flags |= TMP_MARK;
 
        /*
@@ -1356,38 +1359,39 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li
         */
 
        /*
-        * The ones that are not marked with TMP_MARK are uninteresting
+        * The ones that are not marked with either TMP_MARK or
+        * ANCESTRY_PATH are uninteresting
         */
        for (p = list; p; p = p->next) {
                struct commit *c = p->item;
-               if (c->object.flags & TMP_MARK)
+               if (c->object.flags & (TMP_MARK | ANCESTRY_PATH))
                        continue;
                c->object.flags |= UNINTERESTING;
        }
 
-       /* We are done with the TMP_MARK */
+       /* We are done with TMP_MARK and ANCESTRY_PATH */
        for (p = list; p; p = p->next)
-               p->item->object.flags &= ~TMP_MARK;
-       for (p = bottom; p; p = p->next)
-               p->item->object.flags &= ~TMP_MARK;
+               p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH);
+       for (p = bottoms; p; p = p->next)
+               p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH);
        free_commit_list(rlist);
 }
 
 /*
- * Before walking the history, keep the set of "negative" refs the
- * caller has asked to exclude.
+ * Before walking the history, add the set of "negative" refs the
+ * caller has asked to exclude to the bottom list.
  *
  * This is used to compute "rev-list --ancestry-path A..B", as we need
  * to filter the result of "A..B" further to the ones that can actually
  * reach A.
  */
-static struct commit_list *collect_bottom_commits(struct commit_list *list)
+static void collect_bottom_commits(struct commit_list *list,
+                                  struct commit_list **bottom)
 {
-       struct commit_list *elem, *bottom = NULL;
+       struct commit_list *elem;
        for (elem = list; elem; elem = elem->next)
                if (elem->item->object.flags & BOTTOM)
-                       commit_list_insert(elem->item, &bottom);
-       return bottom;
+                       commit_list_insert(elem->item, bottom);
 }
 
 /* Assumes either left_only or right_only is set */
@@ -1414,12 +1418,12 @@ static int limit_list(struct rev_info *revs)
        struct commit_list *original_list = revs->commits;
        struct commit_list *newlist = NULL;
        struct commit_list **p = &newlist;
-       struct commit_list *bottom = NULL;
        struct commit *interesting_cache = NULL;
 
-       if (revs->ancestry_path) {
-               bottom = collect_bottom_commits(original_list);
-               if (!bottom)
+       if (revs->ancestry_path_implicit_bottoms) {
+               collect_bottom_commits(original_list,
+                                      &revs->ancestry_path_bottoms);
+               if (!revs->ancestry_path_bottoms)
                        die("--ancestry-path given but there are no bottom commits");
        }
 
@@ -1464,9 +1468,8 @@ static int limit_list(struct rev_info *revs)
        if (revs->left_only || revs->right_only)
                limit_left_right(newlist, revs);
 
-       if (bottom)
-               limit_to_ancestry(bottom, newlist);
-       free_commit_list(bottom);
+       if (revs->ancestry_path)
+               limit_to_ancestry(revs->ancestry_path_bottoms, newlist);
 
        /*
         * Check if any commits have become TREESAME by some of their parents
@@ -1543,7 +1546,8 @@ int ref_excluded(struct string_list *ref_excludes, const char *path)
 }
 
 static int handle_one_ref(const char *path, const struct object_id *oid,
-                         int flag, void *cb_data)
+                         int flag UNUSED,
+                         void *cb_data)
 {
        struct all_refs_cb *cb = cb_data;
        struct object *object;
@@ -1618,8 +1622,11 @@ static void handle_one_reflog_commit(struct object_id *oid, void *cb_data)
 }
 
 static int handle_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
-               const char *email, timestamp_t timestamp, int tz,
-               const char *message, void *cb_data)
+                                const char *email UNUSED,
+                                timestamp_t timestamp UNUSED,
+                                int tz UNUSED,
+                                const char *message UNUSED,
+                                void *cb_data)
 {
        handle_one_reflog_commit(ooid, cb_data);
        handle_one_reflog_commit(noid, cb_data);
@@ -1627,8 +1634,8 @@ static int handle_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
 }
 
 static int handle_one_reflog(const char *refname_in_wt,
-                            const struct object_id *oid,
-                            int flag, void *cb_data)
+                            const struct object_id *oid UNUSED,
+                            int flag UNUSED, void *cb_data)
 {
        struct all_refs_cb *cb = cb_data;
        struct strbuf refname = STRBUF_INIT;
@@ -2213,7 +2220,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                               const struct setup_revision_opt* opt)
 {
        const char *arg = argv[0];
-       const char *optarg;
+       const char *optarg = NULL;
        int argcount;
        const unsigned hexsz = the_hash_algo->hexsz;
 
@@ -2284,6 +2291,23 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                revs->ancestry_path = 1;
                revs->simplify_history = 0;
                revs->limited = 1;
+               revs->ancestry_path_implicit_bottoms = 1;
+       } else if (skip_prefix(arg, "--ancestry-path=", &optarg)) {
+               struct commit *c;
+               struct object_id oid;
+               const char *msg = _("could not get commit for ancestry-path argument %s");
+
+               revs->ancestry_path = 1;
+               revs->simplify_history = 0;
+               revs->limited = 1;
+
+               if (repo_get_oid_committish(revs->repo, optarg, &oid))
+                       return error(msg, optarg);
+               get_reference(revs, optarg, &oid, ANCESTRY_PATH);
+               c = lookup_commit_reference(revs->repo, &oid);
+               if (!c)
+                       return error(msg, optarg);
+               commit_list_insert(c, &revs->ancestry_path_bottoms);
        } else if (!strcmp(arg, "-g") || !strcmp(arg, "--walk-reflogs")) {
                init_reflog_walk(&revs->reflog_info);
        } else if (!strcmp(arg, "--default")) {
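
With the new optional argument, the ancestry filter is decoupled from the bottom commits of the range: only the bare --ancestry-path form sets ancestry_path_implicit_bottoms, which tells limit_list() to collect the bottoms itself. A hedged command-line illustration (revision names made up):

	git rev-list --ancestry-path=topic-base v1.0..v2.0

Per the limit_to_ancestry() comment earlier in this patch, this keeps only the commits in v1.0..v2.0 that are ancestors of, descendants of, or equal to topic-base.
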
@@ -2398,6 +2422,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                revs->tree_objects = 1;
                revs->blob_objects = 1;
                revs->verify_objects = 1;
+               disable_commit_graph(revs->repo);
        } else if (!strcmp(arg, "--unpacked")) {
                revs->unpacked = 1;
        } else if (starts_with(arg, "--unpacked=")) {
@@ -2784,6 +2809,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
                        const char *arg = argv[i];
                        if (strcmp(arg, "--"))
                                continue;
+                       if (opt && opt->free_removed_argv_elements)
+                               free((char *)argv[i]);
                        argv[i] = NULL;
                        argc = i;
                        if (argv[i + 1])
@@ -2991,6 +3018,7 @@ static void release_revisions_topo_walk_info(struct topo_walk_info *info);
 void release_revisions(struct rev_info *revs)
 {
        free_commit_list(revs->commits);
+       free_commit_list(revs->ancestry_path_bottoms);
        object_array_clear(&revs->pending);
        object_array_clear(&revs->boundary_commits);
        release_revisions_cmdline(&revs->cmdline);
@@ -3791,51 +3819,6 @@ int rewrite_parents(struct rev_info *revs, struct commit *commit,
        return 0;
 }
 
-static int commit_rewrite_person(struct strbuf *buf, const char *what, struct string_list *mailmap)
-{
-       char *person, *endp;
-       size_t len, namelen, maillen;
-       const char *name;
-       const char *mail;
-       struct ident_split ident;
-
-       person = strstr(buf->buf, what);
-       if (!person)
-               return 0;
-
-       person += strlen(what);
-       endp = strchr(person, '\n');
-       if (!endp)
-               return 0;
-
-       len = endp - person;
-
-       if (split_ident_line(&ident, person, len))
-               return 0;
-
-       mail = ident.mail_begin;
-       maillen = ident.mail_end - ident.mail_begin;
-       name = ident.name_begin;
-       namelen = ident.name_end - ident.name_begin;
-
-       if (map_user(mailmap, &mail, &maillen, &name, &namelen)) {
-               struct strbuf namemail = STRBUF_INIT;
-
-               strbuf_addf(&namemail, "%.*s <%.*s>",
-                           (int)namelen, name, (int)maillen, mail);
-
-               strbuf_splice(buf, ident.name_begin - buf->buf,
-                             ident.mail_end - ident.name_begin + 1,
-                             namemail.buf, namemail.len);
-
-               strbuf_release(&namemail);
-
-               return 1;
-       }
-
-       return 0;
-}
-
 static int commit_match(struct commit *commit, struct rev_info *opt)
 {
        int retval;
@@ -3868,11 +3851,12 @@ static int commit_match(struct commit *commit, struct rev_info *opt)
                strbuf_addstr(&buf, message);
 
        if (opt->grep_filter.header_list && opt->mailmap) {
+               const char *commit_headers[] = { "author ", "committer ", NULL };
+
                if (!buf.len)
                        strbuf_addstr(&buf, message);
 
-               commit_rewrite_person(&buf, "\nauthor ", opt->mailmap);
-               commit_rewrite_person(&buf, "\ncommitter ", opt->mailmap);
+               apply_mailmap_to_header(&buf, commit_headers, opt->mailmap);
        }
 
        /* Append "fake" message parts as needed */
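
The per-header commit_rewrite_person() helper is removed in favour of apply_mailmap_to_header(), which rewrites all listed headers in one pass over the buffer. A hedged usage sketch, assuming the usual read_mailmap()/clear_mailmap() helpers and a raw commit buffer passed in by the caller:

static void example_mailmap_headers(const char *commit_buf)
{
	struct strbuf buf = STRBUF_INIT;
	struct string_list mailmap = STRING_LIST_INIT_NODUP;
	const char *headers[] = { "author ", "committer ", NULL };

	read_mailmap(&mailmap);
	strbuf_addstr(&buf, commit_buf);	/* raw commit object text */
	apply_mailmap_to_header(&buf, headers, &mailmap);

	/* buf now carries the mailmapped author/committer idents */
	strbuf_release(&buf);
	clear_mailmap(&mailmap);
}
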
index e576845cdd103731f3e8d3ea1a4b0eefb21e2063..afe1b77985faf5caeea507dad6639441013587c2 100644 (file)
@@ -48,6 +48,7 @@
  */
 #define NOT_USER_GIVEN (1u<<25)
 #define TRACK_LINEAR   (1u<<26)
+#define ANCESTRY_PATH  (1u<<27)
 #define ALL_REV_FLAGS  (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR | PULL_MERGE)
 
 #define DECORATE_SHORT_REFS    1
@@ -164,6 +165,13 @@ struct rev_info {
                        cherry_mark:1,
                        bisect:1,
                        ancestry_path:1,
+
+                       /* True if --ancestry-path was specified without an
+                        * argument. The bottom revisions are implicitly
+                        * the arguments in this case.
+                        */
+                       ancestry_path_implicit_bottoms:1,
+
                        first_parent_only:1,
                        exclude_first_parent_only:1,
                        line_level_traverse:1,
@@ -221,6 +229,7 @@ struct rev_info {
                        missing_newline:1,
                        date_mode_explicit:1,
                        preserve_subject:1,
+                       force_in_body_from:1,
                        encode_email_headers:1,
                        include_header:1;
        unsigned int    disable_stdin:1;
@@ -306,6 +315,7 @@ struct rev_info {
        struct saved_parents *saved_parents_slab;
 
        struct commit_list *previous_parents;
+       struct commit_list *ancestry_path_bottoms;
        const char *break_bar;
 
        struct revision_sources *sources;
@@ -375,7 +385,8 @@ struct setup_revision_opt {
        const char *def;
        void (*tweak)(struct rev_info *, struct setup_revision_opt *);
        unsigned int    assume_dashdash:1,
-                       allow_exclude_promisor_objects:1;
+                       allow_exclude_promisor_objects:1,
+                       free_removed_argv_elements:1;
        unsigned revarg_opt;
 };
 int setup_revisions(int argc, const char **argv, struct rev_info *revs,
index 14f17830f51254511e2c184293a6816a132d287f..5ec3a46dccf959bd54af42fbeaaf4027dc64996a 100644 (file)
@@ -10,6 +10,7 @@
 #include "config.h"
 #include "packfile.h"
 #include "hook.h"
+#include "compat/nonblock.h"
 
 void child_process_init(struct child_process *child)
 {
@@ -1364,12 +1365,25 @@ static int pump_io_round(struct io_pump *slots, int nr, struct pollfd *pfd)
                        continue;
 
                if (io->type == POLLOUT) {
-                       ssize_t len = xwrite(io->fd,
-                                            io->u.out.buf, io->u.out.len);
+                       ssize_t len;
+
+                       /*
+                        * Don't use xwrite() here. It loops forever on EAGAIN,
+                        * and we're in our own poll() loop here.
+                        *
+                        * Note that we lose xwrite()'s handling of MAX_IO_SIZE
+                        * and EINTR, so we have to implement those ourselves.
+                        */
+                       len = write(io->fd, io->u.out.buf,
+                                   io->u.out.len <= MAX_IO_SIZE ?
+                                   io->u.out.len : MAX_IO_SIZE);
                        if (len < 0) {
-                               io->error = errno;
-                               close(io->fd);
-                               io->fd = -1;
+                               if (errno != EINTR && errno != EAGAIN &&
+                                   errno != ENOSPC) {
+                                       io->error = errno;
+                                       close(io->fd);
+                                       io->fd = -1;
+                               }
                        } else {
                                io->u.out.buf += len;
                                io->u.out.len -= len;
@@ -1438,6 +1452,15 @@ int pipe_command(struct child_process *cmd,
                return -1;
 
        if (in) {
+               if (enable_pipe_nonblock(cmd->in) < 0) {
+                       error_errno("unable to make pipe non-blocking");
+                       close(cmd->in);
+                       if (out)
+                               close(cmd->out);
+                       if (err)
+                               close(cmd->err);
+                       return -1;
+               }
                io[nr].fd = cmd->in;
                io[nr].type = POLLOUT;
                io[nr].u.out.buf = in;
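
pipe_command() now puts its stdin pipe into non-blocking mode via the new compat helper, so the write() above can return EAGAIN instead of deadlocking against a child that is itself blocked writing back to us. A hedged POSIX-only sketch of enable_pipe_nonblock(); the in-tree compat/nonblock.c must also cover platforms without fcntl():

#include <fcntl.h>

int enable_pipe_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
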
index bc0fcdbb000769935e9f080164d8e02e81dbb8b3..f2e19838c9c342b3b9d6df5e9bf24d0add0d47d1 100644 (file)
@@ -84,6 +84,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *advertised,
                strvec_push(&po.args, "--progress");
        if (is_repository_shallow(the_repository))
                strvec_push(&po.args, "--shallow");
+       if (args->disable_bitmaps)
+               strvec_push(&po.args, "--no-use-bitmap-index");
        po.in = -1;
        po.out = args->stateless_rpc ? -1 : fd;
        po.git_cmd = 1;
@@ -264,7 +266,7 @@ static int receive_status(struct packet_reader *reader, struct ref *refs)
        return ret;
 }
 
-static int sideband_demux(int in, int out, void *data)
+static int sideband_demux(int in UNUSED, int out, void *data)
 {
        int *fd = data, ret;
        if (async_with_fork())
@@ -487,6 +489,7 @@ int send_pack(struct send_pack_args *args,
        struct async demux;
        const char *push_cert_nonce = NULL;
        struct packet_reader reader;
+       int use_bitmaps;
 
        if (!remote_refs) {
                fprintf(stderr, "No refs in common and none specified; doing nothing.\n"
@@ -498,6 +501,9 @@ int send_pack(struct send_pack_args *args,
        if (push_negotiate)
                get_commons_through_negotiation(args->url, remote_refs, &commons);
 
+       if (!git_config_get_bool("push.usebitmaps", &use_bitmaps))
+               args->disable_bitmaps = !use_bitmaps;
+
        git_config_get_bool("transfer.advertisesid", &advertise_sid);
 
        /* Does the other end support the reporting? */
index e148fcd960994b1724f8d09c4ea4897b1cf069f0..7edb80596c7b0edf607ea5cd0f01315106d02b73 100644 (file)
@@ -26,7 +26,8 @@ struct send_pack_args {
                /* One of the SEND_PACK_PUSH_CERT_* constants. */
                push_cert:2,
                stateless_rpc:1,
-               atomic:1;
+               atomic:1,
+               disable_bitmaps:1;
        const struct string_list *push_options;
 };
 
index 61a8e0020d596de550127d5b66f076034636239c..d26ede83c4b2beb1f14ee368bfc30958c4db6d07 100644 (file)
@@ -35,6 +35,8 @@
 #include "commit-reach.h"
 #include "rebase-interactive.h"
 #include "reset.h"
+#include "branch.h"
+#include "log-tree.h"
 
 #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
 
@@ -147,6 +149,20 @@ static GIT_PATH_FUNC(rebase_path_squash_onto, "rebase-merge/squash-onto")
  */
 static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete")
 
+/*
+ * The update-refs file stores a list of refs that will be updated at the end
+ * of the rebase sequence. The 'update-ref <ref>' commands in the todo file
+ * update the OIDs for the refs in this file, but the refs are not updated
+ * until the end of the rebase sequence.
+ *
+ * rebase_path_update_refs() returns the path to this file for a given
+ * worktree directory. For the current worktree, pass the_repository->gitdir.
+ */
+static char *rebase_path_update_refs(const char *wt_git_dir)
+{
+       return xstrfmt("%s/rebase-merge/update-refs", wt_git_dir);
+}
+
 /*
  * The following files are written by git-rebase just after parsing the
  * command-line.
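
The update-refs file written by write_update_refs_state() and read back by sequencer_get_update_refs_state() (both later in this patch) stores three lines per ref: the refname, the OID the ref had when the rebase started ("before"), and the OID recorded by the 'update-ref' todo command ("after", all zeros until that step runs). A hedged illustration of $GIT_DIR/rebase-merge/update-refs; the refname is made up and the OIDs are placeholders:

	refs/heads/topic-part-1
	<40-hex "before" object ID>
	<40-hex "after" object ID, or all zeros if not yet updated>
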
@@ -169,6 +185,30 @@ static GIT_PATH_FUNC(rebase_path_no_reschedule_failed_exec, "rebase-merge/no-res
 static GIT_PATH_FUNC(rebase_path_drop_redundant_commits, "rebase-merge/drop_redundant_commits")
 static GIT_PATH_FUNC(rebase_path_keep_redundant_commits, "rebase-merge/keep_redundant_commits")
 
+/**
+ * A 'struct update_refs_record' represents a value in the update-refs
+ * list. We use a string_list to map refs to these (before, after) pairs.
+ */
+struct update_ref_record {
+       struct object_id before;
+       struct object_id after;
+};
+
+static struct update_ref_record *init_update_ref_record(const char *ref)
+{
+       struct update_ref_record *rec;
+
+       CALLOC_ARRAY(rec, 1);
+
+       oidcpy(&rec->before, null_oid());
+       oidcpy(&rec->after, null_oid());
+
+       /* This may fail, but that's fine, we will keep the null OID. */
+       read_ref(ref, &rec->before);
+
+       return rec;
+}
+
 static int git_sequencer_config(const char *k, const char *v, void *cb)
 {
        struct replay_opts *opts = cb;
@@ -497,7 +537,7 @@ static struct tree *empty_tree(struct repository *r)
 static int error_dirty_index(struct repository *repo, struct replay_opts *opts)
 {
        if (repo_read_index_unmerged(repo))
-               return error_resolve_conflict(_(action_name(opts)));
+               return error_resolve_conflict(action_name(opts));
 
        error(_("your local changes would be overwritten by %s."),
                _(action_name(opts)));
@@ -535,7 +575,7 @@ static int fast_forward_to(struct repository *r,
        if (checkout_fast_forward(r, from, to, 1))
                return -1; /* the callee should have complained already */
 
-       strbuf_addf(&sb, _("%s: fast-forward"), _(action_name(opts)));
+       strbuf_addf(&sb, "%s: fast-forward", action_name(opts));
 
        transaction = ref_transaction_begin(&err);
        if (!transaction ||
@@ -1689,20 +1729,21 @@ static struct {
        char c;
        const char *str;
 } todo_command_info[] = {
-       { 'p', "pick" },
-       { 0,   "revert" },
-       { 'e', "edit" },
-       { 'r', "reword" },
-       { 'f', "fixup" },
-       { 's', "squash" },
-       { 'x', "exec" },
-       { 'b', "break" },
-       { 'l', "label" },
-       { 't', "reset" },
-       { 'm', "merge" },
-       { 0,   "noop" },
-       { 'd', "drop" },
-       { 0,   NULL }
+       [TODO_PICK] = { 'p', "pick" },
+       [TODO_REVERT] = { 0,   "revert" },
+       [TODO_EDIT] = { 'e', "edit" },
+       [TODO_REWORD] = { 'r', "reword" },
+       [TODO_FIXUP] = { 'f', "fixup" },
+       [TODO_SQUASH] = { 's', "squash" },
+       [TODO_EXEC] = { 'x', "exec" },
+       [TODO_BREAK] = { 'b', "break" },
+       [TODO_LABEL] = { 'l', "label" },
+       [TODO_RESET] = { 't', "reset" },
+       [TODO_MERGE] = { 'm', "merge" },
+       [TODO_UPDATE_REF] = { 'u', "update-ref" },
+       [TODO_NOOP] = { 0,   "noop" },
+       [TODO_DROP] = { 'd', "drop" },
+       [TODO_COMMENT] = { 0,   NULL },
 };
 
 static const char *command_to_string(const enum todo_command command)
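
The new TODO_UPDATE_REF entry gives interactive rebase an 'update-ref <ref>' instruction (shortcut 'u'). A hedged illustration of how it appears in a 'git rebase -i --update-refs' todo list; the abbreviated OIDs, subjects, and branch name are made up:

	pick 1111111 refactor the helper
	update-ref refs/heads/topic-part-1
	pick 2222222 add the feature
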
@@ -2381,7 +2422,7 @@ static int read_and_refresh_cache(struct repository *r,
        if (repo_read_index(r) < 0) {
                rollback_lock_file(&index_lock);
                return error(_("git %s: failed to read the index"),
-                       _(action_name(opts)));
+                       action_name(opts));
        }
        refresh_index(r->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
 
@@ -2389,7 +2430,7 @@ static int read_and_refresh_cache(struct repository *r,
                if (write_locked_index(r->index, &index_lock,
                                       COMMIT_LOCK | SKIP_IF_UNCHANGED)) {
                        return error(_("git %s: failed to refresh the index"),
-                               _(action_name(opts)));
+                               action_name(opts));
                }
        }
 
@@ -2481,7 +2522,7 @@ static int parse_insn_line(struct repository *r, struct todo_item *item,
                             command_to_string(item->command));
 
        if (item->command == TODO_EXEC || item->command == TODO_LABEL ||
-           item->command == TODO_RESET) {
+           item->command == TODO_RESET || item->command == TODO_UPDATE_REF) {
                item->commit = NULL;
                item->arg_offset = bol - buf;
                item->arg_len = (int)(eol - bol);
@@ -3712,7 +3753,7 @@ static int do_reset(struct repository *r,
        init_checkout_metadata(&unpack_tree_opts.meta, name, &oid, NULL);
 
        if (repo_read_index_unmerged(r)) {
-               ret = error_resolve_conflict(_(action_name(opts)));
+               ret = error_resolve_conflict(action_name(opts));
                goto cleanup;
        }
 
@@ -4081,6 +4122,221 @@ leave_merge:
        return ret;
 }
 
+static int write_update_refs_state(struct string_list *refs_to_oids)
+{
+       int result = 0;
+       struct lock_file lock = LOCK_INIT;
+       FILE *fp = NULL;
+       struct string_list_item *item;
+       char *path;
+
+       if (!refs_to_oids->nr)
+               return 0;
+
+       path = rebase_path_update_refs(the_repository->gitdir);
+
+       if (safe_create_leading_directories(path)) {
+               result = error(_("unable to create leading directories of %s"),
+                              path);
+               goto cleanup;
+       }
+
+       if (hold_lock_file_for_update(&lock, path, 0) < 0) {
+               result = error(_("another 'rebase' process appears to be running; "
+                                "'%s.lock' already exists"),
+                              path);
+               goto cleanup;
+       }
+
+       fp = fdopen_lock_file(&lock, "w");
+       if (!fp) {
+               result = error_errno(_("could not open '%s' for writing"), path);
+               rollback_lock_file(&lock);
+               goto cleanup;
+       }
+
+       for_each_string_list_item(item, refs_to_oids) {
+               struct update_ref_record *rec = item->util;
+               fprintf(fp, "%s\n%s\n%s\n", item->string,
+                       oid_to_hex(&rec->before), oid_to_hex(&rec->after));
+       }
+
+       result = commit_lock_file(&lock);
+
+cleanup:
+       free(path);
+       return result;
+}
+
+/*
+ * Parse the update-refs file for the current rebase, then remove the
+ * refs that do not appear in the todo_list (and have not had updated
+ * values stored) and add refs that are in the todo_list but not
+ * represented in the update-refs file.
+ *
+ * If there are changes to the update-refs list, then write the new state
+ * to disk.
+ */
+void todo_list_filter_update_refs(struct repository *r,
+                                 struct todo_list *todo_list)
+{
+       int i;
+       int updated = 0;
+       struct string_list update_refs = STRING_LIST_INIT_DUP;
+
+       sequencer_get_update_refs_state(r->gitdir, &update_refs);
+
+       /*
+        * For each item in the update_refs list, if it has no updated
+        * value and does not appear in the todo_list, then remove it
+        * from the update_refs list.
+        */
+       for (i = 0; i < update_refs.nr; i++) {
+               int j;
+               int found = 0;
+               const char *ref = update_refs.items[i].string;
+               size_t reflen = strlen(ref);
+               struct update_ref_record *rec = update_refs.items[i].util;
+
+               /* OID already stored as updated. */
+               if (!is_null_oid(&rec->after))
+                       continue;
+
+               for (j = 0; !found && j < todo_list->total_nr; j++) {
+                       struct todo_item *item = &todo_list->items[j];
+                       const char *arg = todo_list->buf.buf + item->arg_offset;
+
+                       if (item->command != TODO_UPDATE_REF)
+                               continue;
+
+                       if (item->arg_len != reflen ||
+                           strncmp(arg, ref, reflen))
+                               continue;
+
+                       found = 1;
+               }
+
+               if (!found) {
+                       free(update_refs.items[i].string);
+                       free(update_refs.items[i].util);
+
+                       update_refs.nr--;
+                       MOVE_ARRAY(update_refs.items + i, update_refs.items + i + 1, update_refs.nr - i);
+
+                       updated = 1;
+                       i--;
+               }
+       }
+
+       /*
+        * For each todo_item, check if its ref is in the update_refs list.
+        * If not, then add it as an un-updated ref.
+        */
+       for (i = 0; i < todo_list->total_nr; i++) {
+               struct todo_item *item = &todo_list->items[i];
+               const char *arg = todo_list->buf.buf + item->arg_offset;
+               int j, found = 0;
+
+               if (item->command != TODO_UPDATE_REF)
+                       continue;
+
+               for (j = 0; !found && j < update_refs.nr; j++) {
+                       const char *ref = update_refs.items[j].string;
+
+                       found = strlen(ref) == item->arg_len &&
+                               !strncmp(ref, arg, item->arg_len);
+               }
+
+               if (!found) {
+                       struct string_list_item *inserted;
+                       struct strbuf argref = STRBUF_INIT;
+
+                       strbuf_add(&argref, arg, item->arg_len);
+                       inserted = string_list_insert(&update_refs, argref.buf);
+                       inserted->util = init_update_ref_record(argref.buf);
+                       strbuf_release(&argref);
+                       updated = 1;
+               }
+       }
+
+       if (updated)
+               write_update_refs_state(&update_refs);
+       string_list_clear(&update_refs, 1);
+}
+
+static int do_update_ref(struct repository *r, const char *refname)
+{
+       struct string_list_item *item;
+       struct string_list list = STRING_LIST_INIT_DUP;
+
+       if (sequencer_get_update_refs_state(r->gitdir, &list))
+               return -1;
+
+       for_each_string_list_item(item, &list) {
+               if (!strcmp(item->string, refname)) {
+                       struct update_ref_record *rec = item->util;
+                       if (read_ref("HEAD", &rec->after))
+                               return -1;
+                       break;
+               }
+       }
+
+       write_update_refs_state(&list);
+       string_list_clear(&list, 1);
+       return 0;
+}
+
+static int do_update_refs(struct repository *r, int quiet)
+{
+       int res = 0;
+       struct string_list_item *item;
+       struct string_list refs_to_oids = STRING_LIST_INIT_DUP;
+       struct ref_store *refs = get_main_ref_store(r);
+       struct strbuf update_msg = STRBUF_INIT;
+       struct strbuf error_msg = STRBUF_INIT;
+
+       if ((res = sequencer_get_update_refs_state(r->gitdir, &refs_to_oids)))
+               return res;
+
+       for_each_string_list_item(item, &refs_to_oids) {
+               struct update_ref_record *rec = item->util;
+               int loop_res;
+
+               loop_res = refs_update_ref(refs, "rewritten during rebase",
+                                          item->string,
+                                          &rec->after, &rec->before,
+                                          0, UPDATE_REFS_MSG_ON_ERR);
+               res |= loop_res;
+
+               if (quiet)
+                       continue;
+
+               if (loop_res)
+                       strbuf_addf(&error_msg, "\t%s\n", item->string);
+               else
+                       strbuf_addf(&update_msg, "\t%s\n", item->string);
+       }
+
+       if (!quiet &&
+           (update_msg.len || error_msg.len)) {
+               fprintf(stderr,
+                       _("Updated the following refs with %s:\n%s"),
+                       "--update-refs",
+                       update_msg.buf);
+
+               if (res)
+                       fprintf(stderr,
+                               _("Failed to update the following refs with %s:\n%s"),
+                               "--update-refs",
+                               error_msg.buf);
+       }
+
+       string_list_clear(&refs_to_oids, 1);
+       strbuf_release(&update_msg);
+       strbuf_release(&error_msg);
+       return res;
+}
+
 static int is_final_fixup(struct todo_list *todo_list)
 {
        int i = todo_list->current;
@@ -4456,6 +4712,12 @@ static int pick_commits(struct repository *r,
                                return error_with_patch(r, item->commit,
                                                        arg, item->arg_len,
                                                        opts, res, 0);
+               } else if (item->command == TODO_UPDATE_REF) {
+                       struct strbuf ref = STRBUF_INIT;
+                       strbuf_add(&ref, arg, item->arg_len);
+                       if ((res = do_update_ref(r, ref.buf)))
+                               reschedule = 1;
+                       strbuf_release(&ref);
                } else if (!is_noop(item->command))
                        return error(_("unknown command %d"), item->command);
 
@@ -4591,6 +4853,9 @@ cleanup_head_ref:
 
                strbuf_release(&buf);
                strbuf_release(&head_ref);
+
+               if (do_update_refs(r, opts->quiet))
+                       return -1;
        }
 
        /*
@@ -4989,7 +5254,8 @@ struct labels_entry {
        char label[FLEX_ARRAY];
 };
 
-static int labels_cmp(const void *fndata, const struct hashmap_entry *eptr,
+static int labels_cmp(const void *fndata UNUSED,
+                     const struct hashmap_entry *eptr,
                      const struct hashmap_entry *entry_or_key, const void *key)
 {
        const struct labels_entry *a, *b;
@@ -5638,10 +5904,135 @@ static int skip_unnecessary_picks(struct repository *r,
        return 0;
 }
 
+struct todo_add_branch_context {
+       struct todo_item *items;
+       size_t items_nr;
+       size_t items_alloc;
+       struct strbuf *buf;
+       struct commit *commit;
+       struct string_list refs_to_oids;
+};
+
+static int add_decorations_to_list(const struct commit *commit,
+                                  struct todo_add_branch_context *ctx)
+{
+       const struct name_decoration *decoration = get_name_decoration(&commit->object);
+       const char *head_ref = resolve_ref_unsafe("HEAD",
+                                                 RESOLVE_REF_READING,
+                                                 NULL,
+                                                 NULL);
+
+       while (decoration) {
+               struct todo_item *item;
+               const char *path;
+               size_t base_offset = ctx->buf->len;
+
+               /*
+                * If the branch is the current HEAD, then it will be
+                * updated by the default rebase behavior.
+                */
+               if (head_ref && !strcmp(head_ref, decoration->name)) {
+                       decoration = decoration->next;
+                       continue;
+               }
+
+               ALLOC_GROW(ctx->items,
+                       ctx->items_nr + 1,
+                       ctx->items_alloc);
+               item = &ctx->items[ctx->items_nr];
+               memset(item, 0, sizeof(*item));
+
+               /* If the branch is checked out, then leave a comment instead. */
+               if ((path = branch_checked_out(decoration->name))) {
+                       item->command = TODO_COMMENT;
+                       strbuf_addf(ctx->buf, "# Ref %s checked out at '%s'\n",
+                                   decoration->name, path);
+               } else {
+                       struct string_list_item *sti;
+                       item->command = TODO_UPDATE_REF;
+                       strbuf_addf(ctx->buf, "%s\n", decoration->name);
+
+                       sti = string_list_insert(&ctx->refs_to_oids,
+                                                decoration->name);
+                       sti->util = init_update_ref_record(decoration->name);
+               }
+
+               item->offset_in_buf = base_offset;
+               item->arg_offset = base_offset;
+               item->arg_len = ctx->buf->len - base_offset;
+               ctx->items_nr++;
+
+               decoration = decoration->next;
+       }
+
+       return 0;
+}
+
+/*
+ * For each 'pick' command, find out if the commit has a decoration in
+ * refs/heads/. If so, then add a 'label for-update-refs/' command.
+ */
+static int todo_list_add_update_ref_commands(struct todo_list *todo_list)
+{
+       int i, res;
+       static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
+       static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
+       static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
+       struct decoration_filter decoration_filter = {
+               .include_ref_pattern = &decorate_refs_include,
+               .exclude_ref_pattern = &decorate_refs_exclude,
+               .exclude_ref_config_pattern = &decorate_refs_exclude_config,
+       };
+       struct todo_add_branch_context ctx = {
+               .buf = &todo_list->buf,
+               .refs_to_oids = STRING_LIST_INIT_DUP,
+       };
+
+       ctx.items_alloc = 2 * todo_list->nr + 1;
+       ALLOC_ARRAY(ctx.items, ctx.items_alloc);
+
+       string_list_append(&decorate_refs_include, "refs/heads/");
+       load_ref_decorations(&decoration_filter, 0);
+
+       for (i = 0; i < todo_list->nr; ) {
+               struct todo_item *item = &todo_list->items[i];
+
+               /* insert ith item into new list */
+               ALLOC_GROW(ctx.items,
+                          ctx.items_nr + 1,
+                          ctx.items_alloc);
+
+               ctx.items[ctx.items_nr++] = todo_list->items[i++];
+
+               if (item->commit) {
+                       ctx.commit = item->commit;
+                       add_decorations_to_list(item->commit, &ctx);
+               }
+       }
+
+       res = write_update_refs_state(&ctx.refs_to_oids);
+
+       string_list_clear(&ctx.refs_to_oids, 1);
+
+       if (res) {
+               /* we failed, so clean up the new list. */
+               free(ctx.items);
+               return res;
+       }
+
+       free(todo_list->items);
+       todo_list->items = ctx.items;
+       todo_list->nr = ctx.items_nr;
+       todo_list->alloc = ctx.items_alloc;
+
+       return 0;
+}
+
 int complete_action(struct repository *r, struct replay_opts *opts, unsigned flags,
                    const char *shortrevisions, const char *onto_name,
                    struct commit *onto, const struct object_id *orig_head,
                    struct string_list *commands, unsigned autosquash,
+                   unsigned update_refs,
                    struct todo_list *todo_list)
 {
        char shortonto[GIT_MAX_HEXSZ + 1];
@@ -5660,6 +6051,9 @@ int complete_action(struct repository *r, struct replay_opts *opts, unsigned fla
                item->arg_len = item->arg_offset = item->flags = item->offset_in_buf = 0;
        }
 
+       if (update_refs && todo_list_add_update_ref_commands(todo_list))
+               return -1;
+
        if (autosquash && todo_list_rearrange_squash(todo_list))
                return -1;
 
@@ -5738,7 +6132,7 @@ struct subject2item_entry {
        char subject[FLEX_ARRAY];
 };
 
-static int subject2item_cmp(const void *fndata,
+static int subject2item_cmp(const void *fndata UNUSED,
                            const struct hashmap_entry *eptr,
                            const struct hashmap_entry *entry_or_key,
                            const void *key)
@@ -5936,3 +6330,54 @@ int sequencer_determine_whence(struct repository *r, enum commit_whence *whence)
 
        return 0;
 }
+
+int sequencer_get_update_refs_state(const char *wt_dir,
+                                   struct string_list *refs)
+{
+       int result = 0;
+       FILE *fp = NULL;
+       struct strbuf ref = STRBUF_INIT;
+       struct strbuf hash = STRBUF_INIT;
+       struct update_ref_record *rec = NULL;
+
+       char *path = rebase_path_update_refs(wt_dir);
+
+       fp = fopen(path, "r");
+       if (!fp)
+               goto cleanup;
+
+       while (strbuf_getline(&ref, fp) != EOF) {
+               struct string_list_item *item;
+
+               CALLOC_ARRAY(rec, 1);
+
+               if (strbuf_getline(&hash, fp) == EOF ||
+                   get_oid_hex(hash.buf, &rec->before)) {
+                       warning(_("update-refs file at '%s' is invalid"),
+                                 path);
+                       result = -1;
+                       goto cleanup;
+               }
+
+               if (strbuf_getline(&hash, fp) == EOF ||
+                   get_oid_hex(hash.buf, &rec->after)) {
+                       warning(_("update-refs file at '%s' is invalid"),
+                                 path);
+                       result = -1;
+                       goto cleanup;
+               }
+
+               item = string_list_insert(refs, ref.buf);
+               item->util = rec;
+               rec = NULL;
+       }
+
+cleanup:
+       if (fp)
+               fclose(fp);
+       free(path);
+       free(rec);
+       strbuf_release(&ref);
+       strbuf_release(&hash);
+       return result;
+}
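
A hedged caller-side sketch of the reader above, as it would look from code inside sequencer.c (update_ref_record is file-local): the string_list maps each refname to its before/after pair, and the util pointers are freed by string_list_clear(..., 1):

static void example_dump_update_refs_state(struct repository *r)
{
	struct string_list refs = STRING_LIST_INIT_DUP;
	struct string_list_item *item;

	if (!sequencer_get_update_refs_state(r->gitdir, &refs)) {
		for_each_string_list_item(item, &refs) {
			struct update_ref_record *rec = item->util;

			fprintf(stderr, "%s: %s -> %s\n", item->string,
				oid_to_hex(&rec->before),
				oid_to_hex(&rec->after));
		}
	}
	string_list_clear(&refs, 1);
}
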
index 698599fe4e915ed4acbbf17995b0df201b723321..563fe5993340a0d0a18b148d4280c1d5a3f8d307 100644 (file)
@@ -96,6 +96,7 @@ enum todo_command {
        TODO_LABEL,
        TODO_RESET,
        TODO_MERGE,
+       TODO_UPDATE_REF,
        /* commands that do nothing but are counted for reporting progress */
        TODO_NOOP,
        TODO_DROP,
@@ -132,6 +133,18 @@ void todo_list_release(struct todo_list *todo_list);
 const char *todo_item_get_arg(struct todo_list *todo_list,
                              struct todo_item *item);
 
+/*
+ * Parse the update-refs file for the current rebase, then remove the
+ * refs that do not appear in the todo_list (and have not had updated
+ * values stored) and add refs that are in the todo_list but not
+ * represented in the update-refs file.
+ *
+ * If there are changes to the update-refs list, then write the new state
+ * to disk.
+ */
+void todo_list_filter_update_refs(struct repository *r,
+                                 struct todo_list *todo_list);
+
 /* Call this to setup defaults before parsing command line options */
 void sequencer_init_config(struct replay_opts *opts);
 int sequencer_pick_revisions(struct repository *repo,
@@ -167,6 +180,7 @@ int complete_action(struct repository *r, struct replay_opts *opts, unsigned fla
                    const char *shortrevisions, const char *onto_name,
                    struct commit *onto, const struct object_id *orig_head,
                    struct string_list *commands, unsigned autosquash,
+                   unsigned update_refs,
                    struct todo_list *todo_list);
 int todo_list_rearrange_squash(struct todo_list *todo_list);
 
@@ -233,4 +247,13 @@ void sequencer_post_commit_cleanup(struct repository *r, int verbose);
 int sequencer_get_last_command(struct repository* r,
                               enum replay_action *action);
 int sequencer_determine_whence(struct repository *r, enum commit_whence *whence);
+
+/**
+ * Append the set of ref-OID pairs that are currently stored for the 'git
+ * rebase --update-refs' feature if such a rebase is currently happening.
+ *
+ * Localized to a worktree's git dir.
+ */
+int sequencer_get_update_refs_state(const char *wt_dir, struct string_list *refs);
+
 #endif /* SEQUENCER_H */
index 7701d7c20a12af53d2f91737f12f3dfb97ac59e9..0ec6c0c16546a7a2ebb45a8e8a03e3270cedf214 100644 (file)
@@ -147,7 +147,8 @@ out:
 }
 
 static int add_info_ref(const char *path, const struct object_id *oid,
-                       int flag, void *cb_data)
+                       int flag UNUSED,
+                       void *cb_data)
 {
        struct update_info_ctx *uic = cb_data;
        struct object *o = parse_object(the_repository, oid);
diff --git a/setup.c b/setup.c
index 640f6ea4d03f58147fa51e59d213adac9c37d3ba..cefd5f63c4680f7f656084ef72f74784f86e4562 100644 (file)
--- a/setup.c
+++ b/setup.c
 static int inside_git_dir = -1;
 static int inside_work_tree = -1;
 static int work_tree_config_is_bogus;
+enum allowed_bare_repo {
+       ALLOWED_BARE_REPO_EXPLICIT = 0,
+       ALLOWED_BARE_REPO_ALL,
+};
 
 static struct startup_info the_startup_info;
 struct startup_info *startup_info = &the_startup_info;
@@ -1156,11 +1160,51 @@ static int ensure_valid_ownership(const char *gitfile,
         * constant regardless of what failed above. data.is_safe should be
         * initialized to false, and might be changed by the callback.
         */
-       read_very_early_config(safe_directory_cb, &data);
+       git_protected_config(safe_directory_cb, &data);
 
        return data.is_safe;
 }
 
+static int allowed_bare_repo_cb(const char *key, const char *value, void *d)
+{
+       enum allowed_bare_repo *allowed_bare_repo = d;
+
+       if (strcasecmp(key, "safe.bareRepository"))
+               return 0;
+
+       if (!strcmp(value, "explicit")) {
+               *allowed_bare_repo = ALLOWED_BARE_REPO_EXPLICIT;
+               return 0;
+       }
+       if (!strcmp(value, "all")) {
+               *allowed_bare_repo = ALLOWED_BARE_REPO_ALL;
+               return 0;
+       }
+       return -1;
+}
+
+static enum allowed_bare_repo get_allowed_bare_repo(void)
+{
+       enum allowed_bare_repo result = ALLOWED_BARE_REPO_ALL;
+       git_protected_config(allowed_bare_repo_cb, &result);
+       return result;
+}
+
+static const char *allowed_bare_repo_to_string(
+       enum allowed_bare_repo allowed_bare_repo)
+{
+       switch (allowed_bare_repo) {
+       case ALLOWED_BARE_REPO_EXPLICIT:
+               return "explicit";
+       case ALLOWED_BARE_REPO_ALL:
+               return "all";
+       default:
+               BUG("invalid allowed_bare_repo %d",
+                   allowed_bare_repo);
+       }
+       return NULL;
+}
+
 enum discovery_result {
        GIT_DIR_NONE = 0,
        GIT_DIR_EXPLICIT,
@@ -1170,7 +1214,8 @@ enum discovery_result {
        GIT_DIR_HIT_CEILING = -1,
        GIT_DIR_HIT_MOUNT_POINT = -2,
        GIT_DIR_INVALID_GITFILE = -3,
-       GIT_DIR_INVALID_OWNERSHIP = -4
+       GIT_DIR_INVALID_OWNERSHIP = -4,
+       GIT_DIR_DISALLOWED_BARE = -5,
 };
 
 /*
@@ -1300,6 +1345,8 @@ static enum discovery_result setup_git_directory_gently_1(struct strbuf *dir,
                }
 
                if (is_git_directory(dir->buf)) {
+                       if (get_allowed_bare_repo() == ALLOWED_BARE_REPO_EXPLICIT)
+                               return GIT_DIR_DISALLOWED_BARE;
                        if (!ensure_valid_ownership(NULL, NULL, dir->buf, report))
                                return GIT_DIR_INVALID_OWNERSHIP;
                        strbuf_addstr(gitdir, ".");
@@ -1448,6 +1495,14 @@ const char *setup_git_directory_gently(int *nongit_ok)
                }
                *nongit_ok = 1;
                break;
+       case GIT_DIR_DISALLOWED_BARE:
+               if (!nongit_ok) {
+                       die(_("cannot use bare repository '%s' (safe.bareRepository is '%s')"),
+                           dir.buf,
+                           allowed_bare_repo_to_string(get_allowed_bare_repo()));
+               }
+               *nongit_ok = 1;
+               break;
        case GIT_DIR_NONE:
                /*
                 * As a safeguard against setup_git_directory_gently_1 returning
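
safe.bareRepository is read through git_protected_config(), i.e. only from protected scopes (system, global, command line), so a repository cannot loosen the policy for itself. A hedged illustration of the global configuration that makes discovery of a bare repository take the new GIT_DIR_DISALLOWED_BARE path; running git inside a bare repository then dies unless the repository is named explicitly via --git-dir or GIT_DIR:

	[safe]
		bareRepository = explicit
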
diff --git a/sha256/nettle.h b/sha256/nettle.h
new file mode 100644 (file)
index 0000000..b63e1c8
--- /dev/null
+++ b/sha256/nettle.h
@@ -0,0 +1,31 @@
+#ifndef SHA256_NETTLE_H
+#define SHA256_NETTLE_H
+
+#include <nettle/sha2.h>
+
+typedef struct sha256_ctx nettle_SHA256_CTX;
+
+static inline void nettle_SHA256_Init(nettle_SHA256_CTX *ctx)
+{
+       sha256_init(ctx);
+}
+
+static inline void nettle_SHA256_Update(nettle_SHA256_CTX *ctx,
+                                       const void *data,
+                                       size_t len)
+{
+       sha256_update(ctx, len, data);
+}
+
+static inline void nettle_SHA256_Final(unsigned char *digest,
+                                      nettle_SHA256_CTX *ctx)
+{
+       sha256_digest(ctx, SHA256_DIGEST_SIZE, digest);
+}
+
+#define platform_SHA256_CTX nettle_SHA256_CTX
+#define platform_SHA256_Init nettle_SHA256_Init
+#define platform_SHA256_Update nettle_SHA256_Update
+#define platform_SHA256_Final nettle_SHA256_Final
+
+#endif
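
The new header adapts Nettle's incremental SHA-256 API (note the argument order swap in sha256_update()) to the platform_SHA256_* names Git's hash layer expects when the corresponding Makefile knob selects Nettle. A hedged standalone usage sketch; the include path assumes compiling from the Git source tree, and the program links with -lnettle:

#include <stdio.h>
#include "sha256/nettle.h"

int main(void)
{
	nettle_SHA256_CTX ctx;
	unsigned char digest[SHA256_DIGEST_SIZE];

	platform_SHA256_Init(&ctx);
	platform_SHA256_Update(&ctx, "hello, world", 12);
	platform_SHA256_Final(digest, &ctx);

	for (size_t i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}
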
index 8cb768ee5f886fdafcf16221e5878b8fc080f253..17f9bcdb5f38270c4f5910a2e1c930432ba350e5 100644 (file)
--- a/shallow.c
+++ b/shallow.c
@@ -604,8 +604,10 @@ static void paint_down(struct paint_info *info, const struct object_id *oid,
        free(tmp);
 }
 
-static int mark_uninteresting(const char *refname, const struct object_id *oid,
-                             int flags, void *cb_data)
+static int mark_uninteresting(const char *refname UNUSED,
+                             const struct object_id *oid,
+                             int flags UNUSED,
+                             void *cb_data UNUSED)
 {
        struct commit *commit = lookup_commit_reference_gently(the_repository,
                                                               oid, 1);
@@ -715,8 +717,10 @@ struct commit_array {
        int nr, alloc;
 };
 
-static int add_ref(const char *refname, const struct object_id *oid,
-                  int flags, void *cb_data)
+static int add_ref(const char *refname UNUSED,
+                  const struct object_id *oid,
+                  int flags UNUSED,
+                  void *cb_data)
 {
        struct commit_array *ca = cb_data;
        ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
index 4330192e9c363df8103e27d5ccb06769adfbc0b8..33f43edbf9a6b2e156f15628869c1d6c1755cd6f 100644 (file)
@@ -70,6 +70,7 @@ ifndef V
        QUIET_HDR      = @echo '   ' HDR $(<:hcc=h);
        QUIET_RC       = @echo '   ' RC $@;
        QUIET_SPATCH   = @echo '   ' SPATCH $<;
+       QUIET_SPATCH_T = @echo '   ' SPATCH TEST $(@:.build/%=%);
 
 ## Used in "Documentation/Makefile"
        QUIET_ASCIIDOC  = @echo '   ' ASCIIDOC $@;
index dd9eb85527ab5a2f5007907b2cb3ce76ed5ccc3f..0890b1405c5cc6888396bac8558549ebdb2e2977 100644 (file)
--- a/strbuf.c
+++ b/strbuf.c
@@ -436,7 +436,7 @@ void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn,
 
 size_t strbuf_expand_literal_cb(struct strbuf *sb,
                                const char *placeholder,
-                               void *context)
+                               void *context UNUSED)
 {
        int ch;
 
index fe54665d86e72a3400a6f60f610a5264bc0f0667..7b2f8b2b9384b8c9c516be005c9b9f811348b0c7 100644 (file)
@@ -328,9 +328,9 @@ static int close_istream_pack_non_delta(struct git_istream *st)
 }
 
 static int open_istream_pack_non_delta(struct git_istream *st,
-                                      struct repository *r,
-                                      const struct object_id *oid,
-                                      enum object_type *type)
+                                      struct repository *r UNUSED,
+                                      const struct object_id *oid UNUSED,
+                                      enum object_type *type UNUSED)
 {
        struct pack_window *window;
        enum object_type in_pack_type;
index ee48635708219e66bf03bad6f5e9100c766d2fb7..c7b9c52174c54c5bc1986e927e0be67bf1983625 100644 (file)
--- a/strmap.c
+++ b/strmap.c
@@ -2,10 +2,10 @@
 #include "strmap.h"
 #include "mem-pool.h"
 
-int cmp_strmap_entry(const void *hashmap_cmp_fn_data,
+int cmp_strmap_entry(const void *hashmap_cmp_fn_data UNUSED,
                     const struct hashmap_entry *entry1,
                     const struct hashmap_entry *entry2,
-                    const void *keydata)
+                    const void *keydata UNUSED)
 {
        const struct strmap_entry *e1, *e2;
 
index cae56ae6b8077547a35eda62bb3d250ca6d6510b..6d4232294dbee7ad2928b0ac12e1860dcf04a12d 100644 (file)
@@ -5,10 +5,10 @@
 #include "sigchain.h"
 #include "pkt-line.h"
 
-int cmd2process_cmp(const void *unused_cmp_data,
+int cmd2process_cmp(const void *cmp_data UNUSED,
                    const struct hashmap_entry *eptr,
                    const struct hashmap_entry *entry_or_key,
-                   const void *unused_keydata)
+                   const void *keydata UNUSED)
 {
        const struct subprocess_entry *e1, *e2;
 
index c2ac7e7bf39b2eb9c273a11fb457b0dbc7369aa6..cd7ee236a120bc99d91ec615e94a6cd4240937f0 100644 (file)
@@ -38,10 +38,10 @@ enum lookup_type {
        lookup_path
 };
 
-static int config_path_cmp(const void *unused_cmp_data,
+static int config_path_cmp(const void *cmp_data UNUSED,
                           const struct hashmap_entry *eptr,
                           const struct hashmap_entry *entry_or_key,
-                          const void *unused_keydata)
+                          const void *keydata UNUSED)
 {
        const struct submodule_entry *a, *b;
 
@@ -52,10 +52,10 @@ static int config_path_cmp(const void *unused_cmp_data,
               !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
 }
 
-static int config_name_cmp(const void *unused_cmp_data,
+static int config_name_cmp(const void *cmp_data UNUSED,
                           const struct hashmap_entry *eptr,
                           const struct hashmap_entry *entry_or_key,
-                          const void *unused_keydata)
+                          const void *keydata UNUSED)
 {
        const struct submodule_entry *a, *b;
 
index 4e299f578f96f17110114f441e6370085c452e40..bf7a2c79183e17eb3b6c4c8487938de6c01be565 100644 (file)
@@ -213,7 +213,8 @@ void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
 }
 
 /* Cheap function that only determines if we're interested in submodules at all */
-int git_default_submodule_config(const char *var, const char *value, void *cb)
+int git_default_submodule_config(const char *var, const char *value,
+                                void *cb UNUSED)
 {
        if (!strcmp(var, "submodule.recurse")) {
                int v = git_config_bool(var, value) ?
@@ -415,10 +416,9 @@ int parse_submodule_update_strategy(const char *value,
        return 0;
 }
 
-const char *submodule_strategy_to_string(const struct submodule_update_strategy *s)
+const char *submodule_update_type_to_string(enum submodule_update_type type)
 {
-       struct strbuf sb = STRBUF_INIT;
-       switch (s->type) {
+       switch (type) {
        case SM_UPDATE_CHECKOUT:
                return "checkout";
        case SM_UPDATE_MERGE:
@@ -428,12 +428,11 @@ const char *submodule_strategy_to_string(const struct submodule_update_strategy
        case SM_UPDATE_NONE:
                return "none";
        case SM_UPDATE_UNSPECIFIED:
-               return NULL;
        case SM_UPDATE_COMMAND:
-               strbuf_addf(&sb, "!%s", s->command);
-               return strbuf_detach(&sb, NULL);
+               BUG("init_submodule() should handle type %d", type);
+       default:
+               BUG("unexpected update strategy type: %d", type);
        }
-       return NULL;
 }
 
 void handle_ignore_submodules_arg(struct diff_options *diffopt,
@@ -940,8 +939,9 @@ static void free_submodules_data(struct string_list *submodules)
        string_list_clear(submodules, 1);
 }
 
-static int has_remote(const char *refname, const struct object_id *oid,
-                     int flags, void *cb_data)
+static int has_remote(const char *refname UNUSED,
+                     const struct object_id *oid UNUSED,
+                     int flags UNUSED, void *cb_data UNUSED)
 {
        return 1;
 }
@@ -1243,8 +1243,9 @@ int push_unpushed_submodules(struct repository *r,
        return ret;
 }
 
-static int append_oid_to_array(const char *ref, const struct object_id *oid,
-                              int flags, void *data)
+static int append_oid_to_array(const char *ref UNUSED,
+                              const struct object_id *oid,
+                              int flags UNUSED, void *data)
 {
        struct oid_array *array = data;
        oid_array_append(array, oid);
@@ -2374,7 +2375,7 @@ void absorb_git_dir_into_superproject(const char *path,
                cp.no_stdin = 1;
                strvec_pushl(&cp.args, "--super-prefix", sb.buf,
                             "submodule--helper",
-                            "absorb-git-dirs", NULL);
+                            "absorbgitdirs", NULL);
                prepare_submodule_repo_env(&cp.env);
                if (run_command(&cp))
                        die(_("could not recurse into submodule '%s'"), path);
@@ -2388,7 +2389,7 @@ int get_superproject_working_tree(struct strbuf *buf)
        struct child_process cp = CHILD_PROCESS_INIT;
        struct strbuf sb = STRBUF_INIT;
        struct strbuf one_up = STRBUF_INIT;
-       const char *cwd = xgetcwd();
+       char *cwd = xgetcwd();
        int ret = 0;
        const char *subpath;
        int code;
@@ -2451,6 +2452,7 @@ int get_superproject_working_tree(struct strbuf *buf)
                ret = 1;
                free(super_wt);
        }
+       free(cwd);
        strbuf_release(&sb);
 
        code = finish_command(&cp);
index bfaa9da1868af5e201db3a831b283924cf7c8a89..6a9fec6de1159f0389df2391c2d18e8d5fd7d297 100644 (file)
@@ -72,7 +72,7 @@ void die_path_inside_submodule(struct index_state *istate,
 enum submodule_update_type parse_submodule_update_type(const char *value);
 int parse_submodule_update_strategy(const char *value,
                                    struct submodule_update_strategy *dst);
-const char *submodule_strategy_to_string(const struct submodule_update_strategy *s);
+const char *submodule_update_type_to_string(enum submodule_update_type type);
 void handle_ignore_submodules_arg(struct diff_options *, const char *);
 void show_submodule_diff_summary(struct diff_options *o, const char *path,
                            struct object_id *one, struct object_id *two,
index 056ce55dcc92c923ce560ef61a0b2888d799c450..1c80c0c79a05d8de64b2ac0702112337d2db60f1 100644 (file)
@@ -35,7 +35,6 @@ TEST_RESULTS_DIRECTORY_SQ = $(subst ','\'',$(TEST_RESULTS_DIRECTORY))
 CHAINLINTTMP_SQ = $(subst ','\'',$(CHAINLINTTMP))
 
 T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
-TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh))
 THELPERS = $(sort $(filter-out $(T),$(wildcard *.sh)))
 TPERF = $(sort $(wildcard perf/p[0-9][0-9][0-9][0-9]-*.sh))
 CHAINLINTTESTS = $(sort $(patsubst chainlint/%.test,%,$(wildcard chainlint/*.test)))
@@ -63,7 +62,7 @@ pre-clean:
        $(RM) -r '$(TEST_RESULTS_DIRECTORY_SQ)'
 
 clean-except-prove-cache: clean-chainlint
-       $(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)'
+       $(RM) -r 'trash directory'.*
        $(RM) -r valgrind/bin
 
 clean: clean-except-prove-cache
@@ -112,9 +111,6 @@ aggregate-results:
                echo "$$f"; \
        done | '$(SHELL_PATH_SQ)' ./aggregate-results.sh
 
-gitweb-test:
-       $(MAKE) $(TGITWEB)
-
 valgrind:
        $(MAKE) GIT_TEST_OPTS="$(GIT_TEST_OPTS) --valgrind"
 
index 4f9981cf5e3c29d255ed0d2a3f399cd992e961c7..2f439f96589f821b74e7f203c8627d8b5a807bdf 100644 (file)
--- a/t/README
+++ b/t/README
@@ -366,12 +366,47 @@ excluded as so much relies on it, but this might change in the future.
 GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole
 test suite. Accept any boolean values that are accepted by git-config.
 
-GIT_TEST_PASSING_SANITIZE_LEAK=<boolean> when compiled with
-SANITIZE=leak will run only those tests that have whitelisted
-themselves as passing with no memory leaks. Tests can be whitelisted
-by setting "TEST_PASSES_SANITIZE_LEAK=true" before sourcing
-"test-lib.sh" itself at the top of the test script. This test mode is
-used by the "linux-leaks" CI target.
+GIT_TEST_PASSING_SANITIZE_LEAK=true skips those tests that haven't
+declared themselves as leak-free by setting
+"TEST_PASSES_SANITIZE_LEAK=true" before sourcing "test-lib.sh". This
+test mode is used by the "linux-leaks" CI target.
+
+GIT_TEST_PASSING_SANITIZE_LEAK=check checks that our
+"TEST_PASSES_SANITIZE_LEAK=true" markings are current. Rather than
+skipping those tests that haven't set "TEST_PASSES_SANITIZE_LEAK=true"
+before sourcing "test-lib.sh" this mode runs them with
+"--invert-exit-code". This is used to check that there's a one-to-one
+mapping between "TEST_PASSES_SANITIZE_LEAK=true" and those tests that
+pass under "SANITIZE=leak". This is especially useful when testing a
+series that fixes various memory leaks with "git rebase -x".
+
+GIT_TEST_SANITIZE_LEAK_LOG=true will log memory leaks to
+"test-results/$TEST_NAME.leak/trace.*" files. The logs include a
+"dedup_token" (see +"ASAN_OPTIONS=help=1 ./git") and other options to
+make logs +machine-readable.
+
+With GIT_TEST_SANITIZE_LEAK_LOG=true we'll look at the leak logs
+before exiting and exit on failure if the logs showed that we had a
+memory leak, even if the test itself would have otherwise passed. This
+allows us to catch e.g. missing &&-chaining. This is especially useful
+when combined with "GIT_TEST_PASSING_SANITIZE_LEAK", see below.
+
+GIT_TEST_PASSING_SANITIZE_LEAK=check when combined with "--immediate"
+will run to completion faster, and result in the same failing
+tests. The only practical reason to run
+GIT_TEST_PASSING_SANITIZE_LEAK=check without "--immediate" is to
+combine it with "GIT_TEST_SANITIZE_LEAK_LOG=true". If we stop at the
+first failing test case our leak logs won't show subsequent leaks we
+might have run into.
+
+GIT_TEST_PASSING_SANITIZE_LEAK=(true|check) will not catch all memory
+leaks unless combined with GIT_TEST_SANITIZE_LEAK_LOG=true. Some tests
+run "git" (or "test-tool" etc.) without properly checking the exit
+code, or git will invoke itself and fail to ferry the abort() exit
+code to the original caller. When the two modes are combined we'll
+look at the "test-results/$TEST_NAME.leak/trace.*" files at the end of
+the test run to see if we had memory leaks which the test itself didn't
+catch.
 
 GIT_TEST_PROTOCOL_VERSION=<n>, when set, makes 'protocol.version'
 default to n.
@@ -935,32 +970,6 @@ see test-lib-functions.sh for the full list and their options.
            test_done
        fi
 
- - test_external [<prereq>] <message> <external> <script>
-
-   Execute a <script> with an <external> interpreter (like perl). This
-   was added for tests like t9700-perl-git.sh which do most of their
-   work in an external test script.
-
-       test_external \
-           'GitwebCache::*FileCache*' \
-           perl "$TEST_DIRECTORY"/t9503/test_cache_interface.pl
-
-   If the test is outputting its own TAP you should set the
-   test_external_has_tap variable somewhere before calling the first
-   test_external* function. See t9700-perl-git.sh for an example.
-
-       # The external test will outputs its own plan
-       test_external_has_tap=1
-
- - test_external_without_stderr [<prereq>] <message> <external> <script>
-
-   Like test_external but fail if there's any output on stderr,
-   instead of checking the exit code.
-
-       test_external_without_stderr \
-           'Perl API' \
-           perl "$TEST_DIRECTORY"/t9700/test.pl
-
  - test_expect_code <exit-code> <command>
 
    Run a command and ensure that it exits with the given exit code.
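
Editor's note: the leak-checking knobs documented in the t/README hunk above are plain environment variables read by test-lib.sh, so they can be exported around a normal test run. A minimal sketch of combining them, assuming a SANITIZE=leak build; the exact make invocations are illustrative, not part of the patch:

	make SANITIZE=leak &&
	# run only the tests marked TEST_PASSES_SANITIZE_LEAK=true
	GIT_TEST_PASSING_SANITIZE_LEAK=true make test &&
	# verify the markings are current, keeping per-test leak logs
	GIT_TEST_PASSING_SANITIZE_LEAK=check \
	GIT_TEST_SANITIZE_LEAK_LOG=true \
		make test GIT_TEST_OPTS="--immediate"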
index cc01d891504eb117eb9c367ebc4e89b516157012..f1b9a6ce4daee67c7aadfd2a9d19cb1c4033224c 100644 (file)
@@ -153,7 +153,7 @@ test_expect_success 'blame evil merge' '
 
 test_expect_success 'blame huge graft' '
        test_when_finished "git checkout branch2" &&
-       test_when_finished "rm -f .git/info/grafts" &&
+       test_when_finished "rm -rf .git/info" &&
        graft= &&
        for i in 0 1 2
        do
@@ -168,6 +168,7 @@ test_expect_success 'blame huge graft' '
                        graft="$graft$commit " || return 1
                done
        done &&
+       mkdir .git/info &&
        printf "%s " $graft >.git/info/grafts &&
        check_count -h 00 01 1 10 1
 '
index ad3ef1cd77ae1b98db3c46980d1439f1ca5f56e7..6c900ca668467dcdbc92c6544ad173565c3a5f08 100644 (file)
@@ -16,6 +16,7 @@ static void add_string_to_filter(const char *data, struct bloom_filter *filter)
                }
                printf("\n");
                add_key_to_filter(&key, filter, &settings);
+               clear_bloom_key(&key);
 }
 
 static void print_bloom_filter(struct bloom_filter *filter) {
@@ -80,6 +81,7 @@ int cmd__bloom(int argc, const char **argv)
                }
 
                print_bloom_filter(&filter);
+               free(filter.data);
        }
 
        if (!strcmp(argv[1], "get_filter_for_commit")) {
index a6e936721fe660bb110e3d691d61dad80fddc307..4ba9eb65606d42f70f345a1b18bcd575db31e4cf 100644 (file)
@@ -37,7 +37,7 @@
  *
  */
 
-static int iterate_cb(const char *var, const char *value, void *data)
+static int iterate_cb(const char *var, const char *value, void *data UNUSED)
 {
        static int nr;
 
index e7c0137a4779ff005d3b440e1874d570a2d5d797..e6c1b1e22bb36da604af3238341f6b7b14753796 100644 (file)
@@ -2,33 +2,34 @@
 #include "cache.h"
 
 /*
- * Usage: test-tool cron <file> [-l]
+ * Usage: test-tool crontab <file> -l|<input>
  *
  * If -l is specified, then write the contents of <file> to stdout.
- * Otherwise, write from stdin into <file>.
+ * Otherwise, copy the contents of <input> into <file>.
  */
 int cmd__crontab(int argc, const char **argv)
 {
        int a;
        FILE *from, *to;
 
-       if (argc == 3 && !strcmp(argv[2], "-l")) {
+       if (argc != 3)
+               usage("test-tool crontab <file> -l|<input>");
+
+       if (!strcmp(argv[2], "-l")) {
                from = fopen(argv[1], "r");
                if (!from)
                        return 0;
                to = stdout;
-       } else if (argc == 2) {
-               from = stdin;
-               to = fopen(argv[1], "w");
-       } else
-               return error("unknown arguments");
+       } else {
+               from = xfopen(argv[2], "r");
+               to = xfopen(argv[1], "w");
+       }
 
        while ((a = fgetc(from)) != EOF)
                fputc(a, to);
 
-       if (argc == 3)
-               fclose(from);
-       else
+       fclose(from);
+       if (to != stdout)
                fclose(to);
 
        return 0;
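
Editor's note: the comment above describes a tiny stand-in for crontab(1) used by the maintenance tests. An illustrative invocation of its two modes (file names are arbitrary, not taken from any test):

	echo "0 5 * * * true" >cron.in &&
	test-tool crontab cron.txt cron.in &&	# copy cron.in into cron.txt
	test-tool crontab cron.txt -l		# write cron.txt to stdout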
index e749a49c88e66e4b3ce388b2c0762d36d4090f99..b15481ea596dcdd2337a4dd9d99761ecb5514ea4 100644 (file)
@@ -20,8 +20,9 @@ int cmd__delta(int argc, const char **argv)
 {
        int fd;
        struct stat st;
-       void *from_buf, *data_buf, *out_buf;
+       void *from_buf = NULL, *data_buf = NULL, *out_buf = NULL;
        unsigned long from_size, data_size, out_size;
+       int ret = 1;
 
        if (argc != 5 || (strcmp(argv[1], "-d") && strcmp(argv[1], "-p"))) {
                fprintf(stderr, "usage: %s\n", usage_str);
@@ -38,21 +39,21 @@ int cmd__delta(int argc, const char **argv)
        if (read_in_full(fd, from_buf, from_size) < 0) {
                perror(argv[2]);
                close(fd);
-               return 1;
+               goto cleanup;
        }
        close(fd);
 
        fd = open(argv[3], O_RDONLY);
        if (fd < 0 || fstat(fd, &st)) {
                perror(argv[3]);
-               return 1;
+               goto cleanup;
        }
        data_size = st.st_size;
        data_buf = xmalloc(data_size);
        if (read_in_full(fd, data_buf, data_size) < 0) {
                perror(argv[3]);
                close(fd);
-               return 1;
+               goto cleanup;
        }
        close(fd);
 
@@ -66,14 +67,20 @@ int cmd__delta(int argc, const char **argv)
                                      &out_size);
        if (!out_buf) {
                fprintf(stderr, "delta operation failed (returned NULL)\n");
-               return 1;
+               goto cleanup;
        }
 
        fd = open (argv[4], O_WRONLY|O_CREAT|O_TRUNC, 0666);
        if (fd < 0 || write_in_full(fd, out_buf, out_size) < 0) {
                perror(argv[4]);
-               return 1;
+               goto cleanup;
        }
 
-       return 0;
+       ret = 0;
+cleanup:
+       free(from_buf);
+       free(data_buf);
+       free(out_buf);
+
+       return ret;
 }
index 6a3f88f5f5d4a8af09dca7c13da6132d66093ca2..0d6d7f1ecbf198af006b4d80b8027a196d5caab3 100644 (file)
@@ -59,11 +59,16 @@ int cmd__dump_cache_tree(int ac, const char **av)
 {
        struct index_state istate;
        struct cache_tree *another = cache_tree();
+       int ret;
+
        setup_git_directory();
        if (read_cache() < 0)
                die("unable to read index file");
        istate = the_index;
        istate.cache_tree = another;
        cache_tree_update(&istate, WRITE_TREE_DRY_RUN);
-       return dump_cache_tree(active_cache_tree, another, "");
+       ret = dump_cache_tree(active_cache_tree, another, "");
+       cache_tree_free(&another);
+
+       return ret;
 }
index 4e5553e20249debf36a76296f18599300ee91af5..45665ec19a5d6d4e2cf37c1a2a9f87d69ff3be02 100644 (file)
@@ -184,8 +184,6 @@ int cmd__fast_rebase(int argc, const char **argv)
                last_picked_commit = commit;
                last_commit = create_commit(result.tree, commit, last_commit);
        }
-       /* TODO: There should be some kind of rev_info_free(&revs) call... */
-       memset(&revs, 0, sizeof(revs));
 
        merge_switch_to_result(&merge_opt, head_tree, &result, 1, !result.clean);
 
index 261c545b9d199c6ba8c87eaaab7ce8da4f5cba69..5860dab0ffac976bc3e6d0ef2da2153566477018 100644 (file)
@@ -54,5 +54,6 @@ int cmd_hash_impl(int ac, const char **av, int algo)
                fwrite(hash, 1, algop->rawsz, stdout);
        else
                puts(hash_to_hex_algop(hash, algop));
+       free(buffer);
        return 0;
 }
index 37c452535f8bb2bcb3ee27461097ef0bf578ebac..8c3edacc0007df7ddb52f2f65d00e44b95edeb5a 100644 (file)
@@ -181,12 +181,18 @@ static struct json_writer nest1 = JSON_WRITER_INIT;
 
 static void make_nest1(int pretty)
 {
+       make_obj1(0);
+       make_arr1(0);
+
        jw_object_begin(&nest1, pretty);
        {
                jw_object_sub_jw(&nest1, "obj1", &obj1);
                jw_object_sub_jw(&nest1, "arr1", &arr1);
        }
        jw_end(&nest1);
+
+       jw_release(&obj1);
+       jw_release(&arr1);
 }
 
 static char *expect_inline1 =
@@ -313,6 +319,9 @@ static void make_mixed1(int pretty)
                jw_object_sub_jw(&mixed1, "arr1", &arr1);
        }
        jw_end(&mixed1);
+
+       jw_release(&obj1);
+       jw_release(&arr1);
 }
 
 static void cmp(const char *test, const struct json_writer *jw, const char *exp)
@@ -325,8 +334,8 @@ static void cmp(const char *test, const struct json_writer *jw, const char *exp)
        exit(1);
 }
 
-#define t(v) do { make_##v(0); cmp(#v, &v, expect_##v); } while (0)
-#define p(v) do { make_##v(1); cmp(#v, &v, pretty_##v); } while (0)
+#define t(v) do { make_##v(0); cmp(#v, &v, expect_##v); jw_release(&v); } while (0)
+#define p(v) do { make_##v(1); cmp(#v, &v, pretty_##v); jw_release(&v); } while (0)
 
 /*
  * Run some basic regression tests with some known patterns.
@@ -381,7 +390,6 @@ static int unit_tests(void)
 
        /* mixed forms */
        t(mixed1);
-       jw_init(&mixed1);
        p(mixed1);
 
        return 0;
@@ -544,7 +552,7 @@ static int scripted(void)
 
        printf("%s\n", jw.json.buf);
 
-       strbuf_release(&jw.json);
+       jw_release(&jw);
        return 0;
 }
 
index ebf68f7de82465d2761bceedd47715badbf76dbb..335e5bb3a9008c8ce1cb31198bd54808c70566dc 100644 (file)
@@ -13,41 +13,46 @@ struct line {
        struct line *next;
 };
 
-static void *get_next(const void *a)
-{
-       return ((const struct line *)a)->next;
-}
+DEFINE_LIST_SORT(static, sort_lines, struct line, next);
 
-static void set_next(void *a, void *b)
+static int compare_strings(const struct line *x, const struct line *y)
 {
-       ((struct line *)a)->next = b;
-}
-
-static int compare_strings(const void *a, const void *b)
-{
-       const struct line *x = a, *y = b;
        return strcmp(x->text, y->text);
 }
 
 static int sort_stdin(void)
 {
-       struct line *line, *p = NULL, *lines = NULL;
+       struct line *lines;
+       struct line **tail = &lines;
        struct strbuf sb = STRBUF_INIT;
-
-       while (!strbuf_getline(&sb, stdin)) {
-               line = xmalloc(sizeof(struct line));
-               line->text = strbuf_detach(&sb, NULL);
-               if (p) {
-                       line->next = p->next;
-                       p->next = line;
-               } else {
-                       line->next = NULL;
-                       lines = line;
-               }
-               p = line;
+       struct mem_pool lines_pool;
+       char *p;
+
+       strbuf_read(&sb, 0, 0);
+
+       /*
+        * Split by newline, but don't create an item
+        * for the empty string after the last separator.
+        */
+       if (sb.len && sb.buf[sb.len - 1] == '\n')
+               strbuf_setlen(&sb, sb.len - 1);
+
+       mem_pool_init(&lines_pool, 0);
+       p = sb.buf;
+       for (;;) {
+               char *eol = strchr(p, '\n');
+               struct line *line = mem_pool_alloc(&lines_pool, sizeof(*line));
+               line->text = p;
+               *tail = line;
+               tail = &line->next;
+               if (!eol)
+                       break;
+               *eol = '\0';
+               p = eol + 1;
        }
+       *tail = NULL;
 
-       lines = llist_mergesort(lines, get_next, set_next, compare_strings);
+       sort_lines(&lines, compare_strings);
 
        while (lines) {
                puts(lines->text);
@@ -273,21 +278,11 @@ struct number {
        struct number *next;
 };
 
-static void *get_next_number(const void *a)
-{
-       stats.get_next++;
-       return ((const struct number *)a)->next;
-}
-
-static void set_next_number(void *a, void *b)
-{
-       stats.set_next++;
-       ((struct number *)a)->next = b;
-}
+DEFINE_LIST_SORT_DEBUG(static, sort_numbers, struct number, next,
+                      stats.get_next++, stats.set_next++);
 
-static int compare_numbers(const void *av, const void *bv)
+static int compare_numbers(const struct number *an, const struct number *bn)
 {
-       const struct number *an = av, *bn = bv;
        int a = an->value, b = bn->value;
        stats.compare++;
        return (a > b) - (a < b);
@@ -325,8 +320,7 @@ static int test(const struct dist *dist, const struct mode *mode, int n, int m)
        *tail = NULL;
 
        stats.get_next = stats.set_next = stats.compare = 0;
-       list = llist_mergesort(list, get_next_number, set_next_number,
-                              compare_numbers);
+       sort_numbers(&list, compare_numbers);
 
        QSORT(arr, n, compare_ints);
        for (i = 0, curr = list; i < n && curr; i++, curr = curr->next) {
index 48d3cf6692da0612eff2f8c61051780ef944def6..506835521a463aebd1a06259cf96e05d71e3833b 100644 (file)
@@ -192,3 +192,131 @@ int cmd__parse_options(int argc, const char **argv)
 
        return ret;
 }
+
+static void print_args(int argc, const char **argv)
+{
+       int i;
+       for (i = 0; i < argc; i++)
+               printf("arg %02d: %s\n", i, argv[i]);
+}
+
+static int parse_options_flags__cmd(int argc, const char **argv,
+                                   enum parse_opt_flags test_flags)
+{
+       const char *usage[] = {
+               "<...> cmd [options]",
+               NULL
+       };
+       int opt = 0;
+       const struct option options[] = {
+               OPT_INTEGER('o', "opt", &opt, "an integer option"),
+               OPT_END()
+       };
+
+       argc = parse_options(argc, argv, NULL, options, usage, test_flags);
+
+       printf("opt: %d\n", opt);
+       print_args(argc, argv);
+
+       return 0;
+}
+
+static enum parse_opt_flags test_flags = 0;
+static const struct option test_flag_options[] = {
+       OPT_GROUP("flag-options:"),
+       OPT_BIT(0, "keep-dashdash", &test_flags,
+               "pass PARSE_OPT_KEEP_DASHDASH to parse_options()",
+               PARSE_OPT_KEEP_DASHDASH),
+       OPT_BIT(0, "stop-at-non-option", &test_flags,
+               "pass PARSE_OPT_STOP_AT_NON_OPTION to parse_options()",
+               PARSE_OPT_STOP_AT_NON_OPTION),
+       OPT_BIT(0, "keep-argv0", &test_flags,
+               "pass PARSE_OPT_KEEP_ARGV0 to parse_options()",
+               PARSE_OPT_KEEP_ARGV0),
+       OPT_BIT(0, "keep-unknown-opt", &test_flags,
+               "pass PARSE_OPT_KEEP_UNKNOWN_OPT to parse_options()",
+               PARSE_OPT_KEEP_UNKNOWN_OPT),
+       OPT_BIT(0, "no-internal-help", &test_flags,
+               "pass PARSE_OPT_NO_INTERNAL_HELP to parse_options()",
+               PARSE_OPT_NO_INTERNAL_HELP),
+       OPT_BIT(0, "subcommand-optional", &test_flags,
+               "pass PARSE_OPT_SUBCOMMAND_OPTIONAL to parse_options()",
+               PARSE_OPT_SUBCOMMAND_OPTIONAL),
+       OPT_END()
+};
+
+int cmd__parse_options_flags(int argc, const char **argv)
+{
+       const char *usage[] = {
+               "test-tool parse-options-flags [flag-options] cmd [options]",
+               NULL
+       };
+
+       argc = parse_options(argc, argv, NULL, test_flag_options, usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+
+       if (!argc || strcmp(argv[0], "cmd")) {
+               error("'cmd' is mandatory");
+               usage_with_options(usage, test_flag_options);
+       }
+
+       return parse_options_flags__cmd(argc, argv, test_flags);
+}
+
+static int subcmd_one(int argc, const char **argv, const char *prefix)
+{
+       printf("fn: subcmd_one\n");
+       print_args(argc, argv);
+       return 0;
+}
+
+static int subcmd_two(int argc, const char **argv, const char *prefix)
+{
+       printf("fn: subcmd_two\n");
+       print_args(argc, argv);
+       return 0;
+}
+
+static int parse_subcommand__cmd(int argc, const char **argv,
+                                enum parse_opt_flags test_flags)
+{
+       const char *usage[] = {
+               "<...> cmd subcmd-one",
+               "<...> cmd subcmd-two",
+               NULL
+       };
+       parse_opt_subcommand_fn *fn = NULL;
+       int opt = 0;
+       struct option options[] = {
+               OPT_SUBCOMMAND("subcmd-one", &fn, subcmd_one),
+               OPT_SUBCOMMAND("subcmd-two", &fn, subcmd_two),
+               OPT_INTEGER('o', "opt", &opt, "an integer option"),
+               OPT_END()
+       };
+
+       if (test_flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)
+               fn = subcmd_one;
+       argc = parse_options(argc, argv, NULL, options, usage, test_flags);
+
+       printf("opt: %d\n", opt);
+
+       return fn(argc, argv, NULL);
+}
+
+int cmd__parse_subcommand(int argc, const char **argv)
+{
+       const char *usage[] = {
+               "test-tool parse-subcommand [flag-options] cmd <subcommand>",
+               NULL
+       };
+
+       argc = parse_options(argc, argv, NULL, test_flag_options, usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+
+       if (!argc || strcmp(argv[0], "cmd")) {
+               error("'cmd' is mandatory");
+               usage_with_options(usage, test_flag_options);
+       }
+
+       return parse_subcommand__cmd(argc, argv, test_flags);
+}
index 229ed416b0e67867331bfd75fe376834766f7da5..d20e1b7a18d613a97b00e29fa491dc7e1654b21a 100644 (file)
@@ -296,9 +296,8 @@ int cmd__path_utils(int argc, const char **argv)
        if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) {
                char *buf = xmallocz(strlen(argv[2]));
                int rv = normalize_path_copy(buf, argv[2]);
-               if (rv)
-                       buf = "++failed++";
-               puts(buf);
+               puts(rv ? "++failed++" : buf);
+               free(buf);
                return 0;
        }
 
@@ -356,7 +355,10 @@ int cmd__path_utils(int argc, const char **argv)
                int nongit_ok;
                setup_git_directory_gently(&nongit_ok);
                while (argc > 3) {
-                       puts(prefix_path(prefix, prefix_len, argv[3]));
+                       char *pfx = prefix_path(prefix, prefix_len, argv[3]);
+
+                       puts(pfx);
+                       free(pfx);
                        argc--;
                        argv++;
                }
@@ -366,6 +368,7 @@ int cmd__path_utils(int argc, const char **argv)
        if (argc == 4 && !strcmp(argv[1], "strip_path_suffix")) {
                char *prefix = strip_path_suffix(argv[2], argv[3]);
                printf("%s\n", prefix ? prefix : "(null)");
+               free(prefix);
                return 0;
        }
 
index 9646d85fc84a9e52ff4c05e7614a9582c0fc3b7e..ae8a5648daf5c1385afe43f327bf9a5a3d98e1f1 100644 (file)
@@ -96,6 +96,7 @@ static const char **get_store(const char **argv, struct ref_store **refs)
                        die("no such worktree: %s", gitdir);
 
                *refs = get_worktree_ref_store(*p);
+               free_worktrees(worktrees);
        } else
                die("unknown backend %s", argv[0]);
 
@@ -160,7 +161,7 @@ static int cmd_rename_ref(struct ref_store *refs, const char **argv)
 }
 
 static int each_ref(const char *refname, const struct object_id *oid,
-                   int flags, void *cb_data)
+                   int flags, void *cb_data UNUSED)
 {
        printf("%s %s 0x%x\n", oid_to_hex(oid), refname, flags);
        return 0;
@@ -206,7 +207,7 @@ static int cmd_for_each_reflog(struct ref_store *refs, const char **argv)
 
 static int each_reflog(struct object_id *old_oid, struct object_id *new_oid,
                       const char *committer, timestamp_t timestamp,
-                      int tz, const char *msg, void *cb_data)
+                      int tz, const char *msg, void *cb_data UNUSED)
 {
        printf("%s %s %s %" PRItime " %+05d%s%s", oid_to_hex(old_oid),
               oid_to_hex(new_oid), committer, timestamp, tz,
index d6f28ca8d148d9c05fa2941b59054840da0b156f..bd871a735b4fbb19ed99e9bfab56f13d138a0567 100644 (file)
@@ -34,6 +34,7 @@ static int test_regex_bug(void)
        if (m[0].rm_so == 3) /* matches '\n' when it should not */
                die("regex bug confirmed: re-build git with NO_REGEX=1");
 
+       regfree(&r);
        return 0;
 }
 
@@ -94,18 +95,20 @@ int cmd__regex(int argc, const char **argv)
                die("failed regcomp() for pattern '%s' (%s)", pat, errbuf);
        }
        if (!str)
-               return 0;
+               goto cleanup;
 
        ret = regexec(&r, str, 1, m, 0);
        if (ret) {
                if (silent || ret == REG_NOMATCH)
-                       return ret;
+                       goto cleanup;
 
                regerror(ret, &r, errbuf, sizeof(errbuf));
                die("failed regexec() for subject '%s' (%s)", str, errbuf);
        }
 
-       return 0;
+cleanup:
+       regfree(&r);
+       return ret;
 usage:
        usage("\ttest-tool regex --bug\n"
              "\ttest-tool regex [--silent] <pattern>\n"
diff --git a/t/helper/test-rot13-filter.c b/t/helper/test-rot13-filter.c
new file mode 100644 (file)
index 0000000..f8d564c
--- /dev/null
@@ -0,0 +1,382 @@
+/*
+ * Example implementation for the Git filter protocol version 2
+ * See Documentation/gitattributes.txt, section "Filter Protocol"
+ *
+ * Usage: test-tool rot13-filter [--always-delay] --log=<path> <capabilities>
+ *
+ * Log path defines a debug log file that the helper writes to. The
+ * subsequent arguments define a list of supported protocol capabilities
+ * ("clean", "smudge", etc).
+ *
+ * When --always-delay is given all pathnames with the "can-delay" flag
+ * that don't appear on the list below are delayed with a count of 1
+ * (see more below).
+ *
+ * This implementation supports special test cases:
+ * (1) If data with the pathname "clean-write-fail.r" is processed with
+ *     a "clean" operation then the write operation will die.
+ * (2) If data with the pathname "smudge-write-fail.r" is processed with
+ *     a "smudge" operation then the write operation will die.
+ * (3) If data with the pathname "error.r" is processed with any
+ *     operation then the filter signals that it cannot or does not want
+ *     to process the file.
+ * (4) If data with the pathname "abort.r" is processed with any
+ *     operation then the filter signals that it cannot or does not want
+ *     to process the file and any file after that is processed with the
+ *     same command.
+ * (5) If data with a pathname that is a key in the delay hash is
+ *     requested (e.g. "test-delay10.a") then the filter responds with
+ *     a "delay" status and sets the "requested" field in the delay hash.
+ *     The filter will signal the availability of this object after
+ *     "count" (field in delay hash) "list_available_blobs" commands.
+ * (6) If data with the pathname "missing-delay.a" is processed then the
+ *     filter will drop the path from the "list_available_blobs" response.
+ * (7) If data with the pathname "invalid-delay.a" is processed then the
+ *     filter will add the path "unfiltered" which was not delayed before
+ *     to the "list_available_blobs" response.
+ */
+
+#include "test-tool.h"
+#include "pkt-line.h"
+#include "string-list.h"
+#include "strmap.h"
+#include "parse-options.h"
+
+static FILE *logfile;
+static int always_delay, has_clean_cap, has_smudge_cap;
+static struct strmap delay = STRMAP_INIT;
+
+static inline const char *str_or_null(const char *str)
+{
+       return str ? str : "(null)";
+}
+
+static char *rot13(char *str)
+{
+       char *c;
+       for (c = str; *c; c++)
+               if (isalpha(*c))
+                       *c += tolower(*c) < 'n' ? 13 : -13;
+       return str;
+}
+
+static char *get_value(char *buf, const char *key)
+{
+       const char *orig_buf = buf;
+       if (!buf ||
+           !skip_prefix((const char *)buf, key, (const char **)&buf) ||
+           !skip_prefix((const char *)buf, "=", (const char **)&buf) ||
+           !*buf)
+               die("expected key '%s', got '%s'", key, str_or_null(orig_buf));
+       return buf;
+}
+
+/*
+ * Read a text packet, expecting that it is in the form "key=value" for
+ * the given key. An EOF does not trigger any error and is reported
+ * back to the caller with NULL. Die if the "key" part of "key=value" does
+ * not match the given key, or the value part is empty.
+ */
+static char *packet_key_val_read(const char *key)
+{
+       char *buf;
+       if (packet_read_line_gently(0, NULL, &buf) < 0)
+               return NULL;
+       return xstrdup(get_value(buf, key));
+}
+
+static inline void assert_remote_capability(struct strset *caps, const char *cap)
+{
+       if (!strset_contains(caps, cap))
+               die("required '%s' capability not available from remote", cap);
+}
+
+static void read_capabilities(struct strset *remote_caps)
+{
+       for (;;) {
+               char *buf = packet_read_line(0, NULL);
+               if (!buf)
+                       break;
+               strset_add(remote_caps, get_value(buf, "capability"));
+       }
+
+       assert_remote_capability(remote_caps, "clean");
+       assert_remote_capability(remote_caps, "smudge");
+       assert_remote_capability(remote_caps, "delay");
+}
+
+static void check_and_write_capabilities(struct strset *remote_caps,
+                                        const char **caps, int nr_caps)
+{
+       int i;
+       for (i = 0; i < nr_caps; i++) {
+               if (!strset_contains(remote_caps, caps[i]))
+                       die("our capability '%s' is not available from remote",
+                           caps[i]);
+               packet_write_fmt(1, "capability=%s\n", caps[i]);
+       }
+       packet_flush(1);
+}
+
+struct delay_entry {
+       int requested, count;
+       char *output;
+};
+
+static void free_delay_entries(void)
+{
+       struct hashmap_iter iter;
+       struct strmap_entry *ent;
+
+       strmap_for_each_entry(&delay, &iter, ent) {
+               struct delay_entry *delay_entry = ent->value;
+               free(delay_entry->output);
+               free(delay_entry);
+       }
+       strmap_clear(&delay, 0);
+}
+
+static void add_delay_entry(char *pathname, int count, int requested)
+{
+       struct delay_entry *entry = xcalloc(1, sizeof(*entry));
+       entry->count = count;
+       entry->requested = requested;
+       if (strmap_put(&delay, pathname, entry))
+               BUG("adding the same path twice to delay hash?");
+}
+
+static void reply_list_available_blobs_cmd(void)
+{
+       struct hashmap_iter iter;
+       struct strmap_entry *ent;
+       struct string_list_item *str_item;
+       struct string_list paths = STRING_LIST_INIT_NODUP;
+
+       /* flush */
+       if (packet_read_line(0, NULL))
+               die("bad list_available_blobs end");
+
+       strmap_for_each_entry(&delay, &iter, ent) {
+               struct delay_entry *delay_entry = ent->value;
+               if (!delay_entry->requested)
+                       continue;
+               delay_entry->count--;
+               if (!strcmp(ent->key, "invalid-delay.a")) {
+                       /* Send Git a pathname that was not delayed earlier */
+                       packet_write_fmt(1, "pathname=unfiltered");
+               }
+               if (!strcmp(ent->key, "missing-delay.a")) {
+                       /* Do not signal Git that this file is available */
+               } else if (!delay_entry->count) {
+                       string_list_append(&paths, ent->key);
+                       packet_write_fmt(1, "pathname=%s", ent->key);
+               }
+       }
+
+       /* Print paths in sorted order. */
+       string_list_sort(&paths);
+       for_each_string_list_item(str_item, &paths)
+               fprintf(logfile, " %s", str_item->string);
+       string_list_clear(&paths, 0);
+
+       packet_flush(1);
+
+       fprintf(logfile, " [OK]\n");
+       packet_write_fmt(1, "status=success");
+       packet_flush(1);
+}
+
+static void command_loop(void)
+{
+       for (;;) {
+               char *buf, *output;
+               char *pathname;
+               struct delay_entry *entry;
+               struct strbuf input = STRBUF_INIT;
+               char *command = packet_key_val_read("command");
+
+               if (!command) {
+                       fprintf(logfile, "STOP\n");
+                       break;
+               }
+               fprintf(logfile, "IN: %s", command);
+
+               if (!strcmp(command, "list_available_blobs")) {
+                       reply_list_available_blobs_cmd();
+                       free(command);
+                       continue;
+               }
+
+               pathname = packet_key_val_read("pathname");
+               if (!pathname)
+                       die("unexpected EOF while expecting pathname");
+               fprintf(logfile, " %s", pathname);
+
+               /* Read until flush */
+               while ((buf = packet_read_line(0, NULL))) {
+                       if (!strcmp(buf, "can-delay=1")) {
+                               entry = strmap_get(&delay, pathname);
+                               if (entry && !entry->requested)
+                                       entry->requested = 1;
+                               else if (!entry && always_delay)
+                                       add_delay_entry(pathname, 1, 1);
+                       } else if (starts_with(buf, "ref=") ||
+                                  starts_with(buf, "treeish=") ||
+                                  starts_with(buf, "blob=")) {
+                               fprintf(logfile, " %s", buf);
+                       } else {
+                               /*
+                                * In general, filters need to be graceful about
+                                * new metadata, since it's documented that we
+                                * can pass any key-value pairs, but for tests,
+                                * let's be a little stricter.
+                                */
+                               die("Unknown message '%s'", buf);
+                       }
+               }
+
+               read_packetized_to_strbuf(0, &input, 0);
+               fprintf(logfile, " %"PRIuMAX" [OK] -- ", (uintmax_t)input.len);
+
+               entry = strmap_get(&delay, pathname);
+               if (entry && entry->output) {
+                       output = entry->output;
+               } else if (!strcmp(pathname, "error.r") || !strcmp(pathname, "abort.r")) {
+                       output = "";
+               } else if (!strcmp(command, "clean") && has_clean_cap) {
+                       output = rot13(input.buf);
+               } else if (!strcmp(command, "smudge") && has_smudge_cap) {
+                       output = rot13(input.buf);
+               } else {
+                       die("bad command '%s'", command);
+               }
+
+               if (!strcmp(pathname, "error.r")) {
+                       fprintf(logfile, "[ERROR]\n");
+                       packet_write_fmt(1, "status=error");
+                       packet_flush(1);
+               } else if (!strcmp(pathname, "abort.r")) {
+                       fprintf(logfile, "[ABORT]\n");
+                       packet_write_fmt(1, "status=abort");
+                       packet_flush(1);
+               } else if (!strcmp(command, "smudge") &&
+                          (entry = strmap_get(&delay, pathname)) &&
+                          entry->requested == 1) {
+                       fprintf(logfile, "[DELAYED]\n");
+                       packet_write_fmt(1, "status=delayed");
+                       packet_flush(1);
+                       entry->requested = 2;
+                       if (entry->output != output) {
+                               free(entry->output);
+                               entry->output = xstrdup(output);
+                       }
+               } else {
+                       int i, nr_packets = 0;
+                       size_t output_len;
+                       const char *p;
+                       packet_write_fmt(1, "status=success");
+                       packet_flush(1);
+
+                       if (skip_prefix(pathname, command, &p) &&
+                           !strcmp(p, "-write-fail.r")) {
+                               fprintf(logfile, "[WRITE FAIL]\n");
+                               die("%s write error", command);
+                       }
+
+                       output_len = strlen(output);
+                       fprintf(logfile, "OUT: %"PRIuMAX" ", (uintmax_t)output_len);
+
+                       if (write_packetized_from_buf_no_flush_count(output,
+                               output_len, 1, &nr_packets))
+                               die("failed to write buffer to stdout");
+                       packet_flush(1);
+
+                       for (i = 0; i < nr_packets; i++)
+                               fprintf(logfile, ".");
+                       fprintf(logfile, " [OK]\n");
+
+                       packet_flush(1);
+               }
+               free(pathname);
+               strbuf_release(&input);
+               free(command);
+       }
+}
+
+static void packet_initialize(void)
+{
+       char *pkt_buf = packet_read_line(0, NULL);
+
+       if (!pkt_buf || strcmp(pkt_buf, "git-filter-client"))
+               die("bad initialize: '%s'", str_or_null(pkt_buf));
+
+       pkt_buf = packet_read_line(0, NULL);
+       if (!pkt_buf || strcmp(pkt_buf, "version=2"))
+               die("bad version: '%s'", str_or_null(pkt_buf));
+
+       pkt_buf = packet_read_line(0, NULL);
+       if (pkt_buf)
+               die("bad version end: '%s'", pkt_buf);
+
+       packet_write_fmt(1, "git-filter-server");
+       packet_write_fmt(1, "version=2");
+       packet_flush(1);
+}
+
+static const char *rot13_usage[] = {
+       "test-tool rot13-filter [--always-delay] --log=<path> <capabilities>",
+       NULL
+};
+
+int cmd__rot13_filter(int argc, const char **argv)
+{
+       int i, nr_caps;
+       struct strset remote_caps = STRSET_INIT;
+       const char *log_path = NULL;
+
+       struct option options[] = {
+               OPT_BOOL(0, "always-delay", &always_delay,
+                        "delay all paths with the can-delay flag"),
+               OPT_STRING(0, "log", &log_path, "path",
+                          "path to the debug log file"),
+               OPT_END()
+       };
+       nr_caps = parse_options(argc, argv, NULL, options, rot13_usage,
+                               PARSE_OPT_STOP_AT_NON_OPTION);
+
+       if (!log_path || !nr_caps)
+               usage_with_options(rot13_usage, options);
+
+       logfile = fopen(log_path, "a");
+       if (!logfile)
+               die_errno("failed to open log file");
+
+       for (i = 0; i < nr_caps; i++) {
+               if (!strcmp(argv[i], "smudge"))
+                       has_smudge_cap = 1;
+               if (!strcmp(argv[i], "clean"))
+                       has_clean_cap = 1;
+       }
+
+       add_delay_entry("test-delay10.a", 1, 0);
+       add_delay_entry("test-delay11.a", 1, 0);
+       add_delay_entry("test-delay20.a", 2, 0);
+       add_delay_entry("test-delay10.b", 1, 0);
+       add_delay_entry("missing-delay.a", 1, 0);
+       add_delay_entry("invalid-delay.a", 1, 0);
+
+       fprintf(logfile, "START\n");
+       packet_initialize();
+
+       read_capabilities(&remote_caps);
+       check_and_write_capabilities(&remote_caps, argv, nr_caps);
+       fprintf(logfile, "init handshake complete\n");
+       strset_clear(&remote_caps);
+
+       command_loop();
+
+       if (fclose(logfile))
+               die_errno("error closing logfile");
+       free_delay_entries();
+       return 0;
+}
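
Editor's note: the header comment above documents how this helper speaks the long-running filter protocol. A minimal sketch of how a test might hook it up; the filter name "rot13", the log file name and the sample file are illustrative assumptions, not taken from the patch:

	test_config filter.rot13.process \
		"test-tool rot13-filter --log=rot13.log clean smudge delay" &&
	echo "*.r filter=rot13" >.gitattributes &&
	echo content >test.r &&
	git add .gitattributes test.r	# runs the helper's "clean" path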
index 393f1604ff954703a2a5e075fbda1e14d0827dc0..026c802479d012b30bc625c820f5d0fcc25e997c 100644 (file)
@@ -12,6 +12,7 @@ int cmd__scrap_cache_tree(int ac, const char **av)
        hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR);
        if (read_cache() < 0)
                die("unable to read index file");
+       cache_tree_free(&active_cache_tree);
        active_cache_tree = NULL;
        if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
                die("unable to write index file");
index 28e905afc36afdc3202ec68ac915b1ddc6a08781..824e5c0a95819f8d11393a74da4b231728bb0615 100644 (file)
@@ -24,7 +24,7 @@ int cmd__serve_v2(int argc, const char **argv)
        /* ignore all unknown cmdline switches for now */
        argc = parse_options(argc, argv, prefix, options, serve_usage,
                             PARSE_OPT_KEEP_DASHDASH |
-                            PARSE_OPT_KEEP_UNKNOWN);
+                            PARSE_OPT_KEEP_UNKNOWN_OPT);
 
        if (advertise_capabilities)
                protocol_v2_advertise_capabilities();
index e2692746dfdb0e6a5c3b1c748124059bcbdd5fae..22a41c409263c077843598e4cc51881a52b0ce30 100644 (file)
@@ -15,14 +15,11 @@ int cmd__submodule_config(int argc, const char **argv)
 {
        const char **arg = argv;
        int my_argc = argc;
-       int output_url = 0;
        int lookup_name = 0;
 
        arg++;
        my_argc--;
        while (arg[0] && starts_with(arg[0], "--")) {
-               if (!strcmp(arg[0], "--url"))
-                       output_url = 1;
                if (!strcmp(arg[0], "--name"))
                        lookup_name = 1;
                arg++;
@@ -57,12 +54,8 @@ int cmd__submodule_config(int argc, const char **argv)
                if (!submodule)
                        die_usage(argc, argv, "Submodule not found.");
 
-               if (output_url)
-                       printf("Submodule url: '%s' for path '%s'\n",
-                                       submodule->url, submodule->path);
-               else
-                       printf("Submodule name: '%s' for path '%s'\n",
-                                       submodule->name, submodule->path);
+               printf("Submodule name: '%s' for path '%s'\n", submodule->name,
+                      submodule->path);
 
                arg += 2;
        }
diff --git a/t/helper/test-submodule.c b/t/helper/test-submodule.c
new file mode 100644 (file)
index 0000000..e0e0c53
--- /dev/null
@@ -0,0 +1,146 @@
+#include "test-tool.h"
+#include "test-tool-utils.h"
+#include "cache.h"
+#include "parse-options.h"
+#include "remote.h"
+#include "submodule-config.h"
+#include "submodule.h"
+
+#define TEST_TOOL_CHECK_NAME_USAGE \
+       "test-tool submodule check-name <name>"
+static const char *submodule_check_name_usage[] = {
+       TEST_TOOL_CHECK_NAME_USAGE,
+       NULL
+};
+
+#define TEST_TOOL_IS_ACTIVE_USAGE \
+       "test-tool submodule is-active <name>"
+static const char *submodule_is_active_usage[] = {
+       TEST_TOOL_IS_ACTIVE_USAGE,
+       NULL
+};
+
+#define TEST_TOOL_RESOLVE_RELATIVE_URL_USAGE \
+       "test-tool submodule resolve-relative-url <up_path> <remoteurl> <url>"
+static const char *submodule_resolve_relative_url_usage[] = {
+       TEST_TOOL_RESOLVE_RELATIVE_URL_USAGE,
+       NULL,
+};
+
+static const char *submodule_usage[] = {
+       TEST_TOOL_CHECK_NAME_USAGE,
+       TEST_TOOL_IS_ACTIVE_USAGE,
+       TEST_TOOL_RESOLVE_RELATIVE_URL_USAGE,
+       NULL
+};
+
+/*
+ * Exit non-zero if any of the submodule names given on the command line is
+ * invalid. If no names are given, filter stdin to print only valid names
+ * (which is primarily intended for testing).
+ */
+static int check_name(int argc, const char **argv)
+{
+       if (argc > 1) {
+               while (*++argv) {
+                       if (check_submodule_name(*argv) < 0)
+                               return 1;
+               }
+       } else {
+               struct strbuf buf = STRBUF_INIT;
+               while (strbuf_getline(&buf, stdin) != EOF) {
+                       if (!check_submodule_name(buf.buf))
+                               printf("%s\n", buf.buf);
+               }
+               strbuf_release(&buf);
+       }
+       return 0;
+}
+
+static int cmd__submodule_check_name(int argc, const char **argv)
+{
+       struct option options[] = {
+               OPT_END()
+       };
+       argc = parse_options(argc, argv, "test-tools", options,
+                            submodule_check_name_usage, 0);
+       if (argc)
+               usage_with_options(submodule_check_name_usage, options);
+
+       return check_name(argc, argv);
+}
+
+static int cmd__submodule_is_active(int argc, const char **argv)
+{
+       struct option options[] = {
+               OPT_END()
+       };
+       argc = parse_options(argc, argv, "test-tools", options,
+                            submodule_is_active_usage, 0);
+       if (argc != 1)
+               usage_with_options(submodule_is_active_usage, options);
+
+       setup_git_directory();
+
+       return !is_submodule_active(the_repository, argv[0]);
+}
+
+static int resolve_relative_url(int argc, const char **argv)
+{
+       char *remoteurl, *res;
+       const char *up_path, *url;
+
+       up_path = argv[0];
+       remoteurl = xstrdup(argv[1]);
+       url = argv[2];
+
+       if (!strcmp(up_path, "(null)"))
+               up_path = NULL;
+
+       res = relative_url(remoteurl, url, up_path);
+       puts(res);
+       free(res);
+       free(remoteurl);
+       return 0;
+}
+
+static int cmd__submodule_resolve_relative_url(int argc, const char **argv)
+{
+       struct option options[] = {
+               OPT_END()
+       };
+       argc = parse_options(argc, argv, "test-tools", options,
+                            submodule_resolve_relative_url_usage, 0);
+       if (argc != 3)
+               usage_with_options(submodule_resolve_relative_url_usage, options);
+
+       return resolve_relative_url(argc, argv);
+}
+
+static struct test_cmd cmds[] = {
+       { "check-name", cmd__submodule_check_name },
+       { "is-active", cmd__submodule_is_active },
+       { "resolve-relative-url", cmd__submodule_resolve_relative_url},
+};
+
+int cmd__submodule(int argc, const char **argv)
+{
+       struct option options[] = {
+               OPT_END()
+       };
+       size_t i;
+
+       argc = parse_options(argc, argv, "test-tools", options, submodule_usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+       if (argc < 1)
+               usage_with_options(submodule_usage, options);
+
+       for (i = 0; i < ARRAY_SIZE(cmds); i++)
+               if (!strcmp(cmds[i].name, argv[0]))
+                       return cmds[i].fn(argc, argv);
+
+       usage_msg_optf("unknown subcommand '%s'", submodule_usage, options,
+                      argv[0]);
+
+       return 0;
+}
diff --git a/t/helper/test-tool-utils.h b/t/helper/test-tool-utils.h
new file mode 100644 (file)
index 0000000..6a0e5e0
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef TEST_TOOL_UTILS_H
+#define TEST_TOOL_UTILS_H
+
+struct test_cmd {
+       const char *name;
+       int (*fn)(int argc, const char **argv);
+};
+
+#endif
index 318fdbab0c315cd0823b76de2864d9689dc5c377..d1d013bcd920b197163b7c780f0131b0a1356541 100644 (file)
@@ -1,5 +1,6 @@
 #include "git-compat-util.h"
 #include "test-tool.h"
+#include "test-tool-utils.h"
 #include "trace2.h"
 #include "parse-options.h"
 
@@ -8,11 +9,6 @@ static const char * const test_tool_usage[] = {
        NULL
 };
 
-struct test_cmd {
-       const char *name;
-       int (*fn)(int argc, const char **argv);
-};
-
 static struct test_cmd cmds[] = {
        { "advise", cmd__advise_if_enabled },
        { "bitmap", cmd__bitmap },
@@ -51,7 +47,9 @@ static struct test_cmd cmds[] = {
        { "online-cpus", cmd__online_cpus },
        { "pack-mtimes", cmd__pack_mtimes },
        { "parse-options", cmd__parse_options },
+       { "parse-options-flags", cmd__parse_options_flags },
        { "parse-pathspec-file", cmd__parse_pathspec_file },
+       { "parse-subcommand", cmd__parse_subcommand },
        { "partial-clone", cmd__partial_clone },
        { "path-utils", cmd__path_utils },
        { "pcre2-config", cmd__pcre2_config },
@@ -65,6 +63,7 @@ static struct test_cmd cmds[] = {
        { "read-midx", cmd__read_midx },
        { "ref-store", cmd__ref_store },
        { "reftable", cmd__reftable },
+       { "rot13-filter", cmd__rot13_filter },
        { "dump-reftable", cmd__dump_reftable },
        { "regex", cmd__regex },
        { "repository", cmd__repository },
@@ -78,6 +77,7 @@ static struct test_cmd cmds[] = {
        { "simple-ipc", cmd__simple_ipc },
        { "strcmp-offset", cmd__strcmp_offset },
        { "string-list", cmd__string_list },
+       { "submodule", cmd__submodule },
        { "submodule-config", cmd__submodule_config },
        { "submodule-nested-repo-config", cmd__submodule_nested_repo_config },
        { "subprocess", cmd__subprocess },
index bb79927163196cd4815d32fb45ee82a6e1f0bdfc..6b46b6444b657c3debdc286c50e81db5ba495f8b 100644 (file)
@@ -41,7 +41,9 @@ int cmd__oidtree(int argc, const char **argv);
 int cmd__online_cpus(int argc, const char **argv);
 int cmd__pack_mtimes(int argc, const char **argv);
 int cmd__parse_options(int argc, const char **argv);
+int cmd__parse_options_flags(int argc, const char **argv);
 int cmd__parse_pathspec_file(int argc, const char** argv);
+int cmd__parse_subcommand(int argc, const char **argv);
 int cmd__partial_clone(int argc, const char **argv);
 int cmd__path_utils(int argc, const char **argv);
 int cmd__pcre2_config(int argc, const char **argv);
@@ -54,6 +56,7 @@ int cmd__read_cache(int argc, const char **argv);
 int cmd__read_graph(int argc, const char **argv);
 int cmd__read_midx(int argc, const char **argv);
 int cmd__ref_store(int argc, const char **argv);
+int cmd__rot13_filter(int argc, const char **argv);
 int cmd__reftable(int argc, const char **argv);
 int cmd__regex(int argc, const char **argv);
 int cmd__repository(int argc, const char **argv);
@@ -68,6 +71,7 @@ int cmd__sigchain(int argc, const char **argv);
 int cmd__simple_ipc(int argc, const char **argv);
 int cmd__strcmp_offset(int argc, const char **argv);
 int cmd__string_list(int argc, const char **argv);
+int cmd__submodule(int argc, const char **argv);
 int cmd__submodule_config(int argc, const char **argv);
 int cmd__submodule_nested_repo_config(int argc, const char **argv);
 int cmd__subprocess(int argc, const char **argv);
index 8f4d67e646953c5b094b55ec8112f6ba3b9b9024..86edd454f5c9e86f516f564868d94b1859b524f7 100644 (file)
@@ -5,8 +5,9 @@
 int cmd__urlmatch_normalization(int argc, const char **argv)
 {
        const char usage[] = "test-tool urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
-       char *url1, *url2;
+       char *url1 = NULL, *url2 = NULL;
        int opt_p = 0, opt_l = 0;
+       int ret = 0;
 
        /*
         * For one url, succeed if url_normalize succeeds on it, fail otherwise.
@@ -39,7 +40,7 @@ int cmd__urlmatch_normalization(int argc, const char **argv)
                        printf("%s\n", url1);
                if (opt_l)
                        printf("%u\n", (unsigned)info.url_len);
-               return 0;
+               goto cleanup;
        }
 
        if (opt_p || opt_l)
@@ -47,5 +48,9 @@ int cmd__urlmatch_normalization(int argc, const char **argv)
 
        url1 = url_normalize(argv[1], NULL);
        url2 = url_normalize(argv[2], NULL);
-       return (url1 && url2 && !strcmp(url1, url2)) ? 0 : 1;
+       ret = (url1 && url2 && !strcmp(url1, url2)) ? 0 : 1;
+cleanup:
+       free(url1);
+       free(url2);
+       return ret;
 }
index f013f8a31e5294d879f1e872c7f37626fa583ed6..a2b56b9cae5e3736220afcc30919d1c8bca0b69a 100644 (file)
@@ -12,7 +12,7 @@ static int driver_cb(struct userdiff_driver *driver,
        return 0;
 }
 
-static int cmd__userdiff_config(const char *var, const char *value, void *cb)
+static int cmd__userdiff_config(const char *var, const char *value, void *cb UNUSED)
 {
        if (userdiff_config(var, value) < 0)
                return -1;
index a95537e759b0365db3f80ccf9135838da0cbde5e..f5959370941e33ee4ca9cfe3daa09df8a17858a8 100644 (file)
@@ -440,7 +440,7 @@ midx_bitmap_partial_tests () {
                test_commit packed &&
                git repack &&
                test_commit loose &&
-               git multi-pack-index write --bitmap 2>err &&
+               git multi-pack-index write --bitmap &&
                test_path_is_file $midx &&
                test_path_is_file $midx-$(midx_checksum $objdir).bitmap
        '
diff --git a/t/lib-perl.sh b/t/lib-perl.sh
new file mode 100644 (file)
index 0000000..d0bf509
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (c) 2022 Ævar Arnfjörð Bjarmason
+
+test_lazy_prereq PERL_TEST_MORE '
+       perl -MTest::More -e 0
+'
+
+skip_all_if_no_Test_More () {
+       if ! test_have_prereq PERL
+       then
+               skip_all='skipping perl interface tests, perl not available'
+               test_done
+       fi
+
+       if ! test_have_prereq PERL_TEST_MORE
+       then
+               skip_all="Perl Test::More unavailable, skipping test"
+               test_done
+       fi
+}
index ec6b9b107da4eaab70e221b07c25c9f949f09b14..b57541356bd03d139334a144339b01a92be2c70a 100644 (file)
@@ -207,3 +207,18 @@ check_reworded_commits () {
                >reword-log &&
        test_cmp reword-expected reword-log
 }
+
+# usage: set_replace_editor <file>
+#
+# Replace the todo file with the exact contents of the given file.
+set_replace_editor () {
+       cat >script <<-\EOF &&
+       cat FILENAME >"$1"
+
+       echo 'rebase -i script after editing:'
+       cat "$1"
+       EOF
+
+       sed -e "s/FILENAME/$1/g" <script | write_script fake-editor.sh &&
+       test_set_editor "$(pwd)/fake-editor.sh"
+}
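
A rough usage sketch for set_replace_editor: the caller prepares the desired todo contents in a file, installs the fake editor, and then runs an interactive rebase. The single "break" instruction and the abort cleanup below are illustrative assumptions, not taken from this series:

	test_expect_success 'todo list is replaced verbatim' '
		test_when_finished "git rebase --abort || :" &&
		echo break >todo &&
		set_replace_editor todo &&
		git rebase -i HEAD~1
	'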
index f7c7df0ca427b396ed757dd7e323d9eb03a366ff..03e0abbdb83f260f3c688734d5e0d3914c460581 100644 (file)
@@ -207,7 +207,7 @@ prolog () {
 # should be updated to an existing commit.
 reset_work_tree_to () {
        rm -rf submodule_update &&
-       git clone submodule_update_repo submodule_update &&
+       git clone --template= submodule_update_repo submodule_update &&
        (
                cd submodule_update &&
                rm -rf sub1 &&
@@ -902,13 +902,14 @@ test_submodule_switch_recursing_with_args () {
        '
        # ... but an ignored file is fine.
        test_expect_$RESULTOI "$command: added submodule removes an untracked ignored file" '
-               test_when_finished "rm submodule_update/.git/info/exclude" &&
+               test_when_finished "rm -rf submodule_update/.git/info" &&
                prolog &&
                reset_work_tree_to_interested no_submodule &&
                (
                        cd submodule_update &&
                        git branch -t add_sub1 origin/add_sub1 &&
                        : >sub1 &&
+                       mkdir .git/info &&
                        echo sub1 >.git/info/exclude &&
                        $command add_sub1 &&
                        test_superproject_content origin/add_sub1 &&
@@ -951,7 +952,9 @@ test_submodule_switch_recursing_with_args () {
                reset_work_tree_to_interested add_sub1 &&
                (
                        cd submodule_update &&
+                       rm -rf .git/modules/sub1/info &&
                        git branch -t replace_sub1_with_file origin/replace_sub1_with_file &&
+                       mkdir .git/modules/sub1/info &&
                        echo ignored >.git/modules/sub1/info/exclude &&
                        : >sub1/ignored &&
                        $command replace_sub1_with_file &&
index 63d3bc7cece59937424b3aed6288eca553234665..55a8feb1dc4f964f1630a0af09e575fa76b870fe 100644 (file)
@@ -67,3 +67,34 @@ test_partial_bitmap () {
                        --filter=tree:0 >/dev/null
        '
 }
+
+test_pack_bitmap () {
+       test_perf "repack to disk" '
+               git repack -ad
+       '
+
+       test_full_bitmap
+
+       test_expect_success "create partial bitmap state" '
+               # pick a commit to represent the repo tip in the past
+               cutoff=$(git rev-list HEAD~100 -1) &&
+               orig_tip=$(git rev-parse HEAD) &&
+
+               # now kill off all of the refs and pretend we had
+               # just the one tip
+               rm -rf .git/logs .git/refs/* .git/packed-refs &&
+               git update-ref HEAD $cutoff &&
+
+               # and then repack, which will leave us with a nice
+               # big bitmap pack of the "old" history, and all of
+               # the new history will be loose, as if it had been pushed
+               # up incrementally and exploded via unpack-objects
+               git repack -Ad &&
+
+               # and now restore our original tip, as if the pushes
+               # had happened
+               git update-ref HEAD $orig_tip
+       '
+
+       test_partial_bitmap
+}
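
Callers are expected to set up the repository and any pack.* configuration before invoking test_pack_bitmap; condensed from the updated p5310 script, the calling pattern looks roughly like this (the lookup-table value shown is only an example):

	. ./perf-lib.sh
	. "${TEST_DIRECTORY}/perf/lib-bitmap.sh"

	test_perf_large_repo

	test_expect_success 'setup bitmap config' '
		git config pack.writebitmaps true &&
		git config pack.writeBitmapLookupTable true
	'

	# runs the "repack to disk", full-bitmap and partial-bitmap perf tests
	test_pack_bitmap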
index 1afc08fe7f1990cdf7143f0dfd3cfaa66628ca73..85be14e4ddb2a00516e84a8ff4c62796f3e354b4 100755 (executable)
@@ -49,7 +49,7 @@ test_perf "single-threaded, $desc" "
        test-tool lazy-init-name-hash --single --count=$count
 "
 
-test_perf REPO_BIG_ENOUGH_FOR_MULTI "multi-threaded, $desc" "
+test_perf "multi-threaded, $desc" --prereq REPO_BIG_ENOUGH_FOR_MULTI "
        test-tool lazy-init-name-hash --multi --count=$count
 "
 
index 900b385c4bbc2d0bf9a803f06b48fe1c25909e86..c481c012d2fc17a7944f308c2d6ef68acb284dee 100755 (executable)
@@ -46,7 +46,7 @@ test_expect_success "setup repo" '
 '
 
 test_perf "read-tree br_base br_ballast ($nr_files)" '
-       git read-tree -m br_base br_ballast -n
+       git read-tree -n -m br_base br_ballast
 '
 
 test_perf "switch between br_base br_ballast ($nr_files)" '
index ed366e2e1295254d176941a60e1fa5128d24f02e..ae4ddac8640c1aa8dd8e2ddd9babe01cb8cdbcf7 100755 (executable)
@@ -40,11 +40,11 @@ done
 
 for file in unsorted sorted reversed
 do
-       test_perf "llist_mergesort() $file" "
+       test_perf "DEFINE_LIST_SORT $file" "
                test-tool mergesort sort <$file >actual
        "
 
-       test_expect_success "llist_mergesort() $file sorts like sort(1)" "
+       test_expect_success "DEFINE_LIST_SORT $file sorts like sort(1)" "
                test_cmp_bin sorted actual
        "
 done
index c181110a43931fb2a8131270a4d8bdcb0a2e7a4b..fce8151d41cbbd9eadd098b26c864d61b5b74bcc 100755 (executable)
@@ -123,5 +123,6 @@ test_perf_on_all git blame $SPARSE_CONE/f3/a
 test_perf_on_all git read-tree -mu HEAD
 test_perf_on_all git checkout-index -f --all
 test_perf_on_all git update-index --add --remove $SPARSE_CONE/a
+test_perf_on_all "git rm -f $SPARSE_CONE/a && git checkout HEAD -- $SPARSE_CONE/a"
 
 test_done
index 7ad4f237bc37ff0547bcd036cbbfc160cf723a4f..b1399f1007e6dd8044bf0fc8945e7c5cfaa91673 100755 (executable)
@@ -4,51 +4,37 @@ test_description='Tests pack performance using bitmaps'
 . ./perf-lib.sh
 . "${TEST_DIRECTORY}/perf/lib-bitmap.sh"
 
-test_perf_large_repo
-
-# note that we do everything through config,
-# since we want to be able to compare bitmap-aware
-# git versus non-bitmap git
-#
-# We intentionally use the deprecated pack.writebitmaps
-# config so that we can test against older versions of git.
-test_expect_success 'setup bitmap config' '
-       git config pack.writebitmaps true
-'
-
-# we need to create the tag up front such that it is covered by the repack and
-# thus by generated bitmaps.
-test_expect_success 'create tags' '
-       git tag --message="tag pointing to HEAD" perf-tag HEAD
-'
-
-test_perf 'repack to disk' '
-       git repack -ad
-'
-
-test_full_bitmap
-
-test_expect_success 'create partial bitmap state' '
-       # pick a commit to represent the repo tip in the past
-       cutoff=$(git rev-list HEAD~100 -1) &&
-       orig_tip=$(git rev-parse HEAD) &&
-
-       # now kill off all of the refs and pretend we had
-       # just the one tip
-       rm -rf .git/logs .git/refs/* .git/packed-refs &&
-       git update-ref HEAD $cutoff &&
-
-       # and then repack, which will leave us with a nice
-       # big bitmap pack of the "old" history, and all of
-       # the new history will be loose, as if it had been pushed
-       # up incrementally and exploded via unpack-objects
-       git repack -Ad &&
-
-       # and now restore our original tip, as if the pushes
-       # had happened
-       git update-ref HEAD $orig_tip
-'
-
-test_partial_bitmap
+test_lookup_pack_bitmap () {
+       test_expect_success 'start the test from scratch' '
+               rm -rf * .git
+       '
+
+       test_perf_large_repo
+
+       # note that we do everything through config,
+       # since we want to be able to compare bitmap-aware
+       # git versus non-bitmap git
+       #
+       # We intentionally use the deprecated pack.writebitmaps
+       # config so that we can test against older versions of git.
+       test_expect_success 'setup bitmap config' '
+               git config pack.writebitmaps true
+       '
+
+       # we need to create the tag up front such that it is covered by the repack and
+       # thus by generated bitmaps.
+       test_expect_success 'create tags' '
+               git tag --message="tag pointing to HEAD" perf-tag HEAD
+       '
+
+       test_perf "enable lookup table: $1" '
+               git config pack.writeBitmapLookupTable '"$1"'
+       '
+
+       test_pack_bitmap
+}
+
+test_lookup_pack_bitmap false
+test_lookup_pack_bitmap true
 
 test_done
index 47c3fd7581cc99d562bb5796f3374e207573a304..426fab87e3293e0d1817159eacd1f63e6cd27fd7 100755 (executable)
@@ -3,42 +3,52 @@
 test_description='performance of fetches from bitmapped packs'
 . ./perf-lib.sh
 
-test_perf_default_repo
-
-test_expect_success 'create bitmapped server repo' '
-       git config pack.writebitmaps true &&
-       git repack -ad
-'
-
-# simulate a fetch from a repository that last fetched N days ago, for
-# various values of N. We do so by following the first-parent chain,
-# and assume the first entry in the chain that is N days older than the current
-# HEAD is where the HEAD would have been then.
-for days in 1 2 4 8 16 32 64 128; do
-       title=$(printf '%10s' "($days days)")
-       test_expect_success "setup revs from $days days ago" '
-               now=$(git log -1 --format=%ct HEAD) &&
-               then=$(($now - ($days * 86400))) &&
-               tip=$(git rev-list -1 --first-parent --until=$then HEAD) &&
-               {
-                       echo HEAD &&
-                       echo ^$tip
-               } >revs
+test_fetch_bitmaps () {
+       test_expect_success 'setup test directory' '
+               rm -fr * .git
        '
 
-       test_perf "server $title" '
-               git pack-objects --stdout --revs \
-                                --thin --delta-base-offset \
-                                <revs >tmp.pack
-       '
+       test_perf_default_repo
 
-       test_size "size   $title" '
-               wc -c <tmp.pack
+       test_expect_success 'create bitmapped server repo' '
+               git config pack.writebitmaps true &&
+               git config pack.writeBitmapLookupTable '"$1"' &&
+               git repack -ad
        '
 
-       test_perf "client $title" '
-               git index-pack --stdin --fix-thin <tmp.pack
-       '
-done
+       # simulate a fetch from a repository that last fetched N days ago, for
+       # various values of N. We do so by following the first-parent chain,
+       # and assume the first entry in the chain that is N days older than the current
+       # HEAD is where the HEAD would have been then.
+       for days in 1 2 4 8 16 32 64 128; do
+               title=$(printf '%10s' "($days days)")
+               test_expect_success "setup revs from $days days ago" '
+                       now=$(git log -1 --format=%ct HEAD) &&
+                       then=$(($now - ($days * 86400))) &&
+                       tip=$(git rev-list -1 --first-parent --until=$then HEAD) &&
+                       {
+                               echo HEAD &&
+                               echo ^$tip
+                       } >revs
+               '
+
+               test_perf "server $title (lookup=$1)" '
+                       git pack-objects --stdout --revs \
+                                       --thin --delta-base-offset \
+                                       <revs >tmp.pack
+               '
+
+               test_size "size   $title" '
+                       wc -c <tmp.pack
+               '
+
+               test_perf "client $title (lookup=$1)" '
+                       git index-pack --stdin --fix-thin <tmp.pack
+               '
+       done
+}
+
+test_fetch_bitmaps true
+test_fetch_bitmaps false
 
 test_done
diff --git a/t/perf/p5312-pack-bitmaps-revs.sh b/t/perf/p5312-pack-bitmaps-revs.sh
new file mode 100755 (executable)
index 0000000..0684b69
--- /dev/null
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+test_description='Tests pack performance using bitmaps (rev index enabled)'
+. ./perf-lib.sh
+. "${TEST_DIRECTORY}/perf/lib-bitmap.sh"
+
+test_lookup_pack_bitmap () {
+       test_expect_success 'start the test from scratch' '
+               rm -rf * .git
+       '
+
+       test_perf_large_repo
+
+       test_expect_success 'setup bitmap config' '
+               git config pack.writebitmaps true &&
+               git config pack.writeReverseIndex true
+       '
+
+       # we need to create the tag up front such that it is covered by the repack and
+       # thus by generated bitmaps.
+       test_expect_success 'create tags' '
+               git tag --message="tag pointing to HEAD" perf-tag HEAD
+       '
+
+       test_perf "enable lookup table: $1" '
+               git config pack.writeBitmapLookupTable '"$1"'
+       '
+
+       test_pack_bitmap
+}
+
+test_lookup_pack_bitmap false
+test_lookup_pack_bitmap true
+
+test_done
index f2fa228f16a1ce5056da5add225cc3130fadeae3..d082e6cacbeb344e405f9da7ca86065c8cf2d0c4 100755 (executable)
@@ -4,49 +4,64 @@ test_description='Tests performance using midx bitmaps'
 . ./perf-lib.sh
 . "${TEST_DIRECTORY}/perf/lib-bitmap.sh"
 
-test_perf_large_repo
-
-# we need to create the tag up front such that it is covered by the repack and
-# thus by generated bitmaps.
-test_expect_success 'create tags' '
-       git tag --message="tag pointing to HEAD" perf-tag HEAD
-'
-
-test_expect_success 'start with bitmapped pack' '
-       git repack -adb
-'
-
-test_perf 'setup multi-pack index' '
-       git multi-pack-index write --bitmap
-'
-
-test_expect_success 'drop pack bitmap' '
-       rm -f .git/objects/pack/pack-*.bitmap
-'
-
-test_full_bitmap
-
-test_expect_success 'create partial bitmap state' '
-       # pick a commit to represent the repo tip in the past
-       cutoff=$(git rev-list HEAD~100 -1) &&
-       orig_tip=$(git rev-parse HEAD) &&
-
-       # now pretend we have just one tip
-       rm -rf .git/logs .git/refs/* .git/packed-refs &&
-       git update-ref HEAD $cutoff &&
-
-       # and then repack, which will leave us with a nice
-       # big bitmap pack of the "old" history, and all of
-       # the new history will be loose, as if it had been pushed
-       # up incrementally and exploded via unpack-objects
-       git repack -Ad &&
-       git multi-pack-index write --bitmap &&
-
-       # and now restore our original tip, as if the pushes
-       # had happened
-       git update-ref HEAD $orig_tip
-'
-
-test_partial_bitmap
+test_bitmap () {
+       local enabled="$1"
+
+       test_expect_success "remove existing repo (lookup=$enabled)" '
+               rm -fr * .git
+       '
+
+       test_perf_large_repo
+
+       # we need to create the tag up front such that it is covered by the repack and
+       # thus by generated bitmaps.
+       test_expect_success 'create tags' '
+               git tag --message="tag pointing to HEAD" perf-tag HEAD
+       '
+
+       test_expect_success "use lookup table: $enabled" '
+               git config pack.writeBitmapLookupTable '"$enabled"'
+       '
+
+       test_expect_success "start with bitmapped pack (lookup=$enabled)" '
+               git repack -adb
+       '
+
+       test_perf "setup multi-pack index (lookup=$enabled)" '
+               git multi-pack-index write --bitmap
+       '
+
+       test_expect_success "drop pack bitmap (lookup=$enabled)" '
+               rm -f .git/objects/pack/pack-*.bitmap
+       '
+
+       test_full_bitmap
+
+       test_expect_success "create partial bitmap state (lookup=$enabled)" '
+               # pick a commit to represent the repo tip in the past
+               cutoff=$(git rev-list HEAD~100 -1) &&
+               orig_tip=$(git rev-parse HEAD) &&
+
+               # now pretend we have just one tip
+               rm -rf .git/logs .git/refs/* .git/packed-refs &&
+               git update-ref HEAD $cutoff &&
+
+               # and then repack, which will leave us with a nice
+               # big bitmap pack of the "old" history, and all of
+               # the new history will be loose, as if it had been pushed
+               # up incrementally and exploded via unpack-objects
+               git repack -Ad &&
+               git multi-pack-index write --bitmap &&
+
+               # and now restore our original tip, as if the pushes
+               # had happened
+               git update-ref HEAD $orig_tip
+       '
+
+       test_partial_bitmap
+}
+
+test_bitmap false
+test_bitmap true
 
 test_done
index 9338b9ea008d59f5e81b9e7a01375e68676e3d8d..c3f9a4caa4caadca271982c2e9f872d977c3610b 100755 (executable)
@@ -249,7 +249,7 @@ test_expect_success "Cleanup temp and matrix branches" "
        do
                for fsm_val in $fsm_values
                do
-                       cleanup $uc_val $fsm_val
+                       cleanup $uc_val $fsm_val || return 1
                done
        done
 "
index 17a268ccd1b014201e25c810ebade41f6f31aecf..502b4bcf9ea0ad06cddaad50b999dd8024af28eb 100755 (executable)
@@ -578,6 +578,78 @@ test_expect_success 'subtest: --run invalid range end' '
        EOF_ERR
 '
 
+test_expect_success 'subtest: --invert-exit-code without --immediate' '
+       run_sub_test_lib_test_err full-pass \
+               --invert-exit-code &&
+       check_sub_test_lib_test_err full-pass \
+               <<-\EOF_OUT 3<<-EOF_ERR
+       ok 1 - passing test #1
+       ok 2 - passing test #2
+       ok 3 - passing test #3
+       # passed all 3 test(s)
+       1..3
+       # faking up non-zero exit with --invert-exit-code
+       EOF_OUT
+       EOF_ERR
+'
+
+test_expect_success 'subtest: --invert-exit-code with --immediate: all passed' '
+       run_sub_test_lib_test_err full-pass \
+               --invert-exit-code --immediate &&
+       check_sub_test_lib_test_err full-pass \
+               <<-\EOF_OUT 3<<-EOF_ERR
+       ok 1 - passing test #1
+       ok 2 - passing test #2
+       ok 3 - passing test #3
+       # passed all 3 test(s)
+       1..3
+       # faking up non-zero exit with --invert-exit-code
+       EOF_OUT
+       EOF_ERR
+'
+
+test_expect_success 'subtest: --invert-exit-code without --immediate: partial pass' '
+       run_sub_test_lib_test partial-pass \
+               --invert-exit-code &&
+       check_sub_test_lib_test partial-pass <<-\EOF
+       ok 1 - passing test #1
+       not ok 2 - # TODO induced breakage (--invert-exit-code): failing test #2
+       #       false
+       ok 3 - passing test #3
+       # failed 1 among 3 test(s)
+       1..3
+       # faked up failures as TODO & now exiting with 0 due to --invert-exit-code
+       EOF
+'
+
+test_expect_success 'subtest: --invert-exit-code with --immediate: partial pass' '
+       run_sub_test_lib_test partial-pass \
+               --invert-exit-code --immediate &&
+       check_sub_test_lib_test partial-pass \
+               <<-\EOF_OUT 3<<-EOF_ERR
+       ok 1 - passing test #1
+       not ok 2 - # TODO induced breakage (--invert-exit-code): failing test #2
+       #       false
+       1..2
+       # faked up failures as TODO & now exiting with 0 due to --invert-exit-code
+       EOF_OUT
+       EOF_ERR
+'
+
+test_expect_success 'subtest: --invert-exit-code --immediate: got a failure' '
+       run_sub_test_lib_test partial-pass \
+               --invert-exit-code --immediate &&
+       check_sub_test_lib_test_err partial-pass \
+               <<-\EOF_OUT 3<<-EOF_ERR
+       ok 1 - passing test #1
+       not ok 2 - # TODO induced breakage (--invert-exit-code): failing test #2
+       #       false
+       1..2
+       # faked up failures as TODO & now exiting with 0 due to --invert-exit-code
+       EOF_OUT
+       EOF_ERR
+'
+
 test_expect_success 'subtest: tests respect prerequisites' '
        write_and_run_sub_test_lib_test prereqs <<-\EOF &&
 
index f6356db183b98c5c29acdb4b03dcb453533a5728..26eaca095a26a6a654364bdd8acf208af30b2d00 100755 (executable)
@@ -65,7 +65,7 @@ test_expect_success 'check commit-tree' '
        test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
-test_expect_success !SANITIZE_LEAK 'check rev-list' '
+test_expect_success 'check rev-list' '
        git update-ref "HEAD" "$SHA" &&
        git rev-list HEAD >actual &&
        echo $SHA >expected &&
index 143f1005179c8afb11aba90e4d279b6d288c8a48..f7ee2f2ff0e6ee3be99ecd4d545c376963653482 100755 (executable)
@@ -3,6 +3,7 @@
 test_description=gitattributes
 
 TEST_PASSES_SANITIZE_LEAK=true
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 attr_check_basic () {
@@ -284,7 +285,7 @@ test_expect_success 'using --git-dir and --work-tree' '
 '
 
 test_expect_success 'setup bare' '
-       git clone --bare . bare.git
+       git clone --template= --bare . bare.git
 '
 
 test_expect_success 'bare repository: check that .gitattribute is ignored' '
@@ -315,6 +316,7 @@ test_expect_success 'bare repository: check that --cached honors index' '
 test_expect_success 'bare repository: test info/attributes' '
        (
                cd bare.git &&
+               mkdir info &&
                (
                        echo "f test=f" &&
                        echo "a/i test=a/i"
@@ -360,6 +362,7 @@ test_expect_success SYMLINKS 'symlinks respected in core.attributesFile' '
 
 test_expect_success SYMLINKS 'symlinks respected in info/attributes' '
        test_when_finished "rm .git/info/attributes" &&
+       mkdir .git/info &&
        ln -s ../../attr .git/info/attributes &&
        attr_check file set
 '
index 2e9d652d826af230fc90166c9ef53da3f6e7aa7e..8114fac73b320b85949d8d7ce1422c35e1eb8158 100755 (executable)
@@ -31,7 +31,7 @@ test_expect_success WRITE_TREE_OUT 'write-tree output on unwritable repository'
        test_cmp expect out.write-tree
 '
 
-test_expect_success POSIXPERM,SANITY,!SANITIZE_LEAK 'commit should notice unwritable repository' '
+test_expect_success POSIXPERM,SANITY 'commit should notice unwritable repository' '
        test_when_finished "chmod 775 .git/objects .git/objects/??" &&
        chmod a-w .git/objects .git/objects/?? &&
        test_must_fail git commit -m second 2>out.commit
index 5575dade8eee8184b24d6685213b9a30dcee4364..c70d11bc914d09df5baa8cf74e9a2c292fc861b8 100755 (executable)
@@ -3,6 +3,7 @@
 test_description=check-ignore
 
 TEST_PASSES_SANITIZE_LEAK=true
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 init_vars () {
@@ -225,7 +226,8 @@ test_expect_success 'setup' '
                !globaltwo
                globalthree
        EOF
-       cat <<-\EOF >>.git/info/exclude
+       mkdir .git/info &&
+       cat <<-\EOF >.git/info/exclude
                per-repo
        EOF
 '
@@ -543,9 +545,9 @@ test_expect_success_multi 'submodule from subdirectory' '' '
 
 test_expect_success 'global ignore not yet enabled' '
        expect_from_stdin <<-\EOF &&
-               .git/info/exclude:7:per-repo    per-repo
+               .git/info/exclude:1:per-repo    per-repo
                a/.gitignore:2:*three   a/globalthree
-               .git/info/exclude:7:per-repo    a/per-repo
+               .git/info/exclude:1:per-repo    a/per-repo
        EOF
        test_check_ignore "-v globalone per-repo a/globalthree a/per-repo not-ignored a/globaltwo"
 '
@@ -566,10 +568,10 @@ test_expect_success 'global ignore with -v' '
        enable_global_excludes &&
        expect_from_stdin <<-EOF &&
                $global_excludes:1:globalone    globalone
-               .git/info/exclude:7:per-repo    per-repo
+               .git/info/exclude:1:per-repo    per-repo
                $global_excludes:3:globalthree  globalthree
                a/.gitignore:2:*three   a/globalthree
-               .git/info/exclude:7:per-repo    a/per-repo
+               .git/info/exclude:1:per-repo    a/per-repo
                $global_excludes:2:!globaltwo   globaltwo
        EOF
        test_check_ignore "-v globalone per-repo globalthree a/globalthree a/per-repo not-ignored globaltwo"
index 6c33a4369015c813a73384ff68149463c5189105..4ed2f242eb246b05e82527d16f7203ec2d163af9 100755 (executable)
@@ -44,6 +44,8 @@ test_expect_success 'invalid usage' '
        test_expect_code 129 git help -g add &&
        test_expect_code 129 git help -a -g &&
 
+       test_expect_code 129 git help --user-interfaces add &&
+
        test_expect_code 129 git help -g -c &&
        test_expect_code 129 git help --config-for-completion add &&
        test_expect_code 129 git help --config-sections-for-completion add
@@ -104,9 +106,9 @@ test_expect_success 'git help' '
        test_i18ngrep "^   commit " help.output &&
        test_i18ngrep "^   fetch  " help.output
 '
+
 test_expect_success 'git help -g' '
        git help -g >help.output &&
-       test_i18ngrep "^   attributes " help.output &&
        test_i18ngrep "^   everyday   " help.output &&
        test_i18ngrep "^   tutorial   " help.output
 '
@@ -127,6 +129,12 @@ test_expect_success 'git help succeeds without git.html' '
        test_cmp expect test-browser.log
 '
 
+test_expect_success 'git help --user-interfaces' '
+       git help --user-interfaces >help.output &&
+       grep "^   attributes   " help.output &&
+       grep "^   mailmap   " help.output
+'
+
 test_expect_success 'git help -c' '
        git help -c >help.output &&
        cat >expect <<-\EOF &&
@@ -220,6 +228,10 @@ test_expect_success "'git help -a' section spacing" '
        Low-level Commands / Syncing Repositories
 
        Low-level Commands / Internal Helpers
+
+       User-facing repository, command and file interfaces
+
+       Developer-facing file file formats, protocols and interfaces
        EOF
        test_cmp expect actual
 '
index 086822fc45b66c8caf6aff43af86da2a466618ac..0a087a1983da43fdfce0506218fa021e5f1840be 100755 (executable)
@@ -1,8 +1,9 @@
 #!/bin/sh
 
 test_description='test basic hash implementation'
-. ./test-lib.sh
 
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
 
 test_expect_success 'test basic SHA-1 hash values' '
        test-tool sha1 </dev/null >actual &&
index 3b0c336b38e4c7f81268227984472cb35572cdc2..19a730c29ed8f791d7885c31ff2a908cb1b5dfb3 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='test json-writer JSON generation'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'unit test of json-writer routines' '
index 1c840348bd1eec848850c9679fb5e0368c5348da..abecd75e4e430b6a1182690ae54a0e5d4e76bc32 100755 (executable)
@@ -17,9 +17,6 @@ tr \
   'nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM'
 EOF
 
-write_script rot13-filter.pl "$PERL_PATH" \
-       <"$TEST_DIRECTORY"/t0021/rot13-filter.pl
-
 generate_random_characters () {
        LEN=$1
        NAME=$2
@@ -365,8 +362,8 @@ test_expect_success 'diff does not reuse worktree files that need cleaning' '
        test_line_count = 0 count
 '
 
-test_expect_success PERL 'required process filter should filter data' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter should filter data' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
        rm -rf repo &&
        mkdir repo &&
@@ -450,8 +447,8 @@ test_expect_success PERL 'required process filter should filter data' '
        )
 '
 
-test_expect_success PERL 'required process filter should filter data for various subcommands' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter should filter data for various subcommands' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
        (
                cd repo &&
@@ -561,9 +558,9 @@ test_expect_success PERL 'required process filter should filter data for various
        )
 '
 
-test_expect_success PERL 'required process filter takes precedence' '
+test_expect_success 'required process filter takes precedence' '
        test_config_global filter.protocol.clean false &&
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" &&
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" &&
        test_config_global filter.protocol.required true &&
        rm -rf repo &&
        mkdir repo &&
@@ -587,8 +584,8 @@ test_expect_success PERL 'required process filter takes precedence' '
        )
 '
 
-test_expect_success PERL 'required process filter should be used only for "clean" operation only' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" &&
+test_expect_success 'required process filter should be used only for "clean" operation only' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -622,8 +619,8 @@ test_expect_success PERL 'required process filter should be used only for "clean
        )
 '
 
-test_expect_success PERL 'required process filter should process multiple packets' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter should process multiple packets' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
 
        rm -rf repo &&
@@ -687,8 +684,8 @@ test_expect_success PERL 'required process filter should process multiple packet
        )
 '
 
-test_expect_success PERL 'required process filter with clean error should fail' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter with clean error should fail' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
        rm -rf repo &&
        mkdir repo &&
@@ -706,8 +703,8 @@ test_expect_success PERL 'required process filter with clean error should fail'
        )
 '
 
-test_expect_success PERL 'process filter should restart after unexpected write failure' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'process filter should restart after unexpected write failure' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -735,7 +732,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f
                rm -f debug.log &&
                git checkout --quiet --no-progress . 2>git-stderr.log &&
 
-               grep "smudge write error at" git-stderr.log &&
+               grep "smudge write error" git-stderr.log &&
                test_i18ngrep "error: external filter" git-stderr.log &&
 
                cat >expected.log <<-EOF &&
@@ -761,8 +758,8 @@ test_expect_success PERL 'process filter should restart after unexpected write f
        )
 '
 
-test_expect_success PERL 'process filter should not be restarted if it signals an error' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'process filter should not be restarted if it signals an error' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -804,8 +801,8 @@ test_expect_success PERL 'process filter should not be restarted if it signals a
        )
 '
 
-test_expect_success PERL 'process filter abort stops processing of all further files' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'process filter abort stops processing of all further files' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -861,10 +858,10 @@ test_expect_success PERL 'invalid process filter must fail (and not hang!)' '
        )
 '
 
-test_expect_success PERL 'delayed checkout in process filter' '
-       test_config_global filter.a.process "rot13-filter.pl a.log clean smudge delay" &&
+test_expect_success 'delayed checkout in process filter' '
+       test_config_global filter.a.process "test-tool rot13-filter --log=a.log clean smudge delay" &&
        test_config_global filter.a.required true &&
-       test_config_global filter.b.process "rot13-filter.pl b.log clean smudge delay" &&
+       test_config_global filter.b.process "test-tool rot13-filter --log=b.log clean smudge delay" &&
        test_config_global filter.b.required true &&
 
        rm -rf repo &&
@@ -940,8 +937,8 @@ test_expect_success PERL 'delayed checkout in process filter' '
        )
 '
 
-test_expect_success PERL 'missing file in delayed checkout' '
-       test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" &&
+test_expect_success 'missing file in delayed checkout' '
+       test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" &&
        test_config_global filter.bug.required true &&
 
        rm -rf repo &&
@@ -960,8 +957,8 @@ test_expect_success PERL 'missing file in delayed checkout' '
        grep "error: .missing-delay\.a. was not filtered properly" git-stderr.log
 '
 
-test_expect_success PERL 'invalid file in delayed checkout' '
-       test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" &&
+test_expect_success 'invalid file in delayed checkout' '
+       test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" &&
        test_config_global filter.bug.required true &&
 
        rm -rf repo &&
@@ -990,10 +987,10 @@ do
                mode_prereq='UTF8_NFD_TO_NFC' ;;
        esac
 
-       test_expect_success PERL,SYMLINKS,$mode_prereq \
+       test_expect_success SYMLINKS,$mode_prereq \
        "delayed checkout with $mode-collision don't write to the wrong place" '
                test_config_global filter.delay.process \
-                       "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" &&
+                       "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" &&
                test_config_global filter.delay.required true &&
 
                git init $mode-collision &&
@@ -1026,12 +1023,12 @@ do
        '
 done
 
-test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \
+test_expect_success SYMLINKS,CASE_INSENSITIVE_FS \
 "delayed checkout with submodule collision don't write to the wrong place" '
        git init collision-with-submodule &&
        (
                cd collision-with-submodule &&
-               git config filter.delay.process "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" &&
+               git config filter.delay.process "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" &&
                git config filter.delay.required true &&
 
                # We need Git to treat the submodule "a" and the
@@ -1062,11 +1059,11 @@ test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \
        )
 '
 
-test_expect_success PERL 'setup for progress tests' '
+test_expect_success 'setup for progress tests' '
        git init progress &&
        (
                cd progress &&
-               git config filter.delay.process "rot13-filter.pl delay-progress.log clean smudge delay" &&
+               git config filter.delay.process "test-tool rot13-filter --log=delay-progress.log clean smudge delay" &&
                git config filter.delay.required true &&
 
                echo "*.a filter=delay" >.gitattributes &&
@@ -1132,12 +1129,12 @@ do
        '
 done
 
-test_expect_success PERL 'delayed checkout correctly reports the number of updated entries' '
+test_expect_success 'delayed checkout correctly reports the number of updated entries' '
        rm -rf repo &&
        git init repo &&
        (
                cd repo &&
-               git config filter.delay.process "../rot13-filter.pl delayed.log clean smudge delay" &&
+               git config filter.delay.process "test-tool rot13-filter --log=delayed.log clean smudge delay" &&
                git config filter.delay.required true &&
 
                echo "*.a filter=delay" >.gitattributes &&
diff --git a/t/t0021/rot13-filter.pl b/t/t0021/rot13-filter.pl
deleted file mode 100644 (file)
index 7bb9376..0000000
+++ /dev/null
@@ -1,247 +0,0 @@
-#
-# Example implementation for the Git filter protocol version 2
-# See Documentation/gitattributes.txt, section "Filter Protocol"
-#
-# Usage: rot13-filter.pl [--always-delay] <log path> <capabilities>
-#
-# Log path defines a debug log file that the script writes to. The
-# subsequent arguments define a list of supported protocol capabilities
-# ("clean", "smudge", etc).
-#
-# When --always-delay is given all pathnames with the "can-delay" flag
-# that don't appear on the list bellow are delayed with a count of 1
-# (see more below).
-#
-# This implementation supports special test cases:
-# (1) If data with the pathname "clean-write-fail.r" is processed with
-#     a "clean" operation then the write operation will die.
-# (2) If data with the pathname "smudge-write-fail.r" is processed with
-#     a "smudge" operation then the write operation will die.
-# (3) If data with the pathname "error.r" is processed with any
-#     operation then the filter signals that it cannot or does not want
-#     to process the file.
-# (4) If data with the pathname "abort.r" is processed with any
-#     operation then the filter signals that it cannot or does not want
-#     to process the file and any file after that is processed with the
-#     same command.
-# (5) If data with a pathname that is a key in the DELAY hash is
-#     requested (e.g. "test-delay10.a") then the filter responds with
-#     a "delay" status and sets the "requested" field in the DELAY hash.
-#     The filter will signal the availability of this object after
-#     "count" (field in DELAY hash) "list_available_blobs" commands.
-# (6) If data with the pathname "missing-delay.a" is processed that the
-#     filter will drop the path from the "list_available_blobs" response.
-# (7) If data with the pathname "invalid-delay.a" is processed that the
-#     filter will add the path "unfiltered" which was not delayed before
-#     to the "list_available_blobs" response.
-#
-
-use 5.008;
-sub gitperllib {
-       # Git assumes that all path lists are Unix-y colon-separated ones. But
-       # when the Git for Windows executes the test suite, its MSYS2 Bash
-       # calls git.exe, and colon-separated path lists are converted into
-       # Windows-y semicolon-separated lists of *Windows* paths (which
-       # naturally contain a colon after the drive letter, so splitting by
-       # colons simply does not cut it).
-       #
-       # Detect semicolon-separated path list and handle them appropriately.
-
-       if ($ENV{GITPERLLIB} =~ /;/) {
-               return split(/;/, $ENV{GITPERLLIB});
-       }
-       return split(/:/, $ENV{GITPERLLIB});
-}
-use lib (gitperllib());
-use strict;
-use warnings;
-use IO::File;
-use Git::Packet;
-
-my $MAX_PACKET_CONTENT_SIZE = 65516;
-
-my $always_delay = 0;
-if ( $ARGV[0] eq '--always-delay' ) {
-       $always_delay = 1;
-       shift @ARGV;
-}
-
-my $log_file                = shift @ARGV;
-my @capabilities            = @ARGV;
-
-open my $debug, ">>", $log_file or die "cannot open log file: $!";
-
-my %DELAY = (
-       'test-delay10.a' => { "requested" => 0, "count" => 1 },
-       'test-delay11.a' => { "requested" => 0, "count" => 1 },
-       'test-delay20.a' => { "requested" => 0, "count" => 2 },
-       'test-delay10.b' => { "requested" => 0, "count" => 1 },
-       'missing-delay.a' => { "requested" => 0, "count" => 1 },
-       'invalid-delay.a' => { "requested" => 0, "count" => 1 },
-);
-
-sub rot13 {
-       my $str = shift;
-       $str =~ y/A-Za-z/N-ZA-Mn-za-m/;
-       return $str;
-}
-
-print $debug "START\n";
-$debug->flush();
-
-packet_initialize("git-filter", 2);
-
-my %remote_caps = packet_read_and_check_capabilities("clean", "smudge", "delay");
-packet_check_and_write_capabilities(\%remote_caps, @capabilities);
-
-print $debug "init handshake complete\n";
-$debug->flush();
-
-while (1) {
-       my ( $res, $command ) = packet_key_val_read("command");
-       if ( $res == -1 ) {
-               print $debug "STOP\n";
-               exit();
-       }
-       print $debug "IN: $command";
-       $debug->flush();
-
-       if ( $command eq "list_available_blobs" ) {
-               # Flush
-               packet_compare_lists([1, ""], packet_bin_read()) ||
-                       die "bad list_available_blobs end";
-
-               foreach my $pathname ( sort keys %DELAY ) {
-                       if ( $DELAY{$pathname}{"requested"} >= 1 ) {
-                               $DELAY{$pathname}{"count"} = $DELAY{$pathname}{"count"} - 1;
-                               if ( $pathname eq "invalid-delay.a" ) {
-                                       # Send Git a pathname that was not delayed earlier
-                                       packet_txt_write("pathname=unfiltered");
-                               }
-                               if ( $pathname eq "missing-delay.a" ) {
-                                       # Do not signal Git that this file is available
-                               } elsif ( $DELAY{$pathname}{"count"} == 0 ) {
-                                       print $debug " $pathname";
-                                       packet_txt_write("pathname=$pathname");
-                               }
-                       }
-               }
-
-               packet_flush();
-
-               print $debug " [OK]\n";
-               $debug->flush();
-               packet_txt_write("status=success");
-               packet_flush();
-       } else {
-               my ( $res, $pathname ) = packet_key_val_read("pathname");
-               if ( $res == -1 ) {
-                       die "unexpected EOF while expecting pathname";
-               }
-               print $debug " $pathname";
-               $debug->flush();
-
-               # Read until flush
-               my ( $done, $buffer ) = packet_txt_read();
-               while ( $buffer ne '' ) {
-                       if ( $buffer eq "can-delay=1" ) {
-                               if ( exists $DELAY{$pathname} and $DELAY{$pathname}{"requested"} == 0 ) {
-                                       $DELAY{$pathname}{"requested"} = 1;
-                               } elsif ( !exists $DELAY{$pathname} and $always_delay ) {
-                                       $DELAY{$pathname} = { "requested" => 1, "count" => 1 };
-                               }
-                       } elsif ($buffer =~ /^(ref|treeish|blob)=/) {
-                               print $debug " $buffer";
-                       } else {
-                               # In general, filters need to be graceful about
-                               # new metadata, since it's documented that we
-                               # can pass any key-value pairs, but for tests,
-                               # let's be a little stricter.
-                               die "Unknown message '$buffer'";
-                       }
-
-                       ( $done, $buffer ) = packet_txt_read();
-               }
-               if ( $done == -1 ) {
-                       die "unexpected EOF after pathname '$pathname'";
-               }
-
-               my $input = "";
-               {
-                       binmode(STDIN);
-                       my $buffer;
-                       my $done = 0;
-                       while ( !$done ) {
-                               ( $done, $buffer ) = packet_bin_read();
-                               $input .= $buffer;
-                       }
-                       if ( $done == -1 ) {
-                               die "unexpected EOF while reading input for '$pathname'";
-                       }                       
-                       print $debug " " . length($input) . " [OK] -- ";
-                       $debug->flush();
-               }
-
-               my $output;
-               if ( exists $DELAY{$pathname} and exists $DELAY{$pathname}{"output"} ) {
-                       $output = $DELAY{$pathname}{"output"}
-               } elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
-                       $output = "";
-               } elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
-                       $output = rot13($input);
-               } elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
-                       $output = rot13($input);
-               } else {
-                       die "bad command '$command'";
-               }
-
-               if ( $pathname eq "error.r" ) {
-                       print $debug "[ERROR]\n";
-                       $debug->flush();
-                       packet_txt_write("status=error");
-                       packet_flush();
-               } elsif ( $pathname eq "abort.r" ) {
-                       print $debug "[ABORT]\n";
-                       $debug->flush();
-                       packet_txt_write("status=abort");
-                       packet_flush();
-               } elsif ( $command eq "smudge" and
-                       exists $DELAY{$pathname} and
-                       $DELAY{$pathname}{"requested"} == 1 ) {
-                       print $debug "[DELAYED]\n";
-                       $debug->flush();
-                       packet_txt_write("status=delayed");
-                       packet_flush();
-                       $DELAY{$pathname}{"requested"} = 2;
-                       $DELAY{$pathname}{"output"} = $output;
-               } else {
-                       packet_txt_write("status=success");
-                       packet_flush();
-
-                       if ( $pathname eq "${command}-write-fail.r" ) {
-                               print $debug "[WRITE FAIL]\n";
-                               $debug->flush();
-                               die "${command} write error";
-                       }
-
-                       print $debug "OUT: " . length($output) . " ";
-                       $debug->flush();
-
-                       while ( length($output) > 0 ) {
-                               my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE );
-                               packet_bin_write($packet);
-                               # dots represent the number of packets
-                               print $debug ".";
-                               if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
-                                       $output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
-                               } else {
-                                       $output = "";
-                               }
-                       }
-                       packet_flush();
-                       print $debug " [OK]\n";
-                       $debug->flush();
-                       packet_flush();
-               }
-       }
-}
index 7f80f463930407410c6d3f671fb1f7833ec3b01e..a22e0e1382c42f192778e34d985c135d8008f232 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='CRLF conversion all combinations'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 compare_files () {
index 82905a2156f7daad6cbdbd1ae5e54d9235f0cdbc..c196fdb0ee21a27b6093902dcafe133abfc98ce0 100755 (executable)
@@ -5,6 +5,8 @@ test_description='working-tree-encoding conversion via gitattributes'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 . "$TEST_DIRECTORY/lib-encoding.sh"
 
@@ -69,6 +71,7 @@ test_expect_success 'check $GIT_DIR/info/attributes support' '
        test_when_finished "rm -f test.utf32.git" &&
        test_when_finished "git reset --hard HEAD" &&
 
+       mkdir .git/info &&
        echo "*.utf32 text working-tree-encoding=utf-32" >.git/info/attributes &&
        git add test.utf32 &&
 
index 0ed14971a5801b68045f27232d9912cc09154a57..471cb37ac28affec3771d0b46e8dae02d4f09716 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description='reftable unittests'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'unittests' '
index 3908597d42d705161b5b8808e55c8125f8edffe1..aecb308cf668057995a167090a421b906983f32b 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='verify safe.directory checks'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 GIT_TEST_ASSUME_DIFFERENT_OWNER=1
@@ -16,24 +17,20 @@ test_expect_success 'safe.directory is not set' '
        expect_rejected_dir
 '
 
-test_expect_success 'ignoring safe.directory on the command line' '
-       test_must_fail git -c safe.directory="$(pwd)" status 2>err &&
-       grep "dubious ownership" err
+test_expect_success 'safe.directory on the command line' '
+       git -c safe.directory="$(pwd)" status
 '
 
-test_expect_success 'ignoring safe.directory in the environment' '
-       test_must_fail env GIT_CONFIG_COUNT=1 \
-               GIT_CONFIG_KEY_0="safe.directory" \
-               GIT_CONFIG_VALUE_0="$(pwd)" \
-               git status 2>err &&
-       grep "dubious ownership" err
+test_expect_success 'safe.directory in the environment' '
+       env GIT_CONFIG_COUNT=1 \
+           GIT_CONFIG_KEY_0="safe.directory" \
+           GIT_CONFIG_VALUE_0="$(pwd)" \
+           git status
 '
 
-test_expect_success 'ignoring safe.directory in GIT_CONFIG_PARAMETERS' '
-       test_must_fail env \
-               GIT_CONFIG_PARAMETERS="${SQ}safe.directory${SQ}=${SQ}$(pwd)${SQ}" \
-               git status 2>err &&
-       grep "dubious ownership" err
+test_expect_success 'safe.directory in GIT_CONFIG_PARAMETERS' '
+       env GIT_CONFIG_PARAMETERS="${SQ}safe.directory${SQ}=${SQ}$(pwd)${SQ}" \
+           git status
 '
 
 test_expect_success 'ignoring safe.directory in repo config' '
diff --git a/t/t0035-safe-bare-repository.sh b/t/t0035-safe-bare-repository.sh
new file mode 100755 (executable)
index 0000000..ecbdc82
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+test_description='verify safe.bareRepository checks'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+pwd="$(pwd)"
+
+expect_accepted () {
+       git "$@" rev-parse --git-dir
+}
+
+expect_rejected () {
+       test_must_fail git "$@" rev-parse --git-dir 2>err &&
+       grep -F "cannot use bare repository" err
+}
+
+test_expect_success 'setup bare repo in worktree' '
+       git init outer-repo &&
+       git init --bare outer-repo/bare-repo
+'
+
+test_expect_success 'safe.bareRepository unset' '
+       expect_accepted -C outer-repo/bare-repo
+'
+
+test_expect_success 'safe.bareRepository=all' '
+       test_config_global safe.bareRepository all &&
+       expect_accepted -C outer-repo/bare-repo
+'
+
+test_expect_success 'safe.bareRepository=explicit' '
+       test_config_global safe.bareRepository explicit &&
+       expect_rejected -C outer-repo/bare-repo
+'
+
+test_expect_success 'safe.bareRepository in the repository' '
+       # safe.bareRepository must not be "explicit", otherwise
+       # git config fails with "fatal: not in a git directory" (like
+       # safe.directory)
+       test_config -C outer-repo/bare-repo safe.bareRepository \
+               all &&
+       test_config_global safe.bareRepository explicit &&
+       expect_rejected -C outer-repo/bare-repo
+'
+
+test_expect_success 'safe.bareRepository on the command line' '
+       test_config_global safe.bareRepository explicit &&
+       expect_accepted -C outer-repo/bare-repo \
+               -c safe.bareRepository=all
+'
+
+test_done
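
Outside the test suite, safe.bareRepository is ordinary configuration; for example, an administrator who wants Git to refuse bare repositories that are merely discovered (rather than named explicitly via --git-dir or GIT_DIR) could set, illustratively:

	git config --global safe.bareRepository explicit

	# and to restore the default of accepting bare repositories anywhere:
	git config --global safe.bareRepository all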
index ed2fb620a9d62d80b6fadb7fdbce76aff40cccc4..5cc62306e39c4f7ab2f6fec35d3cafc7bd65be86 100755 (executable)
@@ -456,4 +456,257 @@ test_expect_success '--end-of-options treats remainder as args' '
            --end-of-options --verbose
 '
 
+test_expect_success 'KEEP_DASHDASH works' '
+       test-tool parse-options-flags --keep-dashdash cmd --opt=1 -- --opt=2 --unknown >actual &&
+       cat >expect <<-\EOF &&
+       opt: 1
+       arg 00: --
+       arg 01: --opt=2
+       arg 02: --unknown
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'KEEP_ARGV0 works' '
+       test-tool parse-options-flags --keep-argv0 cmd arg0 --opt=3 >actual &&
+       cat >expect <<-\EOF &&
+       opt: 3
+       arg 00: cmd
+       arg 01: arg0
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'STOP_AT_NON_OPTION works' '
+       test-tool parse-options-flags --stop-at-non-option cmd --opt=4 arg0 --opt=5 --unknown >actual &&
+       cat >expect <<-\EOF &&
+       opt: 4
+       arg 00: arg0
+       arg 01: --opt=5
+       arg 02: --unknown
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'KEEP_UNKNOWN_OPT works' '
+       test-tool parse-options-flags --keep-unknown-opt cmd --unknown=1 --opt=6 -u2 >actual &&
+       cat >expect <<-\EOF &&
+       opt: 6
+       arg 00: --unknown=1
+       arg 01: -u2
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'NO_INTERNAL_HELP works for -h' '
+       test_expect_code 129 test-tool parse-options-flags --no-internal-help cmd -h 2>err &&
+       grep "^error: unknown switch \`h$SQ" err &&
+       grep "^usage: " err
+'
+
+for help_opt in help help-all
+do
+       test_expect_success "NO_INTERNAL_HELP works for --$help_opt" "
+               test_expect_code 129 test-tool parse-options-flags --no-internal-help cmd --$help_opt 2>err &&
+               grep '^error: unknown option \`'$help_opt\' err &&
+               grep '^usage: ' err
+       "
+done
+
+test_expect_success 'KEEP_UNKNOWN_OPT | NO_INTERNAL_HELP works' '
+       test-tool parse-options-flags --keep-unknown-opt --no-internal-help cmd -h --help --help-all >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       arg 00: -h
+       arg 01: --help
+       arg 02: --help-all
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - no subcommand shows error and usage' '
+       test_expect_code 129 test-tool parse-subcommand cmd 2>err &&
+       grep "^error: need a subcommand" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - subcommand after -- shows error and usage' '
+       test_expect_code 129 test-tool parse-subcommand cmd -- subcmd-one 2>err &&
+       grep "^error: need a subcommand" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - subcommand after --end-of-options shows error and usage' '
+       test_expect_code 129 test-tool parse-subcommand cmd --end-of-options subcmd-one 2>err &&
+       grep "^error: need a subcommand" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - unknown subcommand shows error and usage' '
+       test_expect_code 129 test-tool parse-subcommand cmd nope 2>err &&
+       grep "^error: unknown subcommand: \`nope$SQ" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - subcommands cannot be abbreviated' '
+       test_expect_code 129 test-tool parse-subcommand cmd subcmd-o 2>err &&
+       grep "^error: unknown subcommand: \`subcmd-o$SQ$" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - no negated subcommands' '
+       test_expect_code 129 test-tool parse-subcommand cmd no-subcmd-one 2>err &&
+       grep "^error: unknown subcommand: \`no-subcmd-one$SQ" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - simple' '
+       test-tool parse-subcommand cmd subcmd-two >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_two
+       arg 00: subcmd-two
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - stop parsing at the first subcommand' '
+       test-tool parse-subcommand cmd --opt=1 subcmd-two subcmd-one --opt=2 >actual &&
+       cat >expect <<-\EOF &&
+       opt: 1
+       fn: subcmd_two
+       arg 00: subcmd-two
+       arg 01: subcmd-one
+       arg 02: --opt=2
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - KEEP_ARGV0' '
+       test-tool parse-subcommand --keep-argv0 cmd subcmd-two >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_two
+       arg 00: cmd
+       arg 01: subcmd-two
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + subcommand not given' '
+       test-tool parse-subcommand --subcommand-optional cmd >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + given subcommand' '
+       test-tool parse-subcommand --subcommand-optional cmd subcmd-two branch file >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_two
+       arg 00: subcmd-two
+       arg 01: branch
+       arg 02: file
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + subcommand not given + unknown dashless args' '
+       test-tool parse-subcommand --subcommand-optional cmd branch file >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       arg 00: branch
+       arg 01: file
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + subcommand not given + unknown option' '
+       test_expect_code 129 test-tool parse-subcommand --subcommand-optional cmd --subcommand-opt 2>err &&
+       grep "^error: unknown option" err &&
+       grep ^usage: err
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT + subcommand not given + unknown option' '
+       test-tool parse-subcommand --subcommand-optional --keep-unknown-opt cmd --subcommand-opt >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       arg 00: --subcommand-opt
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT + subcommand ignored after unknown option' '
+       test-tool parse-subcommand --subcommand-optional --keep-unknown-opt cmd --subcommand-opt subcmd-two >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       arg 00: --subcommand-opt
+       arg 01: subcmd-two
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT + command and subcommand options cannot be mixed' '
+       test-tool parse-subcommand --subcommand-optional --keep-unknown-opt cmd --subcommand-opt branch --opt=1 >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       arg 00: --subcommand-opt
+       arg 01: branch
+       arg 02: --opt=1
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT | KEEP_ARGV0' '
+       test-tool parse-subcommand --subcommand-optional --keep-unknown-opt --keep-argv0 cmd --subcommand-opt branch >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       arg 00: cmd
+       arg 01: --subcommand-opt
+       arg 02: branch
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT | KEEP_DASHDASH' '
+       test-tool parse-subcommand --subcommand-optional --keep-unknown-opt --keep-dashdash cmd -- --subcommand-opt file >actual &&
+       cat >expect <<-\EOF &&
+       opt: 0
+       fn: subcmd_one
+       arg 00: --
+       arg 01: --subcommand-opt
+       arg 02: file
+       EOF
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommand - completion helper' '
+       test-tool parse-subcommand cmd --git-completion-helper >actual &&
+       echo "subcmd-one subcmd-two --opt= --no-opt" >expect &&
+       test_cmp expect actual
+'
+
+test_expect_success 'subcommands are incompatible with STOP_AT_NON_OPTION' '
+       test_must_fail test-tool parse-subcommand --stop-at-non-option cmd subcmd-one 2>err &&
+       grep ^BUG err
+'
+
+test_expect_success 'subcommands are incompatible with KEEP_UNKNOWN_OPT unless in combination with SUBCOMMAND_OPTIONAL' '
+       test_must_fail test-tool parse-subcommand --keep-unknown-opt cmd subcmd-two 2>err &&
+       grep ^BUG err
+'
+
+test_expect_success 'subcommands are incompatible with KEEP_DASHDASH unless in combination with SUBCOMMAND_OPTIONAL' '
+       test_must_fail test-tool parse-subcommand --keep-dashdash cmd subcmd-two 2>err &&
+       grep ^BUG err
+'
+
 test_done
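
The block above exercises the new subcommand support in the parse-options API through the parse-subcommand helper built into test-tool (a test helper, not an end-user command). A rough sketch of the dispatch behaviour those tests pin down, reusing their invocations and expected output:

    # Parsing stops at the first subcommand; later words, even ones that
    # look like subcommands or options, are passed through untouched.
    test-tool parse-subcommand cmd --opt=1 subcmd-two subcmd-one --opt=2
    #   opt: 1
    #   fn: subcmd_two
    #   arg 00: subcmd-two
    #   arg 01: subcmd-one
    #   arg 02: --opt=2

    # Unlike options, subcommands are never abbreviated or negated.
    test-tool parse-subcommand cmd subcmd-o
    #   exits 129 with "error: unknown subcommand: `subcmd-o'" plus usage
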
index 5c9dc90d0b096d9f104caedeb035b50b919b6811..325eb1c3cd0add75bcaf3b629c2692420c279f8a 100755 (executable)
@@ -5,6 +5,7 @@ test_description='Various filesystem issues'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 auml=$(printf '\303\244')
index aa35350b6f396f562463a16ffcba364ab94ed5d5..68e29c904a62c9ef51d6a1bfc3449795d50ac10f 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description='Test various path utilities'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 norm_path() {
@@ -21,7 +22,7 @@ relative_path() {
 
 test_submodule_relative_url() {
        test_expect_success "test_submodule_relative_url: $1 $2 $3 => $4" "
-               actual=\$(git submodule--helper resolve-relative-url-test '$1' '$2' '$3') &&
+               actual=\$(test-tool submodule resolve-relative-url '$1' '$2' '$3') &&
                test \"\$actual\" = '$4'
        "
 }
index 6f9a501c72b3a2520fd4d4734b45fe56a2a6d35e..ba8ad1d1ca0adeee8f001450951ec782e1fc3d74 100755 (executable)
@@ -5,7 +5,7 @@ test_description='verify sort functions'
 TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
-test_expect_success 'llist_mergesort()' '
+test_expect_success 'DEFINE_LIST_SORT_DEBUG' '
        test-tool mergesort test
 '
 
index 906757264844b67b0c72914b3aa2a6d02e17101c..d8e2fc42e15c73dfffebba57071b8d3e4bfd1ed1 100755 (executable)
@@ -5,6 +5,8 @@ test_description="Test whether cache-tree is properly updated
 Tests whether various commands properly update and/or rewrite the
 cache-tree extension.
 "
+
+TEST_PASSES_SANITIZE_LEAK=true
  . ./test-lib.sh
 
 cmp_cache_tree () {
index 08f5fe9caef0c8901e49401abf2762753d8a0b7c..b6d2f591acdd999483b2d28af8bbbe3d30008ea7 100755 (executable)
@@ -78,4 +78,52 @@ test_expect_success 'indicates populated hooks' '
        test_cmp expect actual
 '
 
+test_expect_success UNZIP '--diagnose creates diagnostics zip archive' '
+       test_when_finished rm -rf report &&
+
+       git bugreport --diagnose -o report -s test >out &&
+
+       zip_path=report/git-diagnostics-test.zip &&
+       grep "Available space" out &&
+       test_path_is_file "$zip_path" &&
+
+       # Check zipped archive content
+       "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out &&
+       test_file_not_empty out &&
+
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out &&
+       grep "^Total: [0-9][0-9]*" out &&
+
+       # Should not include .git directory contents by default
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--diagnose=stats excludes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git bugreport --diagnose=stats -o report -s test >out &&
+
+       # Includes pack quantity/size info
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       # Does not include .git directory contents
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--diagnose=all includes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git bugreport --diagnose=all -o report -s test >out &&
+
+       # Includes .git directory contents
+       "$GIT_UNZIP" -l "$zip_path" | grep ".git/" &&
+
+       "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out &&
+       test_file_not_empty out
+'
+
 test_done
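
A short usage sketch of the option added above, following the paths used in these tests (the archive name is derived from the suffix passed with -s):

    # Write the bug report plus a diagnostics archive into ./report.
    git bugreport --diagnose -o report -s test
    #   -> report/git-diagnostics-test.zip containing diagnostics.log,
    #      packs-local.txt and objects-local.txt, but no .git/ contents

    # Pack the complete .git directory contents into the archive as well.
    git bugreport --diagnose=all -o report -s test
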
diff --git a/t/t0092-diagnose.sh b/t/t0092-diagnose.sh
new file mode 100755 (executable)
index 0000000..fca9b58
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+test_description='git diagnose'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success UNZIP 'creates diagnostics zip archive' '
+       test_when_finished rm -rf report &&
+
+       git diagnose -o report -s test >out &&
+       grep "Available space" out &&
+
+       zip_path=report/git-diagnostics-test.zip &&
+       test_path_is_file "$zip_path" &&
+
+       # Check zipped archive content
+       "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out &&
+       test_file_not_empty out &&
+
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out &&
+       grep "^Total: [0-9][0-9]*" out &&
+
+       # Should not include .git directory contents by default
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--mode=stats excludes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git diagnose -o report -s test --mode=stats >out &&
+
+       # Includes pack quantity/size info
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       # Does not include .git directory contents
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--mode=all includes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git diagnose -o report -s test --mode=all >out &&
+
+       # Includes pack quantity/size info
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       # Includes .git directory contents
+       "$GIT_UNZIP" -l "$zip_path" | grep ".git/" &&
+
+       "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out &&
+       test_file_not_empty out
+'
+
+test_done
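
The standalone command covered by this new script behaves like the bugreport option, with --mode selecting how much is captured. A sketch mirroring the test invocations:

    # Default ("stats"): summary data only, no repository contents.
    git diagnose -o report -s test
    unzip -l report/git-diagnostics-test.zip    # no .git/ entries listed

    # --mode=all additionally copies the .git directory into the zip.
    git diagnose -o report -s test --mode=all
    unzip -p report/git-diagnostics-test.zip .git/HEAD
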
index 5945973552a6af6ac1971920b573086b081f6160..b567383eb836bff0c743522692cecdb354cbd0e6 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='Testing the various Bloom filter computations in bloom.c'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'compute unseeded murmur3 hash for empty string' '
@@ -67,7 +69,7 @@ test_expect_success 'compute bloom key for test string 2' '
        test_cmp expect actual
 '
 
-test_expect_success 'get bloom filters for commit with no changes' '
+test_expect_success !SANITIZE_LEAK 'get bloom filters for commit with no changes' '
        git init &&
        git commit --allow-empty -m "c0" &&
        cat >expect <<-\EOF &&
index 4dc9fecf7241ef416c7dfa0616d6d9df491fe673..12d817fbd34002cf5e8cd6561551b23303c27f6f 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='urlmatch URL normalization'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 # The base name of the test url files
index df2ea34932bcfe99cd5719cc2e2f587e6b09c49e..5a6f28051bd275577110ddbba5618f756ca0937e 100755 (executable)
@@ -7,22 +7,12 @@ test_description='Perl gettext interface (Git::I18N)'
 
 TEST_PASSES_SANITIZE_LEAK=true
 . ./lib-gettext.sh
+. "$TEST_DIRECTORY"/lib-perl.sh
+skip_all_if_no_Test_More
 
-if ! test_have_prereq PERL; then
-       skip_all='skipping perl interface tests, perl not available'
-       test_done
-fi
-
-perl -MTest::More -e 0 2>/dev/null || {
-       skip_all="Perl Test::More unavailable, skipping test"
-       test_done
-}
-
-# The external test will outputs its own plan
-test_external_has_tap=1
-
-test_external_without_stderr \
-    'Perl Git::I18N API' \
-    perl "$TEST_DIRECTORY"/t0202/test.pl
+test_expect_success 'run t0202/test.pl to test Git::I18N.pm' '
+       "$PERL_PATH" "$TEST_DIRECTORY"/t0202/test.pl 2>stderr &&
+       test_must_be_empty stderr
+'
 
 test_done
index 0ce1f22eff66285ee0da9b1830de961555fbad2b..86cff324ff181110cf1ef8a44af6c4295b7f5d40 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description="The Git C functions aren't broken by setlocale(3)"
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./lib-gettext.sh
 
 test_expect_success 'git show a ISO-8859-1 commit under C locale' '
index b6408560c0ca934601cda3fed8091510a3918745..30a9f51e9f1ecc7820c244fe1bb604e0acea8e94 100644 (file)
@@ -216,12 +216,19 @@ while (<>) {
 
     elsif ($event eq 'data') {
        my $cat = $line->{'category'};
-       if ($cat eq 'test_category') {
-           
-           my $key = $line->{'key'};
-           my $value = $line->{'value'};
-           $processes->{$sid}->{'data'}->{$cat}->{$key} = $value;
-       }
+       my $key = $line->{'key'};
+       my $value = $line->{'value'};
+       $processes->{$sid}->{'data'}->{$cat}->{$key} = $value;
+    }
+
+    elsif ($event eq 'data_json') {
+       # NEEDSWORK: Ignore due to
+       # compat/win32/trace2_win32_process_info.c, which should log a
+       # "cmd_ancestry" event instead.
+    }
+
+    else {
+       push @{$processes->{$sid}->{$event}} => $line->{value};
     }
 
     # This trace2 target does not emit 'printf' events.
index dadf3b14583bec460a51cbf69362a875dd3cc867..23b8942edba45c32822886ee93acef9b5474ea02 100755 (executable)
@@ -88,7 +88,8 @@ done
 
 for opt in --buffer \
        --follow-symlinks \
-       --batch-all-objects
+       --batch-all-objects \
+       -z
 do
        test_expect_success "usage: bad option combination: $opt without batch mode" '
                test_incompatible_usage git cat-file $opt &&
@@ -100,6 +101,10 @@ echo_without_newline () {
     printf '%s' "$*"
 }
 
+echo_without_newline_nul () {
+       echo_without_newline "$@" | tr '\n' '\0'
+}
+
 strlen () {
     echo_without_newline "$1" | wc -c | sed -e 's/^ *//'
 }
@@ -398,6 +403,12 @@ test_expect_success '--batch with multiple sha1s gives correct format' '
        test "$(maybe_remove_timestamp "$batch_output" 1)" = "$(maybe_remove_timestamp "$(echo_without_newline "$batch_input" | git cat-file --batch)" 1)"
 '
 
+test_expect_success '--batch, -z with multiple sha1s gives correct format' '
+       echo_without_newline_nul "$batch_input" >in &&
+       test "$(maybe_remove_timestamp "$batch_output" 1)" = \
+       "$(maybe_remove_timestamp "$(git cat-file --batch -z <in)" 1)"
+'
+
 batch_check_input="$hello_sha1
 $tree_sha1
 $commit_sha1
@@ -418,6 +429,30 @@ test_expect_success "--batch-check with multiple sha1s gives correct format" '
     "$(echo_without_newline "$batch_check_input" | git cat-file --batch-check)"
 '
 
+test_expect_success "--batch-check, -z with multiple sha1s gives correct format" '
+    echo_without_newline_nul "$batch_check_input" >in &&
+    test "$batch_check_output" = "$(git cat-file --batch-check -z <in)"
+'
+
+test_expect_success FUNNYNAMES '--batch-check, -z with newline in input' '
+       touch -- "newline${LF}embedded" &&
+       git add -- "newline${LF}embedded" &&
+       git commit -m "file with newline embedded" &&
+       test_tick &&
+
+       printf "HEAD:newline${LF}embedded" >in &&
+       git cat-file --batch-check -z <in >actual &&
+
+       echo "$(git rev-parse "HEAD:newline${LF}embedded") blob 0" >expect &&
+       test_cmp expect actual
+'
+
+batch_command_multiple_info="info $hello_sha1
+info $tree_sha1
+info $commit_sha1
+info $tag_sha1
+info deadbeef"
+
 test_expect_success '--batch-command with multiple info calls gives correct format' '
        cat >expect <<-EOF &&
        $hello_sha1 blob $hello_size
@@ -427,17 +462,23 @@ test_expect_success '--batch-command with multiple info calls gives correct form
        deadbeef missing
        EOF
 
-       git cat-file --batch-command --buffer >actual <<-EOF &&
-       info $hello_sha1
-       info $tree_sha1
-       info $commit_sha1
-       info $tag_sha1
-       info deadbeef
-       EOF
+       echo "$batch_command_multiple_info" >in &&
+       git cat-file --batch-command --buffer <in >actual &&
+
+       test_cmp expect actual &&
+
+       echo "$batch_command_multiple_info" | tr "\n" "\0" >in &&
+       git cat-file --batch-command --buffer -z <in >actual &&
 
        test_cmp expect actual
 '
 
+batch_command_multiple_contents="contents $hello_sha1
+contents $commit_sha1
+contents $tag_sha1
+contents deadbeef
+flush"
+
 test_expect_success '--batch-command with multiple command calls gives correct format' '
        remove_timestamp >expect <<-EOF &&
        $hello_sha1 blob $hello_size
@@ -449,13 +490,14 @@ test_expect_success '--batch-command with multiple command calls gives correct f
        deadbeef missing
        EOF
 
-       git cat-file --batch-command --buffer >actual_raw <<-EOF &&
-       contents $hello_sha1
-       contents $commit_sha1
-       contents $tag_sha1
-       contents deadbeef
-       flush
-       EOF
+       echo "$batch_command_multiple_contents" >in &&
+       git cat-file --batch-command --buffer <in >actual_raw &&
+
+       remove_timestamp <actual_raw >actual &&
+       test_cmp expect actual &&
+
+       echo "$batch_command_multiple_contents" | tr "\n" "\0" >in &&
+       git cat-file --batch-command --buffer -z <in >actual_raw &&
 
        remove_timestamp <actual_raw >actual &&
        test_cmp expect actual
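
The new -z switch exercised above makes --batch, --batch-check and --batch-command read NUL-terminated requests instead of newline-terminated ones, which is what allows a path with an embedded newline to be queried at all. A minimal sketch (the path matches the FUNNYNAMES test; $oid stands for any object name):

    # Query a path whose name contains a newline.
    printf 'HEAD:newline\nembedded' | git cat-file --batch-check -z

    # NUL-terminate each --batch-command request instead of using newlines.
    printf 'info %s\0' "$oid" | git cat-file --batch-command --buffer -z
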
index 63a553d7b32224550a48008772e396e88455af11..742f0fa909fd6e8a7fe2b376b3e5083a30d71c73 100755 (executable)
@@ -11,6 +11,7 @@ test_description='sparse checkout tests
   A    init.t
 '
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-read-tree.sh
 
@@ -53,6 +54,7 @@ test_expect_success 'read-tree without .git/info/sparse-checkout' '
 '
 
 test_expect_success 'read-tree with .git/info/sparse-checkout but disabled' '
+       mkdir .git/info &&
        echo >.git/info/sparse-checkout &&
        read_tree_u_must_succeed -m -u HEAD &&
        git ls-files -t >result &&
index 9fdbb2af80e0a82429289d06b24d4dcfac3f263d..45eef9457fe13d9eef6e463405190ca84c67bda5 100755 (executable)
@@ -6,6 +6,7 @@
 test_description='Try various core-level commands in subdirectory.
 '
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-read-tree.sh
 
index 042b0e442929b427d48049610e89810a495903bb..f6709c9f569ec7170d24694b369abf0d6f8518ec 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='test conversion filters on large files'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 set_attr() {
index 5b8e47e346c710ac4ddb4592d707c9b9fdf358a7..35261afc9d6d02fe1d9d1572591406b293522fa8 100755 (executable)
@@ -139,4 +139,11 @@ test_expect_success 'internal tree objects are not "missing"' '
        )
 '
 
+test_expect_success 'partial clone of corrupted repository' '
+       test_config -C misnamed uploadpack.allowFilter true &&
+       git clone --no-local --no-checkout --filter=blob:none \
+               misnamed corrupt-partial && \
+       test_must_fail git -C corrupt-partial checkout --force
+'
+
 test_done
index d1833c0f31b467e1dc5173a7332bfbb16c5de05c..3a14218b245d4cf67f9bdde4c18a7eee8ac6524a 100755 (executable)
@@ -5,6 +5,7 @@ test_description='sparse checkout scope tests'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 test_expect_success 'setup' '
@@ -25,6 +26,7 @@ test_expect_success 'create feature branch' '
 
 test_expect_success 'perform sparse checkout of main' '
        git config --local --bool core.sparsecheckout true &&
+       mkdir .git/info &&
        echo "!/*" >.git/info/sparse-checkout &&
        echo "/a" >>.git/info/sparse-checkout &&
        echo "/c" >>.git/info/sparse-checkout &&
@@ -73,7 +75,7 @@ test_expect_success 'skip-worktree on files outside sparse patterns' '
 
 test_expect_success 'in partial clone, sparse checkout only fetches needed blobs' '
        test_create_repo server &&
-       git clone "file://$(pwd)/server" client &&
+       git clone --template= "file://$(pwd)/server" client &&
 
        test_config -C server uploadpack.allowfilter 1 &&
        test_config -C server uploadpack.allowanysha1inwant 1 &&
@@ -85,6 +87,7 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs
        git -C server commit -m message &&
 
        test_config -C client core.sparsecheckout 1 &&
+       mkdir client/.git/info &&
        echo "!/*" >client/.git/info/sparse-checkout &&
        echo "/a" >>client/.git/info/sparse-checkout &&
        git -C client fetch --filter=blob:none origin &&
index 5dced39889fff99335148bd00f900fab4038622d..b9350c075c2a02db00c154bb3522a087208cdc84 100755 (executable)
@@ -380,6 +380,15 @@ test_expect_success 'checkout with modified sparse directory' '
        test_all_match git checkout base
 '
 
+test_expect_success 'checkout orphan then non-orphan' '
+       init_repos &&
+
+       test_all_match git checkout --orphan test-orphan &&
+       test_all_match git status --porcelain=v2 &&
+       test_all_match git checkout base &&
+       test_all_match git status --porcelain=v2
+'
+
 test_expect_success 'add outside sparse cone' '
        init_repos &&
 
@@ -556,7 +565,7 @@ test_expect_success 'blame with pathspec inside sparse definition' '
                        deep/deeper1/a \
                        deep/deeper1/deepest/a
        do
-               test_all_match git blame $file
+               test_all_match git blame $file || return 1
        done
 '
 
@@ -567,7 +576,7 @@ test_expect_success 'blame with pathspec outside sparse definition' '
        init_repos &&
        test_sparse_match git sparse-checkout set &&
 
-       for file in \
+       for file in \
                        deep/a \
                        deep/deeper1/a \
                        deep/deeper1/deepest/a
@@ -579,7 +588,7 @@ test_expect_success 'blame with pathspec outside sparse definition' '
                # We compare sparse-checkout-err and sparse-index-err in
                # `test_sparse_match`. Given we know they are the same, we
                # only check the content of sparse-index-err here.
-               test_cmp expect sparse-index-err
+               test_cmp expect sparse-index-err || return 1
        done
 '
 
@@ -937,7 +946,7 @@ test_expect_success 'read-tree --prefix' '
        test_all_match git read-tree --prefix=deep/deeper1/deepest -u deepest &&
        test_all_match git status --porcelain=v2 &&
 
-       test_all_match git rm -rf --sparse folder1/ &&
+       run_on_all git rm -rf --sparse folder1/ &&
        test_all_match git read-tree --prefix=folder1/ -u update-folder1 &&
        test_all_match git status --porcelain=v2 &&
 
@@ -1365,10 +1374,14 @@ ensure_not_expanded () {
                shift &&
                test_must_fail env \
                        GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
-                       git -C sparse-index "$@" || return 1
+                       git -C sparse-index "$@" \
+                       >sparse-index-out \
+                       2>sparse-index-error || return 1
        else
                GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
-                       git -C sparse-index "$@" || return 1
+                       git -C sparse-index "$@" \
+                       >sparse-index-out \
+                       2>sparse-index-error || return 1
        fi &&
        test_region ! index ensure_full_index trace2.txt
 }
@@ -1567,7 +1580,7 @@ test_expect_success 'sparse index is not expanded: blame' '
                        deep/deeper1/a \
                        deep/deeper1/deepest/a
        do
-               ensure_not_expanded blame $file
+               ensure_not_expanded blame $file || return 1
        done
 '
 
@@ -1853,4 +1866,119 @@ test_expect_success 'checkout behaves oddly with df-conflict-2' '
        test_cmp full-checkout-err sparse-index-err
 '
 
+test_expect_success 'mv directory from out-of-cone to in-cone' '
+       init_repos &&
+
+       # <source> as a sparse directory (or SKIP_WORKTREE_DIR without enabling
+       # sparse index).
+       test_all_match git mv --sparse folder1 deep &&
+       test_all_match git status --porcelain=v2 &&
+       test_sparse_match git ls-files -t &&
+       git -C sparse-checkout ls-files -t >actual &&
+       grep -e "H deep/folder1/0/0/0" actual &&
+       grep -e "H deep/folder1/0/1" actual &&
+       grep -e "H deep/folder1/a" actual &&
+
+       test_all_match git reset --hard &&
+
+       # <source> as a directory deeper than sparse index boundary (where
+       # sparse index will expand).
+       test_sparse_match git mv --sparse folder1/0 deep &&
+       test_sparse_match git status --porcelain=v2 &&
+       test_sparse_match git ls-files -t &&
+       git -C sparse-checkout ls-files -t >actual &&
+       grep -e "H deep/0/0/0" actual &&
+       grep -e "H deep/0/1" actual
+'
+
+test_expect_success 'rm pathspec inside sparse definition' '
+       init_repos &&
+
+       test_all_match git rm deep/a &&
+       test_all_match git status --porcelain=v2 &&
+
+       # test wildcard
+       run_on_all git reset --hard &&
+       test_all_match git rm deep/* &&
+       test_all_match git status --porcelain=v2 &&
+
+       # test recursive rm
+       run_on_all git reset --hard &&
+       test_all_match git rm -r deep &&
+       test_all_match git status --porcelain=v2
+'
+
+test_expect_success 'rm pathspec outside sparse definition' '
+       init_repos &&
+
+       for file in folder1/a folder1/0/1
+       do
+               test_sparse_match test_must_fail git rm $file &&
+               test_sparse_match test_must_fail git rm --cached $file &&
+               test_sparse_match git rm --sparse $file &&
+               test_sparse_match git status --porcelain=v2 || return 1
+       done &&
+
+       cat >folder1-full <<-EOF &&
+       rm ${SQ}folder1/0/0/0${SQ}
+       rm ${SQ}folder1/0/1${SQ}
+       rm ${SQ}folder1/a${SQ}
+       EOF
+
+       cat >folder1-sparse <<-EOF &&
+       rm ${SQ}folder1/${SQ}
+       EOF
+
+       # test wildcard
+       run_on_sparse git reset --hard &&
+       run_on_sparse git sparse-checkout reapply &&
+       test_sparse_match test_must_fail git rm folder1/* &&
+       run_on_sparse git rm --sparse folder1/* &&
+       test_cmp folder1-full sparse-checkout-out &&
+       test_cmp folder1-sparse sparse-index-out &&
+       test_sparse_match git status --porcelain=v2 &&
+
+       # test recursive rm
+       run_on_sparse git reset --hard &&
+       run_on_sparse git sparse-checkout reapply &&
+       test_sparse_match test_must_fail git rm --sparse folder1 &&
+       run_on_sparse git rm --sparse -r folder1 &&
+       test_cmp folder1-full sparse-checkout-out &&
+       test_cmp folder1-sparse sparse-index-out &&
+       test_sparse_match git status --porcelain=v2
+'
+
+test_expect_success 'rm pathspec expands index when necessary' '
+       init_repos &&
+
+       # in-cone pathspec (do not expand)
+       ensure_not_expanded rm "deep/deep*" &&
+       test_must_be_empty sparse-index-err &&
+
+       # out-of-cone pathspec (expand)
+       ! ensure_not_expanded rm --sparse "folder1/a*" &&
+       test_must_be_empty sparse-index-err &&
+
+       # pathspec that should expand index
+       ! ensure_not_expanded rm "*/a" &&
+       test_must_be_empty sparse-index-err &&
+
+       ! ensure_not_expanded rm "**a" &&
+       test_must_be_empty sparse-index-err
+'
+
+test_expect_success 'sparse index is not expanded: rm' '
+       init_repos &&
+
+       ensure_not_expanded rm deep/a &&
+
+       # test in-cone wildcard
+       git -C sparse-index reset --hard &&
+       ensure_not_expanded rm deep/* &&
+
+       # test recursive rm
+       git -C sparse-index reset --hard &&
+       ensure_not_expanded rm -r deep
+'
+
 test_done
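
The rm/mv additions above come down to the following behaviour in a cone-mode sparse checkout like the ones init_repos creates (paths as in the tests); a sketch:

    # Inside the sparse cone, rm behaves as usual and the sparse index
    # can stay collapsed.
    git rm deep/a

    # Outside the cone the pathspec is refused unless --sparse is given,
    # in which case the SKIP_WORKTREE index entries are removed.
    git rm folder1/a            # fails without --sparse
    git rm --sparse folder1/a
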
index 84bf1970d8bfe7b13327fb0d66f691dcfa58cbb0..93a2f91f8a5181c5307c6de14ab6f2823d2b959a 100755 (executable)
@@ -48,7 +48,7 @@ done
 test_expect_success 'shared=all' '
        mkdir sub &&
        cd sub &&
-       git init --shared=all &&
+       git init --template= --shared=all &&
        test 2 = $(git config core.sharedrepository)
 '
 
@@ -57,6 +57,7 @@ test_expect_success POSIXPERM 'update-server-info honors core.sharedRepository'
        git add a1 &&
        test_tick &&
        git commit -m a1 &&
+       mkdir .git/info &&
        umask 0277 &&
        git update-server-info &&
        actual="$(ls -l .git/info/refs)" &&
index 9fb0b90f252aa17a04a52678eb48a3a9bf9883ee..0c204089b83595bc516e9c26416cd67191d3c083 100755 (executable)
@@ -165,4 +165,14 @@ test_expect_success 'symbolic-ref can resolve d/f name (ENOTDIR)' '
        test_cmp expect actual
 '
 
+test_expect_success 'symbolic-ref refuses invalid target for non-HEAD' '
+       test_must_fail git symbolic-ref refs/heads/invalid foo..bar
+'
+
+test_expect_success 'symbolic-ref allows top-level target for non-HEAD' '
+       git symbolic-ref refs/heads/top-level FETCH_HEAD &&
+       git update-ref FETCH_HEAD HEAD &&
+       test_cmp_rev top-level HEAD
+'
+
 test_done
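
The two new tests pin down which targets a non-HEAD symref may take; in short:

    # A branch symref may point at another top-level ref such as FETCH_HEAD...
    git symbolic-ref refs/heads/top-level FETCH_HEAD

    # ...but a syntactically invalid ref name is rejected.
    git symbolic-ref refs/heads/invalid foo..bar    # fails
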
index cabc516ae9a4fa1c404c245f4b3b9efd7e93200e..5ed9d7318e0cc97435952039c7e8c1af842a24a9 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='Test git check-ref-format'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 valid_ref() {
index 51f829162819740fea8b080db3be6b422452b366..e4627cf1b61f0b3128a79e28f15a9d5e5693e30c 100755 (executable)
@@ -5,6 +5,7 @@ test_description='test main ref store api'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 RUN="test-tool ref-store main"
index ad8006c81397336fc5919858d0b13ec4d564b723..05b1881c5911780b53a3d882dbe1a4dfae0034d8 100755 (executable)
@@ -5,6 +5,7 @@ test_description='test worktree ref store api'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 RWT="test-tool ref-store worktree:wt"
index d51ecd5e9250f004e0b0c894b308172237e5666c..2268bca3c11ac8e2d73e3a399b8debc44fc712cb 100755 (executable)
@@ -4,6 +4,7 @@ test_description='Test reflog display routines'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index 53c2aa10b72745f96c6619fc5f8e5c9f50a48d54..ace4556788de761e225567787bc34ac4e6dc18bf 100755 (executable)
@@ -507,6 +507,54 @@ test_expect_success 'rev-list --verify-objects with bad sha1' '
        test_i18ngrep -q "error: hash mismatch $(dirname $new)$(test_oid ff_2)" out
 '
 
+# An actual bit corruption is more likely than swapped commits, but
+# this provides an easy way to have commits which don't match their purported
+# hashes, but which aren't so broken we can't read them at all.
+test_expect_success 'rev-list --verify-objects notices swapped commits' '
+       git init swapped-commits &&
+       (
+               cd swapped-commits &&
+               test_commit one &&
+               test_commit two &&
+               one_oid=$(git rev-parse HEAD) &&
+               two_oid=$(git rev-parse HEAD^) &&
+               one=.git/objects/$(test_oid_to_path $one_oid) &&
+               two=.git/objects/$(test_oid_to_path $two_oid) &&
+               mv $one tmp &&
+               mv $two $one &&
+               mv tmp $two &&
+               test_must_fail git rev-list --verify-objects HEAD
+       )
+'
+
+test_expect_success 'set up repository with commit-graph' '
+       git init corrupt-graph &&
+       (
+               cd corrupt-graph &&
+               test_commit one &&
+               test_commit two &&
+               git commit-graph write --reachable
+       )
+'
+
+corrupt_graph_obj () {
+       oid=$(git -C corrupt-graph rev-parse "$1") &&
+       obj=corrupt-graph/.git/objects/$(test_oid_to_path $oid) &&
+       test_when_finished 'mv backup $obj' &&
+       mv $obj backup &&
+       echo garbage >$obj
+}
+
+test_expect_success 'rev-list --verify-objects with commit graph (tip)' '
+       corrupt_graph_obj HEAD &&
+       test_must_fail git -C corrupt-graph rev-list --verify-objects HEAD
+'
+
+test_expect_success 'rev-list --verify-objects with commit graph (parent)' '
+       corrupt_graph_obj HEAD^ &&
+       test_must_fail git -C corrupt-graph rev-list --verify-objects HEAD
+'
+
 test_expect_success 'force fsck to ignore double author' '
        git cat-file commit HEAD >basis &&
        sed "s/^author .*/&,&/" <basis | tr , \\n >multiple-authors &&
index 1c2df08333bc600e4fc22f1f55c1d6950b876d67..0e13bcb4ebbf703337bda31e5714620dcc2cf636 100755 (executable)
@@ -4,6 +4,7 @@ test_description='test git rev-parse'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_one () {
index 284fe18e7262ae9198d61a34142aa2e82696ce2a..de1d48f3ba29116d073b0a5573a2031fae7fcc1d 100755 (executable)
@@ -306,6 +306,13 @@ test_expect_success 'test --parseopt help output: "wrapped" options normal "or:"
        test_cmp expect actual
 '
 
+test_expect_success 'test --parseopt invalid opt-spec' '
+       test_write_lines x -- "=, x" >spec &&
+       echo "fatal: missing opt-spec before option flags" >expect &&
+       test_must_fail git rev-parse --parseopt -- >out <spec 2>err &&
+       test_cmp expect err
+'
+
 test_expect_success 'test --parseopt help output: multi-line blurb after empty line' '
        sed -e "s/^|//" >spec <<-\EOF &&
        |cmd [--some-option]
index ba43168d1237ad5216e3ba5ed22069323fbf93b0..bc136833c1098d1c4e43d4b05cfadc7bef50b24c 100755 (executable)
@@ -132,7 +132,7 @@ test_expect_success 'use --default' '
        test_must_fail git rev-parse --verify --default bar
 '
 
-test_expect_success !SANITIZE_LEAK 'main@{n} for various n' '
+test_expect_success 'main@{n} for various n' '
        git reflog >out &&
        N=$(wc -l <out) &&
        Nm1=$(($N-1)) &&
index 5dc221ef382df13089b84dcb03002c2a8f19a9ba..d8fa489998acc5984da7e21a8a2adf43cfe88cee 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description='racy split index'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index 7705e3a31708355c05271aa9d5eafa5af359043f..5d119871d416cd4e79b3bff7c56aeac729b276e6 100755 (executable)
@@ -3,6 +3,7 @@
 test_description='basic checkout-index tests
 '
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'checkout-index --gobbledegook' '
index 52e51b0726f39c3e7ffbedad5a77484b8bec6070..771c3c3c50e60583e3ba8dffbb62f47a7619f35b 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='checkout'
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 # Arguments: [!] <branch> <oid> [<checkout options>]
@@ -257,11 +258,12 @@ test_expect_success 'checkout -b to a new branch preserves mergeable changes des
                git checkout branch1-scratch &&
                test_might_fail git branch -D branch3 &&
                git config core.sparseCheckout false &&
-               rm .git/info/sparse-checkout" &&
+               rm -rf .git/info" &&
 
        test_commit file2 &&
 
        echo stuff >>file1 &&
+       mkdir .git/info &&
        echo file2 >.git/info/sparse-checkout &&
        git config core.sparseCheckout true &&
 
index bc46713a43e24193bae9a4be204455eb5c066501..2eab6474f8d0f4b3116455c95cb71cd69f78a311 100755 (executable)
@@ -4,6 +4,7 @@ test_description='checkout into detached HEAD state'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 check_detached () {
index 7b327b754494a8ebb27bb11c7f70649063ccd8d2..81e772fb4ebbf1d6c4d156560aa10dfdb02a0852 100755 (executable)
@@ -7,6 +7,7 @@ Ensures that checkout -m on a resolved file restores the conflicted file'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index c683e60007219807b18bfb42bac83c297cdc8234..00ce3033d3489285b8580d6e23f3eeb24074862f 100755 (executable)
@@ -230,12 +230,9 @@ test_expect_success SYMLINKS 'parallel checkout checks for symlinks in leading d
 # check the final report including sequential, parallel, and delayed entries
 # all at the same time. So we must have finer control of the parallel checkout
 # variables.
-test_expect_success PERL '"git checkout ." report should not include failed entries' '
-       write_script rot13-filter.pl "$PERL_PATH" \
-               <"$TEST_DIRECTORY"/t0021/rot13-filter.pl &&
-
+test_expect_success '"git checkout ." report should not include failed entries' '
        test_config_global filter.delay.process \
-               "\"$(pwd)/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" &&
+               "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" &&
        test_config_global filter.delay.required true &&
        test_config_global filter.cat.clean cat  &&
        test_config_global filter.cat.smudge cat  &&
index 252545796182e3e4c469284f1a99a8691a0812a8..f3511cd43a9ddc929c886ef7c44f4e4704b6c367 100755 (executable)
@@ -138,12 +138,9 @@ test_expect_success 'parallel-checkout and external filter' '
 # The delayed queue is independent from the parallel queue, and they should be
 # able to work together in the same checkout process.
 #
-test_expect_success PERL 'parallel-checkout and delayed checkout' '
-       write_script rot13-filter.pl "$PERL_PATH" \
-               <"$TEST_DIRECTORY"/t0021/rot13-filter.pl &&
-
+test_expect_success 'parallel-checkout and delayed checkout' '
        test_config_global filter.delay.process \
-               "\"$(pwd)/rot13-filter.pl\" --always-delay \"$(pwd)/delayed.log\" clean smudge delay" &&
+               "test-tool rot13-filter --always-delay --log=\"$(pwd)/delayed.log\" clean smudge delay" &&
        test_config_global filter.delay.required true &&
 
        echo "abcd" >original &&
index 43d950de6400fca709286c5316e71e037791f04f..98265ba1b495eb91df0fa60467ebf0ac932cf1b5 100755 (executable)
@@ -17,6 +17,7 @@ outside the repository.  Two instances for which this can occur are tested:
           repository can be added to the index.
        '
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success '1a: setup--config worktree' '
index 2f564d533d05965fff88f51bc0f1280abae43518..f3242fef6b65772ea973a1906210950cc0411f73 100755 (executable)
@@ -5,6 +5,7 @@ test_description='test git worktree add'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 . "$TEST_DIRECTORY"/lib-rebase.sh
@@ -229,6 +230,7 @@ test_expect_success 'checkout with grafts' '
        SHA1=$(git rev-parse HEAD) &&
        test_commit def &&
        test_commit xyz &&
+       mkdir .git/info &&
        echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
        cat >expected <<-\EOF &&
        xyz
@@ -559,6 +561,8 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' '
 '
 
 post_checkout_hook () {
+       test_when_finished "rm -rf .git/hooks" &&
+       mkdir .git/hooks &&
        test_hook -C "$1" post-checkout <<-\EOF
        {
                echo $*
index a4e1a178e0a00335affa95d566728e3085804b47..1168e9f998232ccfa3d4c7c2e875b95299f23ead 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='test git worktree move, remove, lock and unlock'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
diff --git a/t/t2407-worktree-heads.sh b/t/t2407-worktree-heads.sh
new file mode 100755 (executable)
index 0000000..019a40d
--- /dev/null
@@ -0,0 +1,180 @@
+#!/bin/sh
+
+test_description='test operations trying to overwrite refs at worktree HEAD'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+       test_commit init &&
+
+       for i in 1 2 3 4
+       do
+               git checkout -b conflict-$i &&
+               echo "not I" >$i.t &&
+               git add $i.t &&
+               git commit -m "will conflict" &&
+
+               git checkout - &&
+               test_commit $i &&
+               git branch wt-$i &&
+               git branch fake-$i &&
+               git worktree add wt-$i wt-$i || return 1
+       done &&
+
+       # Create a server that updates each branch by one commit
+       git init server &&
+       test_commit -C server initial &&
+       git remote add server ./server &&
+       for i in 1 2 3 4
+       do
+               git -C server checkout -b wt-$i &&
+               test_commit -C server A-$i || return 1
+       done &&
+       for i in 1 2
+       do
+               git -C server checkout -b fake-$i &&
+               test_commit -C server f-$i || return 1
+       done
+'
+
+test_expect_success 'refuse to overwrite: checked out in worktree' '
+       for i in 1 2 3 4
+       do
+               test_must_fail git branch -f wt-$i HEAD 2>err &&
+               grep "cannot force update the branch" err &&
+
+               test_must_fail git branch -D wt-$i 2>err &&
+               grep "Cannot delete branch" err || return 1
+       done
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in bisect' '
+       test_when_finished git -C wt-4 bisect reset &&
+
+       # Set up a bisect so HEAD no longer points to wt-4.
+       git -C wt-4 bisect start &&
+       git -C wt-4 bisect bad wt-4 &&
+       git -C wt-4 bisect good wt-1 &&
+
+       test_must_fail git branch -f wt-4 HEAD 2>err &&
+       grep "cannot force update the branch '\''wt-4'\'' checked out at.*wt-4" err
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in rebase (apply)' '
+       test_when_finished git -C wt-2 rebase --abort &&
+
+       # This will fail part-way through due to a conflict.
+       test_must_fail git -C wt-2 rebase --apply conflict-2 &&
+
+       test_must_fail git branch -f wt-2 HEAD 2>err &&
+       grep "cannot force update the branch '\''wt-2'\'' checked out at.*wt-2" err
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in rebase (merge)' '
+       test_when_finished git -C wt-2 rebase --abort &&
+
+       # This will fail part-way through due to a conflict.
+       test_must_fail git -C wt-2 rebase conflict-2 &&
+
+       test_must_fail git branch -f wt-2 HEAD 2>err &&
+       grep "cannot force update the branch '\''wt-2'\'' checked out at.*wt-2" err
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in rebase with --update-refs' '
+       test_when_finished git -C wt-3 rebase --abort &&
+
+       git branch -f can-be-updated wt-3 &&
+       test_must_fail git -C wt-3 rebase --update-refs conflict-3 &&
+
+       for i in 3 4
+       do
+               test_must_fail git branch -f can-be-updated HEAD 2>err &&
+               grep "cannot force update the branch '\''can-be-updated'\'' checked out at.*wt-3" err ||
+                       return 1
+       done
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to fetch over ref: checked out' '
+       test_must_fail git fetch server +refs/heads/wt-3:refs/heads/wt-3 2>err &&
+       grep "refusing to fetch into branch '\''refs/heads/wt-3'\''" err &&
+
+       # General fetch into refs/heads/ will fail on first ref,
+       # so use a generic error message check.
+       test_must_fail git fetch server +refs/heads/*:refs/heads/* 2>err &&
+       grep "refusing to fetch into branch" err
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to fetch over ref: worktree in bisect' '
+       test_when_finished git -C wt-4 bisect reset &&
+
+       # Set up a bisect so HEAD no longer points to wt-4.
+       git -C wt-4 bisect start &&
+       git -C wt-4 bisect bad wt-4 &&
+       git -C wt-4 bisect good wt-1 &&
+
+       test_must_fail git fetch server +refs/heads/wt-4:refs/heads/wt-4 2>err &&
+       grep "refusing to fetch into branch" err
+'
+
+test_expect_success !SANITIZE_LEAK 'refuse to fetch over ref: worktree in rebase' '
+       test_when_finished git -C wt-3 rebase --abort &&
+
+       # This will fail part-way through due to a conflict.
+       test_must_fail git -C wt-3 rebase conflict-3 &&
+
+       test_must_fail git fetch server +refs/heads/wt-3:refs/heads/wt-3 2>err &&
+       grep "refusing to fetch into branch" err
+'
+
+test_expect_success 'refuse to overwrite when in error states' '
+       test_when_finished rm -rf .git/worktrees/wt-*/rebase-merge &&
+       test_when_finished rm -rf .git/worktrees/wt-*/BISECT_* &&
+
+       # Both branches are currently under rebase.
+       mkdir -p .git/worktrees/wt-3/rebase-merge &&
+       touch .git/worktrees/wt-3/rebase-merge/interactive &&
+       echo refs/heads/fake-1 >.git/worktrees/wt-3/rebase-merge/head-name &&
+       echo refs/heads/fake-2 >.git/worktrees/wt-3/rebase-merge/onto &&
+       mkdir -p .git/worktrees/wt-4/rebase-merge &&
+       touch .git/worktrees/wt-4/rebase-merge/interactive &&
+       echo refs/heads/fake-2 >.git/worktrees/wt-4/rebase-merge/head-name &&
+       echo refs/heads/fake-1 >.git/worktrees/wt-4/rebase-merge/onto &&
+
+       # Both branches are currently under bisect.
+       touch .git/worktrees/wt-4/BISECT_LOG &&
+       echo refs/heads/fake-2 >.git/worktrees/wt-4/BISECT_START &&
+       touch .git/worktrees/wt-1/BISECT_LOG &&
+       echo refs/heads/fake-1 >.git/worktrees/wt-1/BISECT_START &&
+
+       for i in 1 2
+       do
+               test_must_fail git branch -f fake-$i HEAD 2>err &&
+               grep "cannot force update the branch '\''fake-$i'\'' checked out at" err ||
+                       return 1
+       done
+'
+
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+test_expect_success !SANITIZE_LEAK 'refuse to overwrite during rebase with --update-refs' '
+       git commit --fixup HEAD~2 --allow-empty &&
+       (
+               set_cat_todo_editor &&
+               test_must_fail git rebase -i --update-refs HEAD~3 >todo &&
+               ! grep "update-refs" todo
+       ) &&
+       git branch -f allow-update HEAD~2 &&
+       (
+               set_cat_todo_editor &&
+               test_must_fail git rebase -i --update-refs HEAD~3 >todo &&
+               grep "update-ref refs/heads/allow-update" todo
+       )
+'
+
+# This must be the last test in this file
+test_expect_success '$EDITOR and friends are unchanged' '
+       test_editor_unchanged
+'
+
+test_done
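
This new script checks that a branch checked out in any worktree cannot be clobbered, whether by branch -f or by fetching into it, and that the protection also holds while that worktree is mid-rebase or mid-bisect. In terms of the setup above, roughly:

    git worktree add wt-1 wt-1 &&
    test_must_fail git branch -f wt-1 HEAD &&                        # "cannot force update the branch"
    test_must_fail git fetch server +refs/heads/wt-1:refs/heads/wt-1 # "refusing to fetch into branch"
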
index e07ac6c6dce93f50ff1900fc42e151f5370935a4..1ed0aa967ece5a38cf49bc43178e4fba187d4cd4 100755 (executable)
@@ -103,7 +103,7 @@ test_expect_success 'git ls-files --others with various exclude options.' '
        test_cmp expect output
 '
 
-test_expect_success !SANITIZE_LEAK 'restore gitignore' '
+test_expect_success 'restore gitignore' '
        git checkout --ignore-skip-worktree-bits $allignores &&
        rm .git/index
 '
@@ -126,7 +126,7 @@ cat > expect << EOF
 #      three/
 EOF
 
-test_expect_success !SANITIZE_LEAK 'git status honors core.excludesfile' \
+test_expect_success 'git status honors core.excludesfile' \
        'test_cmp expect output'
 
 test_expect_success 'trailing slash in exclude allows directory match(1)' '
index 2682b1f43a666564a6f74bd20deca547f721ac34..190e2f6eed758229579a85118106e80d05c8d7d8 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git ls-files --deduplicate test'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
diff --git a/t/t3013-ls-files-format.sh b/t/t3013-ls-files-format.sh
new file mode 100755 (executable)
index 0000000..efb7450
--- /dev/null
@@ -0,0 +1,95 @@
+#!/bin/sh
+
+test_description='git ls-files --format test'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+for flag in -s -o -k -t --resolve-undo --deduplicate --eol
+do
+       test_expect_success "usage: --format is incompatible with $flag" '
+               test_expect_code 129 git ls-files --format="%(objectname)" $flag
+       '
+done
+
+test_expect_success 'setup' '
+       printf "LINEONE\nLINETWO\nLINETHREE\n" >o1.txt &&
+       printf "LINEONE\r\nLINETWO\r\nLINETHREE\r\n" >o2.txt &&
+       printf "LINEONE\r\nLINETWO\nLINETHREE\n" >o3.txt &&
+       git add o?.txt &&
+       oid=$(git hash-object o1.txt) &&
+       git update-index --add --cacheinfo 120000 $oid o4.txt &&
+       git update-index --add --cacheinfo 160000 $oid o5.txt &&
+       git update-index --add --cacheinfo 100755 $oid o6.txt &&
+       git commit -m base
+'
+
+test_expect_success 'git ls-files --format objectmode v.s. -s' '
+       git ls-files -s >files &&
+       cut -d" " -f1 files >expect &&
+       git ls-files --format="%(objectmode)" >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format objectname v.s. -s' '
+       git ls-files -s >files &&
+       cut -d" " -f2 files >expect &&
+       git ls-files --format="%(objectname)" >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format v.s. --eol' '
+       git ls-files --eol >tmp &&
+       sed -e "s/      / /g" -e "s/  */ /g" tmp >expect 2>err &&
+       test_must_be_empty err &&
+       git ls-files --format="i/%(eolinfo:index) w/%(eolinfo:worktree) attr/%(eolattr) %(path)" >actual 2>err &&
+       test_must_be_empty err &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format path v.s. -s' '
+       git ls-files -s >files &&
+       cut -f2 files >expect &&
+       git ls-files --format="%(path)" >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format with -m' '
+       echo change >o1.txt &&
+       cat >expect <<-\EOF &&
+       o1.txt
+       o4.txt
+       o5.txt
+       o6.txt
+       EOF
+       git ls-files --format="%(path)" -m >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format with -d' '
+       echo o7 >o7.txt &&
+       git add o7.txt &&
+       rm o7.txt &&
+       cat >expect <<-\EOF &&
+       o4.txt
+       o5.txt
+       o6.txt
+       o7.txt
+       EOF
+       git ls-files --format="%(path)" -d >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format v.s -s' '
+       git ls-files --stage >expect &&
+       git ls-files --format="%(objectmode) %(objectname) %(stage)%x09%(path)" >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git ls-files --format with --debug' '
+       git ls-files --debug >expect &&
+       git ls-files --format="%(path)" --debug >actual &&
+       test_cmp expect actual
+'
+
+test_done
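
The placeholders covered by this new script can reproduce the traditional listings; two equivalences taken straight from the tests:

    # Same output as 'git ls-files --stage':
    git ls-files --format='%(objectmode) %(objectname) %(stage)%x09%(path)'

    # End-of-line information comparable to 'git ls-files --eol':
    git ls-files --format='i/%(eolinfo:index) w/%(eolinfo:worktree) attr/%(eolattr) %(path)'
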
index d12e4e4cc6c64073fcef4e71ba6a9c52ce8af1dc..459beaf7d9cec09cfbc617d6461eacd1d5917dff 100755 (executable)
@@ -162,7 +162,7 @@ test_expect_success 'A^! and A^-<n> (unmodified)' '
 '
 
 test_expect_success 'A^{/..} is not mistaken for a range' '
-       test_must_fail git range-diff topic^.. topic^{/..} 2>error &&
+       test_must_fail git range-diff topic^.. topic^{/..} -- 2>error &&
        test_i18ngrep "not a commit range" error
 '
 
@@ -772,6 +772,17 @@ test_expect_success '--left-only/--right-only' '
        test_cmp expect actual
 '
 
+test_expect_success 'ranges with pathspecs' '
+       git range-diff topic...mode-only-change -- other-file >actual &&
+       test_line_count = 2 actual &&
+       topic_oid=$(git rev-parse --short topic) &&
+       mode_change_oid=$(git rev-parse --short mode-only-change^) &&
+       file_change_oid=$(git rev-parse --short mode-only-change) &&
+       grep "$mode_change_oid" actual &&
+       ! grep "$file_change_oid" actual &&
+       ! grep "$topic_oid" actual
+'
+
 test_expect_success 'submodule changes are shown irrespective of diff.submodule' '
        git init sub-repo &&
        test_commit -C sub-repo sub-first &&
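
With the change above, range-diff accepts a pathspec after the usual "--" separator and only pairs up commits whose diffs touch it; a sketch using the branches from this test file:

    # Restrict the comparison to commits touching other-file.
    git range-diff topic...mode-only-change -- other-file
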
index d742be8840276a52ef5887659d5405e1f9b08404..3288aaec7dc9362dc923b963e31ccd05005ba351 100755 (executable)
@@ -505,6 +505,11 @@ test_expect_success 'list notes with "git notes"' '
        test_cmp expect actual
 '
 
+test_expect_success '"git notes" without subcommand does not take arguments' '
+       test_expect_code 129 git notes HEAD^^ 2>err &&
+       grep "^error: unknown subcommand" err
+'
+
 test_expect_success 'list specific note with "git notes list <object>"' '
        git rev-parse refs/notes/commits:$commit_3 >expect &&
        git notes list HEAD^^ >actual &&
index 03dfcd3954cee5402dde186c6edd17b47db211dd..2c3a2452668c514ce40c107537cc4e0c1abe5312 100755 (executable)
@@ -5,6 +5,7 @@ test_description='Test notes trees that also contain non-notes'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 number_of_commits=100
index 64a9915761a82a6ea8d640dabfe3811db6450a5a..22ffe5bcb9908d914585dbc90e59194023bd9ff5 100755 (executable)
@@ -51,7 +51,7 @@ test_expect_success 'creating many notes with git-notes' '
        done
 '
 
-test_expect_success !SANITIZE_LEAK 'many notes created correctly with git-notes' '
+test_expect_success 'many notes created correctly with git-notes' '
        git log >output.raw &&
        grep "^    " output.raw >output &&
        i=$num_notes &&
index 1aa366a410e9a3e2ad4c2fa84431198fbb553a5f..ae316502c4531b7cdadfddff12e2f95ea7c9797c 100755 (executable)
@@ -4,6 +4,7 @@ test_description='Examples from the git-notes man page
 
 Make sure the manual is not full of lies.'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index f31afd4a54754a982e26b78271317ea61a3b9cda..688b01e3eb6a387c0d9ad36031dd8beab46d9419 100755 (executable)
@@ -1743,6 +1743,279 @@ test_expect_success 'ORIG_HEAD is updated correctly' '
        test_cmp_rev ORIG_HEAD test-orig-head@{1}
 '
 
+test_expect_success '--update-refs adds label and update-ref commands' '
+       git checkout -b update-refs no-conflict-branch &&
+       git branch -f base HEAD~4 &&
+       git branch -f first HEAD~3 &&
+       git branch -f second HEAD~3 &&
+       git branch -f third HEAD~1 &&
+       git commit --allow-empty --fixup=third &&
+       git branch -f is-not-reordered &&
+       git commit --allow-empty --fixup=HEAD~4 &&
+       git branch -f shared-tip &&
+       (
+               set_cat_todo_editor &&
+
+               cat >expect <<-EOF &&
+               pick $(git log -1 --format=%h J) J
+               fixup $(git log -1 --format=%h update-refs) fixup! J # empty
+               update-ref refs/heads/second
+               update-ref refs/heads/first
+               pick $(git log -1 --format=%h K) K
+               pick $(git log -1 --format=%h L) L
+               fixup $(git log -1 --format=%h is-not-reordered) fixup! L # empty
+               update-ref refs/heads/third
+               pick $(git log -1 --format=%h M) M
+               update-ref refs/heads/no-conflict-branch
+               update-ref refs/heads/is-not-reordered
+               update-ref refs/heads/shared-tip
+               EOF
+
+               test_must_fail git rebase -i --autosquash --update-refs primary >todo &&
+               test_cmp expect todo &&
+
+               test_must_fail git -c rebase.autosquash=true \
+                                  -c rebase.updaterefs=true \
+                                  rebase -i primary >todo &&
+
+               test_cmp expect todo
+       )
+'
+
+test_expect_success '--update-refs adds commands with --rebase-merges' '
+       git checkout -b update-refs-with-merge no-conflict-branch &&
+       git branch -f base HEAD~4 &&
+       git branch -f first HEAD~3 &&
+       git branch -f second HEAD~3 &&
+       git branch -f third HEAD~1 &&
+       git merge -m merge branch2 &&
+       git branch -f merge-branch &&
+       git commit --fixup=third --allow-empty &&
+       (
+               set_cat_todo_editor &&
+
+               cat >expect <<-EOF &&
+               label onto
+               reset onto
+               pick $(git log -1 --format=%h branch2~1) F
+               pick $(git log -1 --format=%h branch2) I
+               update-ref refs/heads/branch2
+               label merge
+               reset onto
+               pick $(git log -1 --format=%h refs/heads/second) J
+               update-ref refs/heads/second
+               update-ref refs/heads/first
+               pick $(git log -1 --format=%h refs/heads/third~1) K
+               pick $(git log -1 --format=%h refs/heads/third) L
+               fixup $(git log -1 --format=%h update-refs-with-merge) fixup! L # empty
+               update-ref refs/heads/third
+               pick $(git log -1 --format=%h HEAD~2) M
+               update-ref refs/heads/no-conflict-branch
+               merge -C $(git log -1 --format=%h HEAD~1) merge # merge
+               update-ref refs/heads/merge-branch
+               EOF
+
+               test_must_fail git rebase -i --autosquash \
+                                  --rebase-merges=rebase-cousins \
+                                  --update-refs primary >todo &&
+
+               test_cmp expect todo &&
+
+               test_must_fail git -c rebase.autosquash=true \
+                                  -c rebase.updaterefs=true \
+                                  rebase -i \
+                                  --rebase-merges=rebase-cousins \
+                                  primary >todo &&
+
+               test_cmp expect todo
+       )
+'
+
+test_expect_success '--update-refs updates refs correctly' '
+       git checkout -B update-refs no-conflict-branch &&
+       git branch -f base HEAD~4 &&
+       git branch -f first HEAD~3 &&
+       git branch -f second HEAD~3 &&
+       git branch -f third HEAD~1 &&
+       test_commit extra2 fileX &&
+       git commit --amend --fixup=L &&
+
+       git rebase -i --autosquash --update-refs primary 2>err &&
+
+       test_cmp_rev HEAD~3 refs/heads/first &&
+       test_cmp_rev HEAD~3 refs/heads/second &&
+       test_cmp_rev HEAD~1 refs/heads/third &&
+       test_cmp_rev HEAD refs/heads/no-conflict-branch &&
+
+       cat >expect <<-\EOF &&
+       Successfully rebased and updated refs/heads/update-refs.
+       Updated the following refs with --update-refs:
+               refs/heads/first
+               refs/heads/no-conflict-branch
+               refs/heads/second
+               refs/heads/third
+       EOF
+
+       # Clear "Rebasing (X/Y)" progress lines and drop leading tabs.
+       sed -e "s/Rebasing.*Successfully/Successfully/g" -e "s/^\t//g" \
+               <err >err.trimmed &&
+       test_cmp expect err.trimmed
+'
+
+test_expect_success 'respect user edits to update-ref steps' '
+       git checkout -B update-refs-break no-conflict-branch &&
+       git branch -f base HEAD~4 &&
+       git branch -f first HEAD~3 &&
+       git branch -f second HEAD~3 &&
+       git branch -f third HEAD~1 &&
+       git branch -f unseen base &&
+
+       # First, we will add breaks to the expected todo file
+       cat >fake-todo-1 <<-EOF &&
+       pick $(git rev-parse HEAD~3)
+       break
+       update-ref refs/heads/second
+       update-ref refs/heads/first
+
+       pick $(git rev-parse HEAD~2)
+       pick $(git rev-parse HEAD~1)
+       update-ref refs/heads/third
+
+       pick $(git rev-parse HEAD)
+       update-ref refs/heads/no-conflict-branch
+       EOF
+
+       # Second, we will drop some update-refs commands (and move one)
+       cat >fake-todo-2 <<-EOF &&
+       update-ref refs/heads/second
+
+       pick $(git rev-parse HEAD~2)
+       update-ref refs/heads/third
+       pick $(git rev-parse HEAD~1)
+       break
+
+       pick $(git rev-parse HEAD)
+       EOF
+
+       # Third, we will:
+       # * insert a new one (new-branch),
+       # * re-add an old one (first), and
+       # * add a second instance of a previously-stored one (second)
+       cat >fake-todo-3 <<-EOF &&
+       update-ref refs/heads/unseen
+       update-ref refs/heads/new-branch
+       pick $(git rev-parse HEAD)
+       update-ref refs/heads/first
+       update-ref refs/heads/second
+       EOF
+
+       (
+               set_replace_editor fake-todo-1 &&
+               git rebase -i --update-refs primary &&
+
+               # These branches are currently locked.
+               for b in first second third no-conflict-branch
+               do
+                       test_must_fail git branch -f $b base || return 1
+               done &&
+
+               set_replace_editor fake-todo-2 &&
+               git rebase --edit-todo &&
+
+               # These branches are currently locked.
+               for b in second third
+               do
+                       test_must_fail git branch -f $b base || return 1
+               done &&
+
+               # These branches are currently unlocked for checkout.
+               for b in first no-conflict-branch
+               do
+                       git worktree add wt-$b $b &&
+                       git worktree remove wt-$b || return 1
+               done &&
+
+               git rebase --continue &&
+
+               set_replace_editor fake-todo-3 &&
+               git rebase --edit-todo &&
+
+               # These branches are currently locked.
+               for b in second third first unseen
+               do
+                       test_must_fail git branch -f $b base || return 1
+               done &&
+
+               # These branches are currently unlocked for checkout.
+               for b in no-conflict-branch
+               do
+                       git worktree add wt-$b $b &&
+                       git worktree remove wt-$b || return 1
+               done &&
+
+               git rebase --continue
+       ) &&
+
+       test_cmp_rev HEAD~2 refs/heads/third &&
+       test_cmp_rev HEAD~1 refs/heads/unseen &&
+       test_cmp_rev HEAD~1 refs/heads/new-branch &&
+       test_cmp_rev HEAD refs/heads/first &&
+       test_cmp_rev HEAD refs/heads/second &&
+       test_cmp_rev HEAD refs/heads/no-conflict-branch
+'
+
+test_expect_success '--update-refs: check failed ref update' '
+       git checkout -B update-refs-error no-conflict-branch &&
+       git branch -f base HEAD~4 &&
+       git branch -f first HEAD~3 &&
+       git branch -f second HEAD~2 &&
+       git branch -f third HEAD~1 &&
+
+       cat >fake-todo <<-EOF &&
+       pick $(git rev-parse HEAD~3)
+       break
+       update-ref refs/heads/first
+
+       pick $(git rev-parse HEAD~2)
+       update-ref refs/heads/second
+
+       pick $(git rev-parse HEAD~1)
+       update-ref refs/heads/third
+
+       pick $(git rev-parse HEAD)
+       update-ref refs/heads/no-conflict-branch
+       EOF
+
+       (
+               set_replace_editor fake-todo &&
+               git rebase -i --update-refs base
+       ) &&
+
+       # At this point, the values of first, second, and third are
+       # recorded in the update-refs file. We will force-update the
+       # "second" ref, but "git branch -f" will not work because of
+       # the lock in the update-refs file.
+       git rev-parse third >.git/refs/heads/second &&
+
+       test_must_fail git rebase --continue 2>err &&
+       grep "update_ref failed for ref '\''refs/heads/second'\''" err &&
+
+       cat >expect <<-\EOF &&
+       Updated the following refs with --update-refs:
+               refs/heads/first
+               refs/heads/no-conflict-branch
+               refs/heads/third
+       Failed to update the following refs with --update-refs:
+               refs/heads/second
+       EOF
+
+       # Clear "Rebasing (X/Y)" progress lines and drop leading tabs.
+       tail -n 6 err >err.last &&
+       sed -e "s/Rebasing.*Successfully/Successfully/g" -e "s/^\t//g" \
+               <err.last >err.trimmed &&
+       test_cmp expect err.trimmed
+'
+
 # This must be the last test in this file
 test_expect_success '$EDITOR and friends are unchanged' '
        test_editor_unchanged
index 0ad3a07bf470939a8fa53f38e83fa609f2d7371f..7a9f1127a4b974aea5c7390447b44f1798ba20ab 100755 (executable)
@@ -35,6 +35,7 @@ git_rebase_interactive () {
        ls -1pR * >>actual &&
        test_cmp expect actual &&
        set_fake_editor &&
+       mkdir .git/info &&
        echo "fake-editor.sh" >.git/info/exclude &&
        may_only_be_test_must_fail "$2" &&
        $2 git rebase -i "$1"
index 979e843c65a97cf914e9f8f966893d2c965a9052..f32799e04633fd831293d5b64106f9c856d3d407 100755 (executable)
@@ -12,6 +12,7 @@ test_description='test cherry-pick and revert with conflicts
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 pristine_detach () {
@@ -558,6 +559,7 @@ test_expect_success 'cherry-pick preserves sparse-checkout' '
                echo \"/*\" >.git/info/sparse-checkout
                git read-tree --reset -u HEAD
                rm .git/info/sparse-checkout" &&
+       mkdir .git/info &&
        echo /unrelated >.git/info/sparse-checkout &&
        git read-tree --reset -u HEAD &&
        test_must_fail git cherry-pick -Xours picked>actual &&
index b354fb39de839aba1506693ee4a0cd7d4967d656..5841f280fb2d4c3185e0a42b9d9a894d72cc4e79 100755 (executable)
@@ -7,9 +7,9 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-terminal.sh
 
-if ! test_have_prereq PERL
+if test_have_prereq !ADD_I_USE_BUILTIN,!PERL
 then
-       skip_all='skipping add -i tests, perl not available'
+       skip_all='skipping add -i (scripted) tests, perl not available'
        test_done
 fi
 
@@ -761,9 +761,33 @@ test_expect_success 'detect bogus diffFilter output' '
        git reset --hard &&
 
        echo content >test &&
-       test_config interactive.diffFilter "sed 1d" &&
+       test_config interactive.diffFilter "sed 6d" &&
        printf y >y &&
-       force_color test_must_fail git add -p <y
+       force_color test_must_fail git add -p <y >output 2>&1 &&
+       grep "mismatched output" output
+'
+
+test_expect_success 'handle iffy colored hunk headers' '
+       git reset --hard &&
+
+       echo content >test &&
+       printf n >n &&
+       force_color git -c interactive.diffFilter="sed s/.*@@.*/XX/" \
+               add -p >output 2>&1 <n &&
+       grep "^XX$" output
+'
+
+test_expect_success 'handle very large filtered diff' '
+       git reset --hard &&
+       # The specific number here is not important, but it must
+       # be large enough that the output of "git diff --color"
+       # fills up the pipe buffer. 10,000 results in ~200k of
+       # colored output.
+       test_seq 10000 >test &&
+       test_config interactive.diffFilter cat &&
+       printf y >y &&
+       force_color git add -p >output 2>&1 <y &&
+       git diff-files --exit-code -- test
 '
 
 test_expect_success 'diff.algorithm is passed to `git diff-files`' '
@@ -931,6 +955,18 @@ test_expect_success 'status ignores dirty submodules (except HEAD)' '
        ! grep dirty-otherwise output
 '
 
+test_expect_success 'handle submodules' '
+       echo 123 >>for-submodules/dirty-otherwise/initial.t &&
+
+       force_color git -C for-submodules add -p dirty-otherwise >output 2>&1 &&
+       grep "No changes" output &&
+
+       force_color git -C for-submodules add -p dirty-head >output 2>&1 <y &&
+       git -C for-submodules ls-files --stage dirty-head >actual &&
+       rev="$(git -C for-submodules/dirty-head rev-parse HEAD)" &&
+       grep "$rev" actual
+'
+
 test_expect_success 'set up pathological context' '
        git reset --hard &&
        test_write_lines a a a a a a a a a a a >a &&
index 2a4c3fd61c000d2278d362a34ae069760709172d..376cc8f4ab8429b0488ad23b0f9731c9af237124 100755 (executable)
@@ -25,7 +25,7 @@ test_expect_success 'usage on main command -h emits a summary of subcommands' '
        grep -F "or: git stash show" usage
 '
 
-test_expect_failure 'usage for subcommands should emit subcommand usage' '
+test_expect_success 'usage for subcommands should emit subcommand usage' '
        test_expect_code 129 git stash push -h >usage &&
        grep -F "usage: git stash [push" usage
 '
index 0276edbe3d389b70cace45906626276cce3db44b..4c661d4d54a779f8bc056c62e5d9e864401420c7 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='Test ref-filter and pretty APIs for commit and tag messages using CRLF'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 LIB_CRLF_BRANCHES=""
index 056e922164d04976570b4e5fc25975f1cbfc8b7a..dfcf3a0aaae3e2a72f6f41023b01c639a99c2f44 100755 (executable)
@@ -352,6 +352,8 @@ log -GF -p --pickaxe-all master
 log -IA -IB -I1 -I2 -p master
 log --decorate --all
 log --decorate=full --all
+log --decorate --clear-decorations --all
+log --decorate=full --clear-decorations --all
 
 rev-list --parents HEAD
 rev-list --children HEAD
index 3f9b872eceb734cb1e7adbd53a4de0580b1cd524..6b0b334a5d6ca160fc60ecf3f8e607a13633f562 100644 (file)
@@ -20,7 +20,7 @@ Date:   Mon Jun 26 00:06:00 2006 +0000
 
     Rearranged lines in dir/sub
 
-commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0
 Author: A U Thor <author@example.com>
 Date:   Mon Jun 26 00:06:00 2006 +0000
 
diff --git a/t/t4013/diff.log_--decorate=full_--clear-decorations_--all b/t/t4013/diff.log_--decorate=full_--clear-decorations_--all
new file mode 100644 (file)
index 0000000..1c030a6
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate=full --clear-decorations --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
diff --git a/t/t4013/diff.log_--decorate=full_--decorate-all_--all b/t/t4013/diff.log_--decorate=full_--decorate-all_--all
new file mode 100644 (file)
index 0000000..d6e7928
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate=full --decorate-all --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
index f5e20e1e14aaef179ac9570b0e16bc47077ff79d..c7df1f581410d1155d87122ce91f6890390cf90a 100644 (file)
@@ -20,7 +20,7 @@ Date:   Mon Jun 26 00:06:00 2006 +0000
 
     Rearranged lines in dir/sub
 
-commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0
 Author: A U Thor <author@example.com>
 Date:   Mon Jun 26 00:06:00 2006 +0000
 
diff --git a/t/t4013/diff.log_--decorate_--clear-decorations_--all b/t/t4013/diff.log_--decorate_--clear-decorations_--all
new file mode 100644 (file)
index 0000000..88be82c
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate --clear-decorations --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
diff --git a/t/t4013/diff.log_--decorate_--decorate-all_--all b/t/t4013/diff.log_--decorate_--decorate-all_--all
new file mode 100644 (file)
index 0000000..5d22618
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate --decorate-all --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
index fbec8ad2ef7bb263765357c2f783448184c6b808..ad5c02927943ab44a6fb986a646cd9b829786ef3 100755 (executable)
@@ -1400,6 +1400,43 @@ test_expect_success '--from omits redundant in-body header' '
        test_cmp expect patch.head
 '
 
+test_expect_success 'with --force-in-body-from, redundant in-body from is kept' '
+       git format-patch --force-in-body-from \
+               -1 --stdout --from="A U Thor <author@example.com>" >patch &&
+       cat >expect <<-\EOF &&
+       From: A U Thor <author@example.com>
+
+       From: A U Thor <author@example.com>
+
+       EOF
+       sed -ne "/^From:/p; /^$/p; /^---$/q" patch >patch.head &&
+       test_cmp expect patch.head
+'
+
+test_expect_success 'format.forceInBodyFrom, equivalent to --force-in-body-from' '
+       git -c format.forceInBodyFrom=yes format-patch \
+               -1 --stdout --from="A U Thor <author@example.com>" >patch &&
+       cat >expect <<-\EOF &&
+       From: A U Thor <author@example.com>
+
+       From: A U Thor <author@example.com>
+
+       EOF
+       sed -ne "/^From:/p; /^$/p; /^---$/q" patch >patch.head &&
+       test_cmp expect patch.head
+'
+
+test_expect_success 'format.forceInBodyFrom is overridden by --no-force-in-body-from' '
+       git -c format.forceInBodyFrom=yes format-patch --no-force-in-body-from \
+               -1 --stdout --from="A U Thor <author@example.com>" >patch &&
+       cat >expect <<-\EOF &&
+       From: A U Thor <author@example.com>
+
+       EOF
+       sed -ne "/^From:/p; /^$/p; /^---$/q" patch >patch.head &&
+       test_cmp expect patch.head
+'
+
 test_expect_success 'in-body headers trigger content encoding' '
        test_env GIT_AUTHOR_NAME="éxötìc" test_commit exotic &&
        test_when_finished "git reset --hard HEAD^" &&
index ed461f481e2af5e3d73ac628eeee1fb52218d90b..5bc28ad9f042a0476d94d9e90e5b58073cc17f99 100755 (executable)
@@ -5,6 +5,7 @@ test_description='Return value of diffs'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index 858a5522f96b63b971daf85f20e5d66aaebc48e6..c1ac09ecc7140a3dcfcdf906bb4533ba131881de 100755 (executable)
@@ -33,7 +33,7 @@ test_expect_success 'GIT_EXTERNAL_DIFF environment' '
 
 '
 
-test_expect_success !SANITIZE_LEAK 'GIT_EXTERNAL_DIFF environment should apply only to diff' '
+test_expect_success 'GIT_EXTERNAL_DIFF environment should apply only to diff' '
        GIT_EXTERNAL_DIFF=echo git log -p -1 HEAD >out &&
        grep "^diff --git a/file b/file" out
 
@@ -74,7 +74,7 @@ test_expect_success 'diff.external' '
        test_cmp expect actual
 '
 
-test_expect_success !SANITIZE_LEAK 'diff.external should apply only to diff' '
+test_expect_success 'diff.external should apply only to diff' '
        test_config diff.external echo &&
        git log -p -1 HEAD >out &&
        grep "^diff --git a/file b/file" out
index 4701796d10e1028debe4581f2fa10038b38be003..29e49d22902dd7e7566f2662174475bf35c6ebed 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='test unique sha1 abbreviation on "index from..to" line'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index 4838a1df8b4369dc5024cdd7929d851b76482805..725278ad19c720468113659dbcc63aa013ac7de0 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='diff function context'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 dir="$TEST_DIRECTORY/t4051"
index 04b8a1542a8ec3ad2ffc28964f21940d034c2ed6..9a7505cbb8bf900510e8be4fc1a636124deec366 100755 (executable)
@@ -5,6 +5,7 @@ test_description='combined diff show only paths that are different to all parent
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 # verify that diffc.expect matches output of
index 35f94957fceb2b02df9846a256f925833c6b4ba0..07323ebafe0d0cb49f08c4396116fa1f45687cd8 100755 (executable)
@@ -56,6 +56,11 @@ test_expect_success 'remerge-diff on a clean merge' '
        test_cmp expect actual
 '
 
+test_expect_success 'remerge-diff on a clean merge with a filter' '
+       git show --oneline --remerge-diff --diff-filter=U bc_resolution >actual &&
+       test_must_be_empty actual
+'
+
 test_expect_success 'remerge-diff with both a resolved conflict and an unrelated change' '
        git log -1 --oneline ab_resolution >tmp &&
        cat <<-EOF >>tmp &&
@@ -89,6 +94,22 @@ test_expect_success 'remerge-diff with both a resolved conflict and an unrelated
        test_cmp expect actual
 '
 
+test_expect_success 'pickaxe still includes additional headers for relevant changes' '
+       # reuses "expect" from the previous testcase
+
+       git log --oneline --remerge-diff -Sacht ab_resolution >tmp &&
+       sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'can filter out additional headers with pickaxe' '
+       git show --remerge-diff --submodule=log --find-object=HEAD ab_resolution >actual &&
+       test_must_be_empty actual &&
+
+       git show --remerge-diff -S"not present" --all >actual &&
+       test_must_be_empty actual
+'
+
 test_expect_success 'setup non-content conflicts' '
        git switch --orphan base &&
 
@@ -184,6 +205,14 @@ test_expect_success 'remerge-diff w/ diff-filter=U: all conflict headers, no dif
        test_cmp expect actual
 '
 
+test_expect_success 'submodule formatting ignores additional headers' '
+       # Reuses "expect" from last testcase
+
+       git show --oneline --remerge-diff --diff-filter=U --submodule=log >tmp &&
+       sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'remerge-diff w/ diff-filter=R: relevant file + conflict header' '
        git log -1 --oneline resolution >tmp &&
        cat <<-EOF >>tmp &&
index da3e64f8110d54d7243c017a9a614baa7098142f..8ff364076673747adaaa74aec3f0a966caf033e5 100755 (executable)
@@ -7,6 +7,7 @@ test_description='git apply should not get confused with type changes.
 
 '
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup repository and commits' '
index c614eaf04cca93a22157d08e9fc464639468eb3a..b375aca0d74ea3a23e6eae69077e9b286b5c144f 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git apply of i-t-a file'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 6e66352558212e9a40404ef892a0944ab3d09db3..cc15cb4ff62ab4c939b369826e5795e46f218d84 100755 (executable)
@@ -704,9 +704,12 @@ test_expect_success 'set up more tangled history' '
        git checkout -b tangle HEAD~6 &&
        test_commit tangle-a tangle-a a &&
        git merge main~3 &&
+       git update-ref refs/prefetch/merge HEAD &&
        git merge side~1 &&
+       git update-ref refs/rewritten/merge HEAD &&
        git checkout main &&
        git merge tangle &&
+       git update-ref refs/hidden/tangle HEAD &&
        git checkout -b reach &&
        test_commit reach &&
        git checkout main &&
@@ -974,9 +977,9 @@ test_expect_success 'decorate-refs-exclude and simplify-by-decoration' '
        Merge-tag-reach (HEAD -> main)
        reach (tag: reach, reach)
        seventh (tag: seventh)
-       Merge-branch-tangle
-       Merge-branch-side-early-part-into-tangle (tangle)
-       tangle-a (tag: tangle-a)
+       Merge-branch-tangle (refs/hidden/tangle)
+       Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, tangle)
+       Merge-branch-main-early-part-into-tangle (refs/prefetch/merge)
        EOF
        git log -n6 --decorate=short --pretty="tformat:%f%d" \
                --decorate-refs-exclude="*octopus*" \
@@ -1025,6 +1028,115 @@ test_expect_success 'decorate-refs and simplify-by-decoration without output' '
        test_cmp expect actual
 '
 
+test_expect_success 'decorate-refs-exclude HEAD' '
+       git log --decorate=full --oneline \
+               --decorate-refs-exclude="HEAD" >actual &&
+       ! grep HEAD actual
+'
+
+test_expect_success 'decorate-refs focus from default' '
+       git log --decorate=full --oneline \
+               --decorate-refs="refs/heads" >actual &&
+       ! grep HEAD actual
+'
+
+test_expect_success '--clear-decorations overrides defaults' '
+       cat >expect.default <<-\EOF &&
+       Merge-tag-reach (HEAD -> refs/heads/main)
+       Merge-tags-octopus-a-and-octopus-b
+       seventh (tag: refs/tags/seventh)
+       octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b)
+       octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a)
+       reach (tag: refs/tags/reach, refs/heads/reach)
+       Merge-branch-tangle
+       Merge-branch-side-early-part-into-tangle (refs/heads/tangle)
+       Merge-branch-main-early-part-into-tangle
+       tangle-a (tag: refs/tags/tangle-a)
+       Merge-branch-side
+       side-2 (tag: refs/tags/side-2, refs/heads/side)
+       side-1 (tag: refs/tags/side-1)
+       Second
+       sixth
+       fifth
+       fourth
+       third
+       second
+       initial
+       EOF
+       git log --decorate=full --pretty="tformat:%f%d" >actual &&
+       test_cmp expect.default actual &&
+
+       cat >expect.all <<-\EOF &&
+       Merge-tag-reach (HEAD -> refs/heads/main)
+       Merge-tags-octopus-a-and-octopus-b
+       seventh (tag: refs/tags/seventh)
+       octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b)
+       octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a)
+       reach (tag: refs/tags/reach, refs/heads/reach)
+       Merge-branch-tangle (refs/hidden/tangle)
+       Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle)
+       Merge-branch-main-early-part-into-tangle (refs/prefetch/merge)
+       tangle-a (tag: refs/tags/tangle-a)
+       Merge-branch-side
+       side-2 (tag: refs/tags/side-2, refs/heads/side)
+       side-1 (tag: refs/tags/side-1)
+       Second
+       sixth
+       fifth
+       fourth
+       third
+       second
+       initial
+       EOF
+       git log --decorate=full --pretty="tformat:%f%d" \
+               --clear-decorations >actual &&
+       test_cmp expect.all actual &&
+       git -c log.initialDecorationSet=all log \
+               --decorate=full --pretty="tformat:%f%d" >actual &&
+       test_cmp expect.all actual
+'
+
+test_expect_success '--clear-decorations clears previous exclusions' '
+       cat >expect.all <<-\EOF &&
+       Merge-tag-reach (HEAD -> refs/heads/main)
+       reach (tag: refs/tags/reach, refs/heads/reach)
+       Merge-tags-octopus-a-and-octopus-b
+       octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b)
+       octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a)
+       seventh (tag: refs/tags/seventh)
+       Merge-branch-tangle (refs/hidden/tangle)
+       Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle)
+       Merge-branch-main-early-part-into-tangle (refs/prefetch/merge)
+       tangle-a (tag: refs/tags/tangle-a)
+       side-2 (tag: refs/tags/side-2, refs/heads/side)
+       side-1 (tag: refs/tags/side-1)
+       initial
+       EOF
+
+       git log --decorate=full --pretty="tformat:%f%d" \
+               --simplify-by-decoration \
+               --decorate-refs-exclude="heads/octopus*" \
+               --decorate-refs="heads" \
+               --clear-decorations >actual &&
+       test_cmp expect.all actual &&
+
+       cat >expect.filtered <<-\EOF &&
+       Merge-tags-octopus-a-and-octopus-b
+       octopus-b (refs/heads/octopus-b)
+       octopus-a (refs/heads/octopus-a)
+       initial
+       EOF
+
+       git log --decorate=full --pretty="tformat:%f%d" \
+               --simplify-by-decoration \
+               --decorate-refs-exclude="heads/octopus" \
+               --decorate-refs="heads" \
+               --clear-decorations \
+               --decorate-refs-exclude="tags/" \
+               --decorate-refs="heads/octopus*" >actual &&
+       test_cmp expect.filtered actual
+'
+
 test_expect_success 'log.decorate config parsing' '
        git log --oneline --decorate=full >expect.full &&
        git log --oneline --decorate=short >expect.short &&
@@ -2112,9 +2224,9 @@ test_expect_success REFFILES 'log diagnoses bogus HEAD hash' '
        test_i18ngrep broken stderr
 '
 
-test_expect_success 'log diagnoses bogus HEAD symref' '
+test_expect_success REFFILES 'log diagnoses bogus HEAD symref' '
        git init empty &&
-       git --git-dir empty/.git symbolic-ref HEAD refs/heads/invalid.lock &&
+       echo "ref: refs/heads/invalid.lock" > empty/.git/HEAD &&
        test_must_fail git -C empty log 2>stderr &&
        test_i18ngrep broken stderr &&
        test_must_fail git -C empty log --default totally-bogus 2>stderr &&
@@ -2192,6 +2304,20 @@ test_expect_success 'log --decorate includes all levels of tag annotated tags' '
        test_cmp expect actual
 '
 
+test_expect_success 'log --decorate does not include things outside filter' '
+       reflist="refs/prefetch refs/rebase-merge refs/bundle" &&
+
+       for ref in $reflist
+       do
+               git update-ref $ref/fake HEAD || return 1
+       done &&
+
+       git log --decorate=full --oneline >actual &&
+
+       # None of the refs are visible:
+       ! grep /fake actual
+'
+
 test_expect_success 'log --end-of-options' '
        git update-ref refs/heads/--source HEAD &&
        git log --end-of-options --source >actual &&
index 0b2d21ec5510158cc9bb5b861a94bdfc6eccea12..cd1cab3e54b9170d5751279bd68e02dbc3cbcad2 100755 (executable)
@@ -963,4 +963,63 @@ test_expect_success SYMLINKS 'symlinks not respected in-tree' '
        test_cmp expect actual
 '
 
+test_expect_success 'prepare for cat-file --mailmap' '
+       rm -f .mailmap &&
+       git commit --allow-empty -m foo --author="Orig <orig@example.com>"
+'
+
+test_expect_success '--no-use-mailmap disables mailmap in cat-file' '
+       test_when_finished "rm .mailmap" &&
+       cat >.mailmap <<-EOF &&
+       A U Thor <author@example.com> Orig <orig@example.com>
+       EOF
+       cat >expect <<-EOF &&
+       author Orig <orig@example.com>
+       EOF
+       git cat-file --no-use-mailmap commit HEAD >log &&
+       sed -n "/^author /s/\([^>]*>\).*/\1/p" log >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success '--use-mailmap enables mailmap in cat-file' '
+       test_when_finished "rm .mailmap" &&
+       cat >.mailmap <<-EOF &&
+       A U Thor <author@example.com> Orig <orig@example.com>
+       EOF
+       cat >expect <<-EOF &&
+       author A U Thor <author@example.com>
+       EOF
+       git cat-file --use-mailmap commit HEAD >log &&
+       sed -n "/^author /s/\([^>]*>\).*/\1/p" log >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success '--no-mailmap disables mailmap in cat-file for annotated tag objects' '
+       test_when_finished "rm .mailmap" &&
+       cat >.mailmap <<-EOF &&
+       Orig <orig@example.com> C O Mitter <committer@example.com>
+       EOF
+       cat >expect <<-EOF &&
+       tagger C O Mitter <committer@example.com>
+       EOF
+       git tag -a -m "annotated tag" v1 &&
+       git cat-file --no-mailmap -p v1 >log &&
+       sed -n "/^tagger /s/\([^>]*>\).*/\1/p" log >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success '--mailmap enables mailmap in cat-file for annotated tag objects' '
+       test_when_finished "rm .mailmap" &&
+       cat >.mailmap <<-EOF &&
+       Orig <orig@example.com> C O Mitter <committer@example.com>
+       EOF
+       cat >expect <<-EOF &&
+       tagger Orig <orig@example.com>
+       EOF
+       git tag -a -m "annotated tag" v2 &&
+       git cat-file --mailmap -p v2 >log &&
+       sed -n "/^tagger /s/\([^>]*>\).*/\1/p" log >actual &&
+       test_cmp expect actual
+'
+
 test_done
index 36ac6aff1e40dd8ebc1bc894bf63d2228bcd32a3..ded33a82e2c94cd4f44c1df4c9fe93ce9bf5f148 100755 (executable)
@@ -3,7 +3,7 @@
 # Copyright (c) 2010 Nazri Ramliy
 #
 
-test_description='Test for "git log --decorate" colors'
+test_description='test "git log --decorate" colors'
 
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
@@ -17,6 +17,7 @@ test_expect_success setup '
        git config color.decorate.remoteBranch red &&
        git config color.decorate.tag "reverse bold yellow" &&
        git config color.decorate.stash magenta &&
+       git config color.decorate.grafted black &&
        git config color.decorate.HEAD cyan &&
 
        c_reset="<RESET>" &&
@@ -27,6 +28,7 @@ test_expect_success setup '
        c_tag="<BOLD;REVERSE;YELLOW>" &&
        c_stash="<MAGENTA>" &&
        c_HEAD="<CYAN>" &&
+       c_grafted="<BLACK>" &&
 
        test_commit A &&
        git clone . other &&
@@ -42,25 +44,79 @@ test_expect_success setup '
        git stash save Changes to A.t
 '
 
-cat >expected <<EOF
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD ->\
- ${c_reset}${c_branch}main${c_reset}${c_commit},\
- ${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit},\
- ${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit},\
- ${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset}\
- On main: Changes to A.t
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
-EOF
+cmp_filtered_decorations () {
+       sed "s/$OID_REGEX/COMMIT_ID/" actual | test_decode_color >filtered &&
+       test_cmp expect filtered
+}
 
 # We want log to show all, but the second parent to refs/stash is irrelevant
 # to this test since it does not contain any decoration, hence --first-parent
-test_expect_success 'Commit Decorations Colored Correctly' '
-       git log --first-parent --abbrev=10 --all --decorate --oneline --color=always |
-       sed "s/[0-9a-f]\{10,10\}/COMMIT_ID/" |
-       test_decode_color >out &&
-       test_cmp expected out
+test_expect_success 'commit decorations colored correctly' '
+       cat >expect <<-EOF &&
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \
+${c_reset}${c_branch}main${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B
+${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit}, \
+${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset} \
+On main: Changes to A.t
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
+       EOF
+
+       git log --first-parent --no-abbrev --decorate --oneline --color=always --all >actual &&
+       cmp_filtered_decorations
+'
+
+test_expect_success 'test coloring with replace-objects' '
+       test_when_finished rm -rf .git/refs/replace* &&
+       test_commit C &&
+       test_commit D &&
+
+       git replace HEAD~1 HEAD~2 &&
+
+       cat >expect <<-EOF &&
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \
+${c_reset}${c_branch}main${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: D${c_reset}${c_commit})${c_reset} D
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: C${c_reset}${c_commit}, \
+${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} B
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
+EOF
+
+       git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations &&
+       git replace -d HEAD~1 &&
+
+       GIT_REPLACE_REF_BASE=refs/replace2/ git replace HEAD~1 HEAD~2 &&
+       GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \
+               --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations
+'
+
+test_expect_success 'test coloring with grafted commit' '
+       test_when_finished rm -rf .git/refs/replace* &&
+
+       git replace --graft HEAD HEAD~2 &&
+
+       cat >expect <<-EOF &&
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \
+${c_reset}${c_branch}main${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: D${c_reset}${c_commit}, \
+${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} D
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
+       EOF
+
+       git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations &&
+       git replace -d HEAD &&
+
+       GIT_REPLACE_REF_BASE=refs/replace2/ git replace --graft HEAD HEAD~2 &&
+       GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \
+               --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations
 '
 
 test_done
diff --git a/t/t4301-merge-tree-write-tree.sh b/t/t4301-merge-tree-write-tree.sh
new file mode 100755 (executable)
index 0000000..28ca5c3
--- /dev/null
@@ -0,0 +1,813 @@
+#!/bin/sh
+
+test_description='git merge-tree --write-tree'
+
+. ./test-lib.sh
+
+# This test is ort-specific
+if test "$GIT_TEST_MERGE_ALGORITHM" != "ort"
+then
+       skip_all="GIT_TEST_MERGE_ALGORITHM != ort"
+       test_done
+fi
+
+test_expect_success setup '
+       test_write_lines 1 2 3 4 5 >numbers &&
+       echo hello >greeting &&
+       echo foo >whatever &&
+       git add numbers greeting whatever &&
+       test_tick &&
+       git commit -m initial &&
+
+       git branch side1 &&
+       git branch side2 &&
+       git branch side3 &&
+
+       git checkout side1 &&
+       test_write_lines 1 2 3 4 5 6 >numbers &&
+       echo hi >greeting &&
+       echo bar >whatever &&
+       git add numbers greeting whatever &&
+       test_tick &&
+       git commit -m modify-stuff &&
+
+       git checkout side2 &&
+       test_write_lines 0 1 2 3 4 5 >numbers &&
+       echo yo >greeting &&
+       git rm whatever &&
+       mkdir whatever &&
+       >whatever/empty &&
+       git add numbers greeting whatever/empty &&
+       test_tick &&
+       git commit -m other-modifications &&
+
+       git checkout side3 &&
+       git mv numbers sequence &&
+       test_tick &&
+       git commit -m rename-numbers &&
+
+       git switch --orphan unrelated &&
+       >something-else &&
+       git add something-else &&
+       test_tick &&
+       git commit -m first-commit
+'
+
+test_expect_success 'Clean merge' '
+       TREE_OID=$(git merge-tree --write-tree side1 side3) &&
+       q_to_tab <<-EOF >expect &&
+       100644 blob $(git rev-parse side1:greeting)Qgreeting
+       100644 blob $(git rev-parse side1:numbers)Qsequence
+       100644 blob $(git rev-parse side1:whatever)Qwhatever
+       EOF
+
+       git ls-tree $TREE_OID >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'Content merge and a few conflicts' '
+       git checkout side1^0 &&
+       test_must_fail git merge side2 &&
+       expected_tree=$(git rev-parse AUTO_MERGE) &&
+
+       # We will redo the merge, while we are still in a conflicted state!
+       git ls-files -u >conflicted-file-info &&
+       test_when_finished "git reset --hard" &&
+
+       test_expect_code 1 git merge-tree --write-tree side1 side2 >RESULT &&
+       actual_tree=$(head -n 1 RESULT) &&
+
+       # Due to differences of e.g. "HEAD" vs "side1", the results will not
+       # exactly match.  Dig into individual files.
+
+       # Numbers should have three-way merged cleanly
+       test_write_lines 0 1 2 3 4 5 6 >expect &&
+       git show ${actual_tree}:numbers >actual &&
+       test_cmp expect actual &&
+
+       # whatever and whatever~<branch> should have same HASHES
+       git rev-parse ${expected_tree}:whatever ${expected_tree}:whatever~HEAD >expect &&
+       git rev-parse ${actual_tree}:whatever ${actual_tree}:whatever~side1 >actual &&
+       test_cmp expect actual &&
+
+       # greeting should have a merge conflict
+       git show ${expected_tree}:greeting >tmp &&
+       sed -e s/HEAD/side1/ tmp >expect &&
+       git show ${actual_tree}:greeting >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'Barf on misspelled option, with exit code other than 0 or 1' '
+       # Mis-spell with single "s" instead of double "s"
+       test_expect_code 129 git merge-tree --write-tree --mesages FOOBAR side1 side2 2>expect &&
+
+       grep "error: unknown option.*mesages" expect
+'
+
+test_expect_success 'Barf on too many arguments' '
+       test_expect_code 129 git merge-tree --write-tree side1 side2 invalid 2>expect &&
+
+       grep "^usage: git merge-tree" expect
+'
+
+anonymize_hash() {
+       sed -e "s/[0-9a-f]\{40,\}/HASH/g" "$@"
+}
+
+test_expect_success 'test conflict notices and such' '
+       test_expect_code 1 git merge-tree --write-tree --name-only side1 side2 >out &&
+       anonymize_hash out >actual &&
+
+       # Expected results:
+       #   "greeting" should merge with conflicts
+       #   "numbers" should merge cleanly
+       #   "whatever" has *both* a modify/delete and a file/directory conflict
+       cat <<-EOF >expect &&
+       HASH
+       greeting
+       whatever~side1
+
+       Auto-merging greeting
+       CONFLICT (content): Merge conflict in greeting
+       Auto-merging numbers
+       CONFLICT (file/directory): directory in the way of whatever from side1; moving it to whatever~side1 instead.
+       CONFLICT (modify/delete): whatever~side1 deleted in side2 and modified in side1.  Version side1 of whatever~side1 left in tree.
+       EOF
+
+       test_cmp expect actual
+'
+
+# directory rename + content conflict
+#   Commit O: foo, olddir/{a,b,c}
+#   Commit A: modify foo, newdir/{a,b,c}
+#   Commit B: modify foo differently & rename foo -> olddir/bar
+#   Expected: CONFLICT(content) for newdir/bar (not olddir/bar or foo)
+
+test_expect_success 'directory rename + content conflict' '
+       # Setup
+       git init dir-rename-and-content &&
+       (
+               cd dir-rename-and-content &&
+               test_write_lines 1 2 3 4 5 >foo &&
+               mkdir olddir &&
+               for i in a b c; do echo $i >olddir/$i || exit 1; done &&
+               git add foo olddir &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 6 >foo &&
+               git add foo &&
+               git mv olddir newdir &&
+               git commit -m "Modify foo, rename olddir to newdir" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 six >foo &&
+               git add foo &&
+               git mv foo olddir/bar &&
+               git commit -m "Modify foo & rename foo -> olddir/bar"
+       ) &&
+       # Testing
+       (
+               cd dir-rename-and-content &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+               anonymize_hash out >actual &&
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 1Qnewdir/bar
+               100644 HASH 2Qnewdir/bar
+               100644 HASH 3Qnewdir/bar
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               Q2Qnewdir/barQolddir/barQCONFLICT (directory rename suggested)QCONFLICT (file location): foo renamed to olddir/bar in B^0, inside a directory that was renamed in A^0, suggesting it should perhaps be moved to newdir/bar.
+               Q1Qnewdir/barQAuto-mergingQAuto-merging newdir/bar
+               Q1Qnewdir/barQCONFLICT (contents)QCONFLICT (content): Merge conflict in newdir/bar
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# rename/delete + modify/delete handling
+#   Commit O: foo
+#   Commit A: modify foo + rename to bar
+#   Commit B: delete foo
+#   Expected: CONFLICT(rename/delete) + CONFLICT(modify/delete)
+
+test_expect_success 'rename/delete handling' '
+       # Setup
+       git init rename-delete &&
+       (
+               cd rename-delete &&
+               test_write_lines 1 2 3 4 5 >foo &&
+               git add foo &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 6 >foo &&
+               git add foo &&
+               git mv foo bar &&
+               git commit -m "Modify foo, rename to bar" &&
+
+               git checkout B &&
+               git rm foo &&
+               git commit -m "remove foo"
+       ) &&
+       # Testing
+       (
+               cd rename-delete &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+               anonymize_hash out >actual &&
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 1Qbar
+               100644 HASH 2Qbar
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               Q2QbarQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to bar in A^0, but deleted in B^0.
+               Q1QbarQCONFLICT (modify/delete)QCONFLICT (modify/delete): bar deleted in B^0 and modified in A^0.  Version A^0 of bar left in tree.
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# rename/add handling
+#   Commit O: foo
+#   Commit A: modify foo, add different bar
+#   Commit B: modify & rename foo->bar
+#   Expected: CONFLICT(add/add) [via rename collide] for bar
+
+test_expect_success 'rename/add handling' '
+       # Setup
+       git init rename-add &&
+       (
+               cd rename-add &&
+               test_write_lines original 1 2 3 4 5 >foo &&
+               git add foo &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 >foo &&
+               echo "different file" >bar &&
+               git add foo bar &&
+               git commit -m "Modify foo, add bar" &&
+
+               git checkout B &&
+               test_write_lines original 1 2 3 4 5 6 >foo &&
+               git add foo &&
+               git mv foo bar &&
+               git commit -m "rename foo to bar"
+       ) &&
+       # Testing
+       (
+               cd rename-add &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+
+               #
+               # First, check that the bar that appears at stage 3 does not
+               # correspond to an individual blob anywhere in history
+               #
+               hash=$(cat out | tr "\0" "\n" | head -n 3 | grep 3.bar | cut -f 2 -d " ") &&
+               git rev-list --objects --all >all_blobs &&
+               ! grep $hash all_blobs &&
+
+               #
+               # Second, check anonymized hash output against expectation
+               #
+               anonymize_hash out >actual &&
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 2Qbar
+               100644 HASH 3Qbar
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               Q1QbarQAuto-mergingQAuto-merging bar
+               Q1QbarQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in bar
+               Q1QfooQAuto-mergingQAuto-merging foo
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# rename/add, where add is a mode conflict
+#   Commit O: foo
+#   Commit A: modify foo, add symlink bar
+#   Commit B: modify & rename foo->bar
+#   Expected: CONFLICT(distinct modes) for bar
+
+test_expect_success SYMLINKS 'rename/add, where add is a mode conflict' '
+       # Setup
+       git init rename-add-symlink &&
+       (
+               cd rename-add-symlink &&
+               test_write_lines original 1 2 3 4 5 >foo &&
+               git add foo &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 >foo &&
+               ln -s foo bar &&
+               git add foo bar &&
+               git commit -m "Modify foo, add symlink bar" &&
+
+               git checkout B &&
+               test_write_lines original 1 2 3 4 5 6 >foo &&
+               git add foo &&
+               git mv foo bar &&
+               git commit -m "rename foo to bar"
+       ) &&
+       # Testing
+       (
+               cd rename-add-symlink &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+
+               #
+               # First, check that the bar that appears at stage 3 does not
+               # correspond to an individual blob anywhere in history
+               #
+               hash=$(cat out | tr "\0" "\n" | head -n 3 | grep 3.bar | cut -f 2 -d " ") &&
+               git rev-list --objects --all >all_blobs &&
+               ! grep $hash all_blobs &&
+
+               #
+               # Second, check anonymized hash output against expectation
+               #
+               anonymize_hash out >actual &&
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               120000 HASH 2Qbar
+               100644 HASH 3Qbar~B^0
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               Q2QbarQbar~B^0QCONFLICT (distinct modes)QCONFLICT (distinct types): bar had different types on each side; renamed one of them so each can be recorded somewhere.
+               Q1QfooQAuto-mergingQAuto-merging foo
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# rename/rename(1to2) + content conflict handling
+#   Commit O: foo
+#   Commit A: modify foo & rename to bar
+#   Commit B: modify foo & rename to baz
+#   Expected: CONFLICT(rename/rename)
+
+test_expect_success 'rename/rename + content conflict' '
+       # Setup
+       git init rr-plus-content &&
+       (
+               cd rr-plus-content &&
+               test_write_lines 1 2 3 4 5 >foo &&
+               git add foo &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 six >foo &&
+               git add foo &&
+               git mv foo bar &&
+               git commit -m "Modify foo + rename to bar" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 6 >foo &&
+               git add foo &&
+               git mv foo baz &&
+               git commit -m "Modify foo + rename to baz"
+       ) &&
+       # Testing
+       (
+               cd rr-plus-content &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+               anonymize_hash out >actual &&
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 2Qbar
+               100644 HASH 3Qbaz
+               100644 HASH 1Qfoo
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               Q1QfooQAuto-mergingQAuto-merging foo
+               Q3QfooQbarQbazQCONFLICT (rename/rename)QCONFLICT (rename/rename): foo renamed to bar in A^0 and to baz in B^0.
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# rename/add/delete
+#   Commit O: foo
+#   Commit A: rm foo, add different bar
+#   Commit B: rename foo->bar
+#   Expected: CONFLICT (rename/delete), CONFLICT(add/add) [via rename collide]
+#             for bar
+
+test_expect_success 'rename/add/delete conflict' '
+       # Setup
+       git init rad &&
+       (
+               cd rad &&
+               echo "original file" >foo &&
+               git add foo &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm foo &&
+               echo "different file" >bar &&
+               git add bar &&
+               git commit -m "Remove foo, add bar" &&
+
+               git checkout B &&
+               git mv foo bar &&
+               git commit -m "rename foo to bar"
+       ) &&
+       # Testing
+       (
+               cd rad &&
+
+               test_expect_code 1 \
+                       git merge-tree -z B^0 A^0 >out &&
+               echo >>out &&
+               anonymize_hash out >actual &&
+
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 2Qbar
+               100644 HASH 3Qbar
+
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               2QbarQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to bar in B^0, but deleted in A^0.
+               Q1QbarQAuto-mergingQAuto-merging bar
+               Q1QbarQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in bar
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# rename/rename(2to1)/delete/delete
+#   Commit O: foo, bar
+#   Commit A: rename foo->baz, rm bar
+#   Commit B: rename bar->baz, rm foo
+#   Expected: 2x CONFLICT (rename/delete), CONFLICT (add/add) via colliding
+#             renames for baz
+
+test_expect_success 'rename/rename(2to1)/delete/delete conflict' '
+       # Setup
+       git init rrdd &&
+       (
+               cd rrdd &&
+               echo foo >foo &&
+               echo bar >bar &&
+               git add foo bar &&
+               git commit -m O &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv foo baz &&
+               git rm bar &&
+               git commit -m "Rename foo, remove bar" &&
+
+               git checkout B &&
+               git mv bar baz &&
+               git rm foo &&
+               git commit -m "Rename bar, remove foo"
+       ) &&
+       # Testing
+       (
+               cd rrdd &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+               anonymize_hash out >actual &&
+
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 2Qbaz
+               100644 HASH 3Qbaz
+
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               2QbazQbarQCONFLICT (rename/delete)QCONFLICT (rename/delete): bar renamed to baz in B^0, but deleted in A^0.
+               Q2QbazQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to baz in A^0, but deleted in B^0.
+               Q1QbazQAuto-mergingQAuto-merging baz
+               Q1QbazQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in baz
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+# mod6: chains of rename/rename(1to2) + add/add via colliding renames
+#   Commit O: one,      three,       five
+#   Commit A: one->two, three->four, five->six
+#   Commit B: one->six, three->two,  five->four
+#   Expected: three CONFLICT(rename/rename) messages plus three CONFLICT(add/add)
+#             messages; each destination path (two, four, six) is the target of
+#             two colliding renames and ends up with multi-way merged contents
+
+test_expect_success 'mod6: chains of rename/rename(1to2) and add/add via colliding renames' '
+       # Setup
+       git init mod6 &&
+       (
+               cd mod6 &&
+               test_seq 11 19 >one &&
+               test_seq 31 39 >three &&
+               test_seq 51 59 >five &&
+               git add . &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_seq 10 19 >one &&
+               echo 40        >>three &&
+               git add one three &&
+               git mv  one   two  &&
+               git mv  three four &&
+               git mv  five  six  &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 20    >>one       &&
+               echo forty >>three     &&
+               echo 60    >>five      &&
+               git add one three five &&
+               git mv  one   six  &&
+               git mv  three two  &&
+               git mv  five  four &&
+               test_tick &&
+               git commit -m "B"
+       ) &&
+       # Testing
+       (
+               cd mod6 &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+
+               #
+               # First, check that some of the hashes that appear as stage
+               # conflict entries do not appear as individual blobs anywhere
+               # in history.
+               #
+               hash1=$(cat out | tr "\0" "\n" | head | grep 2.four | cut -f 2 -d " ") &&
+               hash2=$(cat out | tr "\0" "\n" | head | grep 3.two | cut -f 2 -d " ") &&
+               git rev-list --objects --all >all_blobs &&
+               ! grep $hash1 all_blobs &&
+               ! grep $hash2 all_blobs &&
+
+               #
+               # Now compare anonymized hash output with expectation
+               #
+               anonymize_hash out >actual &&
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 1Qfive
+               100644 HASH 2Qfour
+               100644 HASH 3Qfour
+               100644 HASH 1Qone
+               100644 HASH 2Qsix
+               100644 HASH 3Qsix
+               100644 HASH 1Qthree
+               100644 HASH 2Qtwo
+               100644 HASH 3Qtwo
+
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               3QfiveQsixQfourQCONFLICT (rename/rename)QCONFLICT (rename/rename): five renamed to six in A^0 and to four in B^0.
+               Q1QfourQAuto-mergingQAuto-merging four
+               Q1QfourQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in four
+               Q1QoneQAuto-mergingQAuto-merging one
+               Q3QoneQtwoQsixQCONFLICT (rename/rename)QCONFLICT (rename/rename): one renamed to two in A^0 and to six in B^0.
+               Q1QsixQAuto-mergingQAuto-merging six
+               Q1QsixQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in six
+               Q1QthreeQAuto-mergingQAuto-merging three
+               Q3QthreeQfourQtwoQCONFLICT (rename/rename)QCONFLICT (rename/rename): three renamed to four in A^0 and to two in B^0.
+               Q1QtwoQAuto-mergingQAuto-merging two
+               Q1QtwoQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in two
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
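
As a reading aid rather than part of the change: the digit in front of each conflicted path above is an index stage number, using the same convention that `git ls-files -u` prints, so merge-tree's conflicted-file section can be cross-checked against a real conflicted merge.

    # stage 1 = merge base, stage 2 = first commit given (A^0, "ours"),
    # stage 3 = second commit given (B^0, "theirs")
    git ls-files -u    # e.g. "100644 <oid> 2<TAB>two" during a conflicted merge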
+
+# directory rename + rename/delete + modify/delete + directory/file conflict
+#   Commit O: foo, olddir/{a,b,c}
+#   Commit A: delete foo, rename olddir/ -> newdir/, add newdir/bar/file
+#   Commit B: modify foo & rename foo -> olddir/bar
+#   Expected: CONFLICT(content) for newdir/bar (not olddir/bar or foo)
+
+test_expect_success 'directory rename + rename/delete + modify/delete + directory/file conflict' '
+       # Setup
+       git init 4-stacked-conflict &&
+       (
+               cd 4-stacked-conflict &&
+               test_write_lines 1 2 3 4 5 >foo &&
+               mkdir olddir &&
+               for i in a b c; do echo $i >olddir/$i || exit 1; done &&
+               git add foo olddir &&
+               git commit -m "original" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm foo &&
+               git mv olddir newdir &&
+               mkdir newdir/bar &&
+               >newdir/bar/file &&
+               git add newdir/bar/file &&
+               git commit -m "rm foo, olddir/ -> newdir/, + newdir/bar/file" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 6 >foo &&
+               git add foo &&
+               git mv foo olddir/bar &&
+               git commit -m "Modify foo & rename foo -> olddir/bar"
+       ) &&
+       # Testing
+       (
+               cd 4-stacked-conflict &&
+
+               test_expect_code 1 \
+                       git merge-tree -z A^0 B^0 >out &&
+               echo >>out &&
+               anonymize_hash out >actual &&
+
+               q_to_tab <<-\EOF | lf_to_nul >expect &&
+               HASH
+               100644 HASH 1Qnewdir/bar~B^0
+               100644 HASH 3Qnewdir/bar~B^0
+               EOF
+
+               q_to_nul <<-EOF >>expect &&
+               Q2Qnewdir/barQolddir/barQCONFLICT (directory rename suggested)QCONFLICT (file location): foo renamed to olddir/bar in B^0, inside a directory that was renamed in A^0, suggesting it should perhaps be moved to newdir/bar.
+               Q2Qnewdir/barQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to newdir/bar in B^0, but deleted in A^0.
+               Q2Qnewdir/bar~B^0Qnewdir/barQCONFLICT (file/directory)QCONFLICT (file/directory): directory in the way of newdir/bar from B^0; moving it to newdir/bar~B^0 instead.
+               Q1Qnewdir/bar~B^0QCONFLICT (modify/delete)QCONFLICT (modify/delete): newdir/bar~B^0 deleted in A^0 and modified in B^0.  Version B^0 of newdir/bar~B^0 left in tree.
+               Q
+               EOF
+               test_cmp expect actual
+       )
+'
+
+for opt in $(git merge-tree --git-completion-helper-all)
+do
+       if test $opt = "--trivial-merge" || test $opt = "--write-tree"
+       then
+               continue
+       fi
+
+       test_expect_success "usage: --trivial-merge is incompatible with $opt" '
+               test_expect_code 128 git merge-tree --trivial-merge $opt side1 side2 side3
+       '
+done
+
+test_expect_success 'Just the conflicted files without the messages' '
+       test_expect_code 1 git merge-tree --write-tree --no-messages --name-only side1 side2 >out &&
+       anonymize_hash out >actual &&
+
+       test_write_lines HASH greeting whatever~side1 >expect &&
+
+       test_cmp expect actual
+'
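
A hedged usage sketch, not taken from the tests (the branch names topic and main and the file merge.out are placeholders): --name-only suits scripts that only need the list of conflicted paths.

    # Exit status: 0 = clean merge, 1 = conflicts; anything else is an error
    # that a robust script would handle separately.
    if git merge-tree --write-tree --name-only topic main >merge.out
    then
        echo "clean; merged tree is $(head -n 1 merge.out)"
    else
        echo "conflicted paths:"
        tail -n +2 merge.out    # the first line is the toplevel tree OID
    fi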
+
+test_expect_success 'Check conflicted oids and modes without messages' '
+       test_expect_code 1 git merge-tree --write-tree --no-messages side1 side2 >out &&
+       anonymize_hash out >actual &&
+
+       # Compare the basic output format
+       q_to_tab >expect <<-\EOF &&
+       HASH
+       100644 HASH 1Qgreeting
+       100644 HASH 2Qgreeting
+       100644 HASH 3Qgreeting
+       100644 HASH 1Qwhatever~side1
+       100644 HASH 2Qwhatever~side1
+       EOF
+
+       test_cmp expect actual &&
+
+       # Check the actual hashes against the `ls-files -u` output too
+       tail -n +2 out | sed -e s/side1/HEAD/ >actual &&
+       test_cmp conflicted-file-info actual
+'
+
+test_expect_success 'NUL terminated conflicted file "lines"' '
+       git checkout -b tweak1 side1 &&
+       test_write_lines zero 1 2 3 4 5 6 >numbers &&
+       git add numbers &&
+       git mv numbers "Αυτά μου φαίνονται κινέζικα" &&
+       git commit -m "Renamed numbers" &&
+
+       test_expect_code 1 git merge-tree --write-tree -z tweak1 side2 >out &&
+       echo >>out &&
+       anonymize_hash out >actual &&
+
+       # Expected results:
+       #   "greeting" should merge with conflicts
+       #   "whatever" has *both* a modify/delete and a file/directory conflict
+       #   "Αυτά μου φαίνονται κινέζικα" should have a conflict
+       echo HASH | lf_to_nul >expect &&
+
+       q_to_tab <<-EOF | lf_to_nul >>expect &&
+       100644 HASH 1Qgreeting
+       100644 HASH 2Qgreeting
+       100644 HASH 3Qgreeting
+       100644 HASH 1Qwhatever~tweak1
+       100644 HASH 2Qwhatever~tweak1
+       100644 HASH 1QΑυτά μου φαίνονται κινέζικα
+       100644 HASH 2QΑυτά μου φαίνονται κινέζικα
+       100644 HASH 3QΑυτά μου φαίνονται κινέζικα
+
+       EOF
+
+       q_to_nul <<-EOF >>expect &&
+       1QgreetingQAuto-mergingQAuto-merging greeting
+       Q1QgreetingQCONFLICT (contents)QCONFLICT (content): Merge conflict in greeting
+       Q2Qwhatever~tweak1QwhateverQCONFLICT (file/directory)QCONFLICT (file/directory): directory in the way of whatever from tweak1; moving it to whatever~tweak1 instead.
+       Q1Qwhatever~tweak1QCONFLICT (modify/delete)QCONFLICT (modify/delete): whatever~tweak1 deleted in side2 and modified in tweak1.  Version tweak1 of whatever~tweak1 left in tree.
+       Q1QΑυτά μου φαίνονται κινέζικαQAuto-mergingQAuto-merging Αυτά μου φαίνονται κινέζικα
+       Q1QΑυτά μου φαίνονται κινέζικαQCONFLICT (contents)QCONFLICT (content): Merge conflict in Αυτά μου φαίνονται κινέζικα
+       Q
+       EOF
+
+       test_cmp expect actual
+'
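
A small consumption sketch for the -z output; it is bash-specific because of read -d '' and is not part of the patch. Every field is NUL-delimited, so pathnames containing newlines or other unusual characters arrive intact.

    #!/bin/bash
    git merge-tree --write-tree -z tweak1 side2 |
    while IFS= read -r -d '' field
    do
        printf 'field: %q\n' "$field"    # tree OID, stage entries, message parts
    done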
+
+test_expect_success 'error out by default for unrelated histories' '
+       test_expect_code 128 git merge-tree --write-tree side1 unrelated 2>error &&
+
+       grep "refusing to merge unrelated histories" error
+'
+
+test_expect_success 'can override merge of unrelated histories' '
+       git merge-tree --write-tree --allow-unrelated-histories side1 unrelated >tree &&
+       TREE=$(cat tree) &&
+
+       git rev-parse side1:numbers side1:greeting side1:whatever unrelated:something-else >expect &&
+       git rev-parse $TREE:numbers $TREE:greeting $TREE:whatever $TREE:something-else >actual &&
+
+       test_cmp expect actual
+'
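
Not part of the patch, but the natural next step on a server: the tree printed by --write-tree can be turned into a real merge commit without any worktree. The ref update below is only a sketch; ref names are placeholders.

    TREE=$(git merge-tree --write-tree --allow-unrelated-histories side1 unrelated) &&
    COMMIT=$(git commit-tree "$TREE" -p side1 -p unrelated -m "merge unrelated history") &&
    git update-ref refs/heads/side1 "$COMMIT"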
+
+test_done
index 7f8d2ab0a72dac716198a424556d1e9323f2f8c3..eaa0b22ece4ff3ce7d79faed2edf8ab829c97051 100755 (executable)
@@ -24,6 +24,7 @@ commit id embedding:
 
 '
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 SUBSTFORMAT=%H%n
@@ -143,6 +144,7 @@ test_expect_success 'populate workdir' '
 test_expect_success \
     'add ignored file' \
     'echo ignore me >a/ignored &&
+     mkdir .git/info &&
      echo ignored export-ignore >.git/info/attributes'
 
 test_expect_success 'add files to repository' '
@@ -157,7 +159,8 @@ test_expect_success 'setup export-subst' '
 '
 
 test_expect_success 'create bare clone' '
-       git clone --bare . bare.git &&
+       git clone --template= --bare . bare.git &&
+       mkdir bare.git/info &&
        cp .git/info/attributes bare.git/info/attributes
 '
 
@@ -339,21 +342,21 @@ test_expect_success 'only enabled filters are available remotely' '
        test_cmp_bin remote.bar config.bar
 '
 
-test_expect_success GZIP 'git archive --format=tgz' '
+test_expect_success 'git archive --format=tgz' '
        git archive --format=tgz HEAD >j.tgz
 '
 
-test_expect_success GZIP 'git archive --format=tar.gz' '
+test_expect_success 'git archive --format=tar.gz' '
        git archive --format=tar.gz HEAD >j1.tar.gz &&
        test_cmp_bin j.tgz j1.tar.gz
 '
 
-test_expect_success GZIP 'infer tgz from .tgz filename' '
+test_expect_success 'infer tgz from .tgz filename' '
        git archive --output=j2.tgz HEAD &&
        test_cmp_bin j.tgz j2.tgz
 '
 
-test_expect_success GZIP 'infer tgz from .tar.gz filename' '
+test_expect_success 'infer tgz from .tar.gz filename' '
        git archive --output=j3.tar.gz HEAD &&
        test_cmp_bin j.tgz j3.tar.gz
 '
@@ -363,17 +366,33 @@ test_expect_success GZIP 'extract tgz file' '
        test_cmp_bin b.tar j.tar
 '
 
-test_expect_success GZIP 'remote tar.gz is allowed by default' '
+test_expect_success 'remote tar.gz is allowed by default' '
        git archive --remote=. --format=tar.gz HEAD >remote.tar.gz &&
        test_cmp_bin j.tgz remote.tar.gz
 '
 
-test_expect_success GZIP 'remote tar.gz can be disabled' '
+test_expect_success 'remote tar.gz can be disabled' '
        git config tar.tar.gz.remote false &&
        test_must_fail git archive --remote=. --format=tar.gz HEAD \
                >remote.tar.gz
 '
 
+test_expect_success GZIP 'git archive --format=tgz (external gzip)' '
+       test_config tar.tgz.command "gzip -cn" &&
+       git archive --format=tgz HEAD >external_gzip.tgz
+'
+
+test_expect_success GZIP 'git archive --format=tar.gz (external gzip)' '
+       test_config tar.tar.gz.command "gzip -cn" &&
+       git archive --format=tar.gz HEAD >external_gzip.tar.gz &&
+       test_cmp_bin external_gzip.tgz external_gzip.tar.gz
+'
+
+test_expect_success GZIP 'extract tgz file (external gzip)' '
+       gzip -d -c <external_gzip.tgz >external_gzip.tar &&
+       test_cmp_bin b.tar external_gzip.tar
+'
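
For context, a hedged end-user sketch of the configuration these new GZIP-prerequisite tests exercise: routing tgz output through an external filter. Any gzip-compatible command should do; gzip itself is the obvious choice.

    git config tar.tgz.command "gzip -cn"
    git archive --format=tgz HEAD >snapshot.tgz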
+
 test_expect_success 'archive and :(glob)' '
        git archive -v HEAD -- ":(glob)**/sh" >/dev/null 2>actual &&
        cat >expect <<EOF &&
index 712ae52299431c3d31038123a02f895708086bdd..2f6eef5e3720452df49c4810bd9f4f63dc1167fb 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git archive attribute tests'
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 SUBSTFORMAT='%H (%h)%n'
@@ -20,6 +21,7 @@ extract_tar_to_dir () {
 
 test_expect_success 'setup' '
        echo ignored >ignored &&
+       mkdir .git/info &&
        echo ignored export-ignore >>.git/info/attributes &&
        git add ignored &&
 
@@ -46,7 +48,8 @@ test_expect_success 'setup' '
 
        git commit -m. &&
 
-       git clone --bare . bare &&
+       git clone --template= --bare . bare &&
+       mkdir bare/info &&
        cp .git/info/attributes bare/info/attributes
 '
 
index a66b5ba27e869e377a317258c03cde8953323d8f..78ab75f1bc2442144670a2df06de790ee43d82b7 100755 (executable)
@@ -3,6 +3,7 @@
 test_description='git archive attribute pattern tests'
 
 TEST_PASSES_SANITIZE_LEAK=true
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 test_expect_exists() {
@@ -15,6 +16,7 @@ test_expect_missing() {
 
 test_expect_success 'setup' '
        echo ignored >ignored &&
+       mkdir .git/info &&
        echo ignored export-ignore >>.git/info/attributes &&
        git add ignored &&
 
@@ -54,7 +56,8 @@ test_expect_success 'setup' '
 
        git commit -m. &&
 
-       git clone --bare . bare &&
+       git clone --template= --bare . bare &&
+       mkdir bare/info &&
        cp .git/info/attributes bare/info/attributes
 '
 
index 3992d08158a29ee8a9a1b73a7f7b6446308b2e6a..fc499cdff01d01a6079221e78b50a7924e5ecb71 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git archive --format=zip test'
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 SUBSTFORMAT=%H%n
@@ -121,6 +122,7 @@ test_expect_success 'prepare file list' '
 test_expect_success \
     'add ignored file' \
     'echo ignore me >a/ignored &&
+     mkdir .git/info &&
      echo ignored export-ignore >.git/info/attributes'
 
 test_expect_success 'add files to repository' '
@@ -139,7 +141,8 @@ test_expect_success 'setup export-subst and diff attributes' '
 '
 
 test_expect_success 'create bare clone' '
-       git clone --bare . bare.git &&
+       git clone --template= --bare . bare.git &&
+       mkdir bare.git/info &&
        cp .git/info/attributes bare.git/info/attributes &&
        # Recreate our changes to .git/config rather than just copying it, as
        # we do not want to clobber core.bare or other settings.
index 41e6dc4dcfc5163c8db4d7875567162ea7cc1828..2926e8dfc41223cc0030685beec8f6612db25a4d 100755 (executable)
@@ -4,6 +4,8 @@
 #
 
 test_description='resilience to pack corruptions with redundant objects'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 # Note: the test objects are created with knowledge of their pack encoding
index 693b2411c89929b7cf3b765419c1f8db96554636..655cafa054121130945056643f2fb05a165f3ef1 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='handling of duplicate objects in incoming packfiles'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-pack.sh
 
index 55b787630fcb6e0631bc6b8a1cd6626c30063417..4e910c5b9d2a9ddba8c2d808248189fe993873c3 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='test index-pack handling of delta cycles in packfiles'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-pack.sh
 
index f775fc1ce691d5337b31d067cd93944b7bae3bc5..7e50f8e7653f65128d965502b13d7aa3d61d4782 100755 (executable)
@@ -26,22 +26,415 @@ has_any () {
        grep -Ff "$1" "$2"
 }
 
-setup_bitmap_history
-
-test_expect_success 'setup writing bitmaps during repack' '
-       git config repack.writeBitmaps true
-'
-
-test_expect_success 'full repack creates bitmaps' '
-       GIT_TRACE2_EVENT="$(pwd)/trace" \
+test_bitmap_cases () {
+       writeLookupTable=false
+       for i in "$@"
+       do
+               case "$i" in
+               "pack.writeBitmapLookupTable") writeLookupTable=true;;
+               esac
+       done
+
+       test_expect_success 'setup test repository' '
+               rm -fr * .git &&
+               git init &&
+               git config pack.writeBitmapLookupTable '"$writeLookupTable"'
+       '
+       setup_bitmap_history
+
+       test_expect_success 'setup writing bitmaps during repack' '
+               git config repack.writeBitmaps true
+       '
+
+       test_expect_success 'full repack creates bitmaps' '
+               GIT_TRACE2_EVENT="$(pwd)/trace" \
+                       git repack -ad &&
+               ls .git/objects/pack/ | grep bitmap >output &&
+               test_line_count = 1 output &&
+               grep "\"key\":\"num_selected_commits\",\"value\":\"106\"" trace &&
+               grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace
+       '
+
+       basic_bitmap_tests
+
+       test_expect_success 'pack-objects respects --local (non-local loose)' '
+               git init --bare alt.git &&
+               echo $(pwd)/alt.git/objects >.git/objects/info/alternates &&
+               echo content1 >file1 &&
+               # non-local loose object which is not present in bitmapped pack
+               altblob=$(GIT_DIR=alt.git git hash-object -w file1) &&
+               # non-local loose object which is also present in bitmapped pack
+               git cat-file blob $blob | GIT_DIR=alt.git git hash-object -w --stdin &&
+               git add file1 &&
+               test_tick &&
+               git commit -m commit_file1 &&
+               echo HEAD | git pack-objects --local --stdout --revs >1.pack &&
+               git index-pack 1.pack &&
+               list_packed_objects 1.idx >1.objects &&
+               printf "%s\n" "$altblob" "$blob" >nonlocal-loose &&
+               ! has_any nonlocal-loose 1.objects
+       '
+
+       test_expect_success 'pack-objects respects --honor-pack-keep (local non-bitmapped pack)' '
+               echo content2 >file2 &&
+               blob2=$(git hash-object -w file2) &&
+               git add file2 &&
+               test_tick &&
+               git commit -m commit_file2 &&
+               printf "%s\n" "$blob2" "$bitmaptip" >keepobjects &&
+               pack2=$(git pack-objects pack2 <keepobjects) &&
+               mv pack2-$pack2.* .git/objects/pack/ &&
+               >.git/objects/pack/pack2-$pack2.keep &&
+               rm $(objpath $blob2) &&
+               echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >2a.pack &&
+               git index-pack 2a.pack &&
+               list_packed_objects 2a.idx >2a.objects &&
+               ! has_any keepobjects 2a.objects
+       '
+
+       test_expect_success 'pack-objects respects --local (non-local pack)' '
+               mv .git/objects/pack/pack2-$pack2.* alt.git/objects/pack/ &&
+               echo HEAD | git pack-objects --local --stdout --revs >2b.pack &&
+               git index-pack 2b.pack &&
+               list_packed_objects 2b.idx >2b.objects &&
+               ! has_any keepobjects 2b.objects
+       '
+
+       test_expect_success 'pack-objects respects --honor-pack-keep (local bitmapped pack)' '
+               ls .git/objects/pack/ | grep bitmap >output &&
+               test_line_count = 1 output &&
+               packbitmap=$(basename $(cat output) .bitmap) &&
+               list_packed_objects .git/objects/pack/$packbitmap.idx >packbitmap.objects &&
+               test_when_finished "rm -f .git/objects/pack/$packbitmap.keep" &&
+               >.git/objects/pack/$packbitmap.keep &&
+               echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >3a.pack &&
+               git index-pack 3a.pack &&
+               list_packed_objects 3a.idx >3a.objects &&
+               ! has_any packbitmap.objects 3a.objects
+       '
+
+       test_expect_success 'pack-objects respects --local (non-local bitmapped pack)' '
+               mv .git/objects/pack/$packbitmap.* alt.git/objects/pack/ &&
+               rm -f .git/objects/pack/multi-pack-index &&
+               test_when_finished "mv alt.git/objects/pack/$packbitmap.* .git/objects/pack/" &&
+               echo HEAD | git pack-objects --local --stdout --revs >3b.pack &&
+               git index-pack 3b.pack &&
+               list_packed_objects 3b.idx >3b.objects &&
+               ! has_any packbitmap.objects 3b.objects
+       '
+
+       test_expect_success 'pack-objects to file can use bitmap' '
+               # make sure we still have 1 bitmap index from previous tests
+               ls .git/objects/pack/ | grep bitmap >output &&
+               test_line_count = 1 output &&
+               # verify equivalent packs are generated with/without using bitmap index
+               packasha1=$(git pack-objects --no-use-bitmap-index --all packa </dev/null) &&
+               packbsha1=$(git pack-objects --use-bitmap-index --all packb </dev/null) &&
+               list_packed_objects packa-$packasha1.idx >packa.objects &&
+               list_packed_objects packb-$packbsha1.idx >packb.objects &&
+               test_cmp packa.objects packb.objects
+       '
+
+       test_expect_success 'full repack, reusing previous bitmaps' '
                git repack -ad &&
-       ls .git/objects/pack/ | grep bitmap >output &&
-       test_line_count = 1 output &&
-       grep "\"key\":\"num_selected_commits\",\"value\":\"106\"" trace &&
-       grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace
-'
+               ls .git/objects/pack/ | grep bitmap >output &&
+               test_line_count = 1 output
+       '
+
+       test_expect_success 'fetch (full bitmap)' '
+               git --git-dir=clone.git fetch origin second:second &&
+               git rev-parse HEAD >expect &&
+               git --git-dir=clone.git rev-parse HEAD >actual &&
+               test_cmp expect actual
+       '
+
+       test_expect_success 'create objects for missing-HAVE tests' '
+               blob=$(echo "missing have" | git hash-object -w --stdin) &&
+               tree=$(printf "100644 blob $blob\tfile\n" | git mktree) &&
+               parent=$(echo parent | git commit-tree $tree) &&
+               commit=$(echo commit | git commit-tree $tree -p $parent) &&
+               cat >revs <<-EOF
+               HEAD
+               ^HEAD^
+               ^$commit
+               EOF
+       '
+
+       test_expect_success 'pack-objects respects --incremental' '
+               cat >revs2 <<-EOF &&
+               HEAD
+               $commit
+               EOF
+               git pack-objects --incremental --stdout --revs <revs2 >4.pack &&
+               git index-pack 4.pack &&
+               list_packed_objects 4.idx >4.objects &&
+               test_line_count = 4 4.objects &&
+               git rev-list --objects $commit >revlist &&
+               cut -d" " -f1 revlist |sort >objects &&
+               test_cmp 4.objects objects
+       '
+
+       test_expect_success 'pack with missing blob' '
+               rm $(objpath $blob) &&
+               git pack-objects --stdout --revs <revs >/dev/null
+       '
+
+       test_expect_success 'pack with missing tree' '
+               rm $(objpath $tree) &&
+               git pack-objects --stdout --revs <revs >/dev/null
+       '
+
+       test_expect_success 'pack with missing parent' '
+               rm $(objpath $parent) &&
+               git pack-objects --stdout --revs <revs >/dev/null
+       '
+
+       test_expect_success JGIT,SHA1 'we can read jgit bitmaps' '
+               git clone --bare . compat-jgit.git &&
+               (
+                       cd compat-jgit.git &&
+                       rm -f objects/pack/*.bitmap &&
+                       jgit gc &&
+                       git rev-list --test-bitmap HEAD
+               )
+       '
+
+       test_expect_success JGIT,SHA1 'jgit can read our bitmaps' '
+               git clone --bare . compat-us.git &&
+               (
+                       cd compat-us.git &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
+                       git repack -adb &&
+                       # jgit gc will barf if it does not like our bitmaps
+                       jgit gc
+               )
+       '
+
+       test_expect_success 'splitting packs does not generate bogus bitmaps' '
+               test-tool genrandom foo $((1024 * 1024)) >rand &&
+               git add rand &&
+               git commit -m "commit with big file" &&
+               git -c pack.packSizeLimit=500k repack -adb &&
+               git init --bare no-bitmaps.git &&
+               git -C no-bitmaps.git fetch .. HEAD
+       '
+
+       test_expect_success 'set up reusable pack' '
+               rm -f .git/objects/pack/*.keep &&
+               git repack -adb &&
+               reusable_pack () {
+                       git for-each-ref --format="%(objectname)" |
+                       git pack-objects --delta-base-offset --revs --stdout "$@"
+               }
+       '
+
+       test_expect_success 'pack reuse respects --honor-pack-keep' '
+               test_when_finished "rm -f .git/objects/pack/*.keep" &&
+               for i in .git/objects/pack/*.pack
+               do
+                       >${i%.pack}.keep || return 1
+               done &&
+               reusable_pack --honor-pack-keep >empty.pack &&
+               git index-pack empty.pack &&
+               git show-index <empty.idx >actual &&
+               test_must_be_empty actual
+       '
+
+       test_expect_success 'pack reuse respects --local' '
+               mv .git/objects/pack/* alt.git/objects/pack/ &&
+               test_when_finished "mv alt.git/objects/pack/* .git/objects/pack/" &&
+               reusable_pack --local >empty.pack &&
+               git index-pack empty.pack &&
+               git show-index <empty.idx >actual &&
+               test_must_be_empty actual
+       '
+
+       test_expect_success 'pack reuse respects --incremental' '
+               reusable_pack --incremental >empty.pack &&
+               git index-pack empty.pack &&
+               git show-index <empty.idx >actual &&
+               test_must_be_empty actual
+       '
+
+       test_expect_success 'truncated bitmap fails gracefully (ewah)' '
+               test_config pack.writebitmaphashcache false &&
+               test_config pack.writebitmaplookuptable false &&
+               git repack -ad &&
+               git rev-list --use-bitmap-index --count --all >expect &&
+               bitmap=$(ls .git/objects/pack/*.bitmap) &&
+               test_when_finished "rm -f $bitmap" &&
+               test_copy_bytes 256 <$bitmap >$bitmap.tmp &&
+               mv -f $bitmap.tmp $bitmap &&
+               git rev-list --use-bitmap-index --count --all >actual 2>stderr &&
+               test_cmp expect actual &&
+               test_i18ngrep corrupt.ewah.bitmap stderr
+       '
+
+       test_expect_success 'truncated bitmap fails gracefully (cache)' '
+               git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
+               git repack -ad &&
+               git rev-list --use-bitmap-index --count --all >expect &&
+               bitmap=$(ls .git/objects/pack/*.bitmap) &&
+               test_when_finished "rm -f $bitmap" &&
+               test_copy_bytes 512 <$bitmap >$bitmap.tmp &&
+               mv -f $bitmap.tmp $bitmap &&
+               git rev-list --use-bitmap-index --count --all >actual 2>stderr &&
+               test_cmp expect actual &&
+               test_i18ngrep corrupted.bitmap.index stderr
+       '
+
+       # Create a state of history with these properties:
+       #
+       #  - refs that allow a client to fetch some new history, while sharing some old
+       #    history with the server; we use branches delta-reuse-old and
+       #    delta-reuse-new here
+       #
+       #  - the new history contains an object that is stored on the server as a delta
+       #    against a base that is in the old history
+       #
+       #  - the base object is not immediately reachable from the tip of the old
+       #    history; finding it would involve digging down through history we know the
+       #    other side has
+       #
+       # This should result in a state where fetching from old->new would not
+       # traditionally reuse the on-disk delta (because we'd have to dig to realize
+       # that the client has it), but we will do so if bitmaps can tell us cheaply
+       # that the other side has it.
+       test_expect_success 'set up thin delta-reuse parent' '
+               # This first commit contains the buried base object.
+               test-tool genrandom delta 16384 >file &&
+               git add file &&
+               git commit -m "delta base" &&
+               base=$(git rev-parse --verify HEAD:file) &&
+
+               # These intermediate commits bury the base back in history.
+               # This becomes the "old" state.
+               for i in 1 2 3 4 5
+               do
+                       echo $i >file &&
+                       git commit -am "intermediate $i" || return 1
+               done &&
+               git branch delta-reuse-old &&
+
+               # And now our new history has a delta against the buried base. Note
+               # that this must be smaller than the original file, since pack-objects
+               # prefers to create deltas from smaller objects to larger.
+               test-tool genrandom delta 16300 >file &&
+               git commit -am "delta result" &&
+               delta=$(git rev-parse --verify HEAD:file) &&
+               git branch delta-reuse-new &&
+
+               # Repack with bitmaps and double check that we have the expected delta
+               # relationship.
+               git repack -adb &&
+               have_delta $delta $base
+       '
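
A rough illustration of what the have_delta helper from lib-bitmap.sh is checking here, reusing the $delta and $base variables set in the test above; this sketches the idea and is not the helper's actual implementation.

    echo "$delta" |
    git cat-file --batch-check="%(objectname) %(deltabase)" >check &&
    echo "$delta $base" >want &&
    test_cmp want check    # %(deltabase) is the null OID for non-delta objects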
+
+       # Now we can sanity-check the non-bitmap behavior (that the server is not able
+       # to reuse the delta). This isn't strictly something we care about, so this
+       # test could be scrapped in the future. But it makes sure that the next test is
+       # actually triggering the feature we want.
+       #
+       # Note that our tools for working with on-the-wire "thin" packs are limited. So
+       # we actually perform the fetch, retain the resulting pack, and inspect the
+       # result.
+       test_expect_success 'fetch without bitmaps ignores delta against old base' '
+               test_config pack.usebitmaps false &&
+               test_when_finished "rm -rf client.git" &&
+               git init --bare client.git &&
+               (
+                       cd client.git &&
+                       git config transfer.unpackLimit 1 &&
+                       git fetch .. delta-reuse-old:delta-reuse-old &&
+                       git fetch .. delta-reuse-new:delta-reuse-new &&
+                       have_delta $delta $ZERO_OID
+               )
+       '
+
+       # And do the same for the bitmap case, where we do expect to find the delta.
+       test_expect_success 'fetch with bitmaps can reuse old base' '
+               test_config pack.usebitmaps true &&
+               test_when_finished "rm -rf client.git" &&
+               git init --bare client.git &&
+               (
+                       cd client.git &&
+                       git config transfer.unpackLimit 1 &&
+                       git fetch .. delta-reuse-old:delta-reuse-old &&
+                       git fetch .. delta-reuse-new:delta-reuse-new &&
+                       have_delta $delta $base
+               )
+       '
+
+       test_expect_success 'pack.preferBitmapTips' '
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
+
+                       # create enough commits that not all of them receive bitmap
+                       # coverage even if they are all at the tip of some reference.
+                       test_commit_bulk --message="%s" 103 &&
+
+                       git rev-list HEAD >commits.raw &&
+                       sort <commits.raw >commits &&
+
+                       git log --format="create refs/tags/%s %H" HEAD >refs &&
+                       git update-ref --stdin <refs &&
+
+                       git repack -adb &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+
+                       # remember which commits did not receive bitmaps
+                       comm -13 bitmaps commits >before &&
+                       test_file_not_empty before &&
+
+                       # mark the commits which did not receive bitmaps as preferred,
+                       # and generate the bitmap again
+                       perl -pe "s{^}{create refs/tags/include/$. }" <before |
+                               git update-ref --stdin &&
+                       git -c pack.preferBitmapTips=refs/tags/include repack -adb &&
+
+                       # finally, check that the commit(s) without bitmap coverage
+                       # are not the same ones as before
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       comm -13 bitmaps commits >after &&
+
+                       ! test_cmp before after
+               )
+       '
+
+       test_expect_success 'complains about multiple pack bitmaps' '
+               rm -fr repo &&
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
+
+                       test_commit base &&
+
+                       git repack -adb &&
+                       bitmap="$(ls .git/objects/pack/pack-*.bitmap)" &&
+                       mv "$bitmap" "$bitmap.bak" &&
+
+                       test_commit other &&
+                       git repack -ab &&
+
+                       mv "$bitmap.bak" "$bitmap" &&
+
+                       find .git/objects/pack -type f -name "*.pack" >packs &&
+                       find .git/objects/pack -type f -name "*.bitmap" >bitmaps &&
+                       test_line_count = 2 packs &&
+                       test_line_count = 2 bitmaps &&
+
+                       git rev-list --use-bitmap-index HEAD 2>err &&
+                       grep "ignoring extra bitmap file" err
+               )
+       '
+}
 
-basic_bitmap_tests
+test_bitmap_cases
 
 test_expect_success 'incremental repack fails when bitmaps are requested' '
        test_commit more-1 &&
@@ -54,219 +447,24 @@ test_expect_success 'incremental repack can disable bitmaps' '
        git repack -d --no-write-bitmap-index
 '
 
-test_expect_success 'pack-objects respects --local (non-local loose)' '
-       git init --bare alt.git &&
-       echo $(pwd)/alt.git/objects >.git/objects/info/alternates &&
-       echo content1 >file1 &&
-       # non-local loose object which is not present in bitmapped pack
-       altblob=$(GIT_DIR=alt.git git hash-object -w file1) &&
-       # non-local loose object which is also present in bitmapped pack
-       git cat-file blob $blob | GIT_DIR=alt.git git hash-object -w --stdin &&
-       git add file1 &&
-       test_tick &&
-       git commit -m commit_file1 &&
-       echo HEAD | git pack-objects --local --stdout --revs >1.pack &&
-       git index-pack 1.pack &&
-       list_packed_objects 1.idx >1.objects &&
-       printf "%s\n" "$altblob" "$blob" >nonlocal-loose &&
-       ! has_any nonlocal-loose 1.objects
-'
-
-test_expect_success 'pack-objects respects --honor-pack-keep (local non-bitmapped pack)' '
-       echo content2 >file2 &&
-       blob2=$(git hash-object -w file2) &&
-       git add file2 &&
-       test_tick &&
-       git commit -m commit_file2 &&
-       printf "%s\n" "$blob2" "$bitmaptip" >keepobjects &&
-       pack2=$(git pack-objects pack2 <keepobjects) &&
-       mv pack2-$pack2.* .git/objects/pack/ &&
-       >.git/objects/pack/pack2-$pack2.keep &&
-       rm $(objpath $blob2) &&
-       echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >2a.pack &&
-       git index-pack 2a.pack &&
-       list_packed_objects 2a.idx >2a.objects &&
-       ! has_any keepobjects 2a.objects
-'
-
-test_expect_success 'pack-objects respects --local (non-local pack)' '
-       mv .git/objects/pack/pack2-$pack2.* alt.git/objects/pack/ &&
-       echo HEAD | git pack-objects --local --stdout --revs >2b.pack &&
-       git index-pack 2b.pack &&
-       list_packed_objects 2b.idx >2b.objects &&
-       ! has_any keepobjects 2b.objects
-'
-
-test_expect_success 'pack-objects respects --honor-pack-keep (local bitmapped pack)' '
-       ls .git/objects/pack/ | grep bitmap >output &&
-       test_line_count = 1 output &&
-       packbitmap=$(basename $(cat output) .bitmap) &&
-       list_packed_objects .git/objects/pack/$packbitmap.idx >packbitmap.objects &&
-       test_when_finished "rm -f .git/objects/pack/$packbitmap.keep" &&
-       >.git/objects/pack/$packbitmap.keep &&
-       echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >3a.pack &&
-       git index-pack 3a.pack &&
-       list_packed_objects 3a.idx >3a.objects &&
-       ! has_any packbitmap.objects 3a.objects
-'
-
-test_expect_success 'pack-objects respects --local (non-local bitmapped pack)' '
-       mv .git/objects/pack/$packbitmap.* alt.git/objects/pack/ &&
-       rm -f .git/objects/pack/multi-pack-index &&
-       test_when_finished "mv alt.git/objects/pack/$packbitmap.* .git/objects/pack/" &&
-       echo HEAD | git pack-objects --local --stdout --revs >3b.pack &&
-       git index-pack 3b.pack &&
-       list_packed_objects 3b.idx >3b.objects &&
-       ! has_any packbitmap.objects 3b.objects
-'
-
-test_expect_success 'pack-objects to file can use bitmap' '
-       # make sure we still have 1 bitmap index from previous tests
-       ls .git/objects/pack/ | grep bitmap >output &&
-       test_line_count = 1 output &&
-       # verify equivalent packs are generated with/without using bitmap index
-       packasha1=$(git pack-objects --no-use-bitmap-index --all packa </dev/null) &&
-       packbsha1=$(git pack-objects --use-bitmap-index --all packb </dev/null) &&
-       list_packed_objects packa-$packasha1.idx >packa.objects &&
-       list_packed_objects packb-$packbsha1.idx >packb.objects &&
-       test_cmp packa.objects packb.objects
-'
-
-test_expect_success 'full repack, reusing previous bitmaps' '
-       git repack -ad &&
-       ls .git/objects/pack/ | grep bitmap >output &&
-       test_line_count = 1 output
-'
-
-test_expect_success 'fetch (full bitmap)' '
-       git --git-dir=clone.git fetch origin second:second &&
-       git rev-parse HEAD >expect &&
-       git --git-dir=clone.git rev-parse HEAD >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'create objects for missing-HAVE tests' '
-       blob=$(echo "missing have" | git hash-object -w --stdin) &&
-       tree=$(printf "100644 blob $blob\tfile\n" | git mktree) &&
-       parent=$(echo parent | git commit-tree $tree) &&
-       commit=$(echo commit | git commit-tree $tree -p $parent) &&
-       cat >revs <<-EOF
-       HEAD
-       ^HEAD^
-       ^$commit
-       EOF
-'
-
-test_expect_success 'pack-objects respects --incremental' '
-       cat >revs2 <<-EOF &&
-       HEAD
-       $commit
-       EOF
-       git pack-objects --incremental --stdout --revs <revs2 >4.pack &&
-       git index-pack 4.pack &&
-       list_packed_objects 4.idx >4.objects &&
-       test_line_count = 4 4.objects &&
-       git rev-list --objects $commit >revlist &&
-       cut -d" " -f1 revlist |sort >objects &&
-       test_cmp 4.objects objects
-'
-
-test_expect_success 'pack with missing blob' '
-       rm $(objpath $blob) &&
-       git pack-objects --stdout --revs <revs >/dev/null
-'
-
-test_expect_success 'pack with missing tree' '
-       rm $(objpath $tree) &&
-       git pack-objects --stdout --revs <revs >/dev/null
-'
-
-test_expect_success 'pack with missing parent' '
-       rm $(objpath $parent) &&
-       git pack-objects --stdout --revs <revs >/dev/null
-'
+test_bitmap_cases "pack.writeBitmapLookupTable"
 
-test_expect_success JGIT,SHA1 'we can read jgit bitmaps' '
-       git clone --bare . compat-jgit.git &&
-       (
-               cd compat-jgit.git &&
-               rm -f objects/pack/*.bitmap &&
-               jgit gc &&
-               git rev-list --test-bitmap HEAD
-       )
-'
-
-test_expect_success JGIT,SHA1 'jgit can read our bitmaps' '
-       git clone --bare . compat-us.git &&
-       (
-               cd compat-us.git &&
-               git repack -adb &&
-               # jgit gc will barf if it does not like our bitmaps
-               jgit gc
-       )
-'
-
-test_expect_success 'splitting packs does not generate bogus bitmaps' '
-       test-tool genrandom foo $((1024 * 1024)) >rand &&
-       git add rand &&
-       git commit -m "commit with big file" &&
-       git -c pack.packSizeLimit=500k repack -adb &&
-       git init --bare no-bitmaps.git &&
-       git -C no-bitmaps.git fetch .. HEAD
+test_expect_success 'verify writing bitmap lookup table when enabled' '
+       GIT_TRACE2_EVENT="$(pwd)/trace2" \
+               git repack -ad &&
+       grep "\"label\":\"writing_lookup_table\"" trace2
 '
 
-test_expect_success 'set up reusable pack' '
-       rm -f .git/objects/pack/*.keep &&
+test_expect_success 'lookup table is actually used to traverse objects' '
        git repack -adb &&
-       reusable_pack () {
-               git for-each-ref --format="%(objectname)" |
-               git pack-objects --delta-base-offset --revs --stdout "$@"
-       }
+       GIT_TRACE2_EVENT="$(pwd)/trace3" \
+               git rev-list --use-bitmap-index --count --all &&
+       grep "\"label\":\"reading_lookup_table\"" trace3
 '
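
An end-user sketch of opting into the lookup-table extension whose trace events the two tests above verify; the repository path is a placeholder.

    git -C /path/to/repo config pack.writeBitmapLookupTable true &&
    git -C /path/to/repo repack -adb    # rewrites the .bitmap with a lookup table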
 
-test_expect_success 'pack reuse respects --honor-pack-keep' '
-       test_when_finished "rm -f .git/objects/pack/*.keep" &&
-       for i in .git/objects/pack/*.pack
-       do
-               >${i%.pack}.keep || return 1
-       done &&
-       reusable_pack --honor-pack-keep >empty.pack &&
-       git index-pack empty.pack &&
-       git show-index <empty.idx >actual &&
-       test_must_be_empty actual
-'
-
-test_expect_success 'pack reuse respects --local' '
-       mv .git/objects/pack/* alt.git/objects/pack/ &&
-       test_when_finished "mv alt.git/objects/pack/* .git/objects/pack/" &&
-       reusable_pack --local >empty.pack &&
-       git index-pack empty.pack &&
-       git show-index <empty.idx >actual &&
-       test_must_be_empty actual
-'
-
-test_expect_success 'pack reuse respects --incremental' '
-       reusable_pack --incremental >empty.pack &&
-       git index-pack empty.pack &&
-       git show-index <empty.idx >actual &&
-       test_must_be_empty actual
-'
-
-test_expect_success 'truncated bitmap fails gracefully (ewah)' '
+test_expect_success 'truncated bitmap fails gracefully (lookup table)' '
        test_config pack.writebitmaphashcache false &&
-       git repack -ad &&
-       git rev-list --use-bitmap-index --count --all >expect &&
-       bitmap=$(ls .git/objects/pack/*.bitmap) &&
-       test_when_finished "rm -f $bitmap" &&
-       test_copy_bytes 256 <$bitmap >$bitmap.tmp &&
-       mv -f $bitmap.tmp $bitmap &&
-       git rev-list --use-bitmap-index --count --all >actual 2>stderr &&
-       test_cmp expect actual &&
-       test_i18ngrep corrupt.ewah.bitmap stderr
-'
-
-test_expect_success 'truncated bitmap fails gracefully (cache)' '
-       git repack -ad &&
+       git repack -adb &&
        git rev-list --use-bitmap-index --count --all >expect &&
        bitmap=$(ls .git/objects/pack/*.bitmap) &&
        test_when_finished "rm -f $bitmap" &&
@@ -277,152 +475,4 @@ test_expect_success 'truncated bitmap fails gracefully (cache)' '
        test_i18ngrep corrupted.bitmap.index stderr
 '
 
-# Create a state of history with these properties:
-#
-#  - refs that allow a client to fetch some new history, while sharing some old
-#    history with the server; we use branches delta-reuse-old and
-#    delta-reuse-new here
-#
-#  - the new history contains an object that is stored on the server as a delta
-#    against a base that is in the old history
-#
-#  - the base object is not immediately reachable from the tip of the old
-#    history; finding it would involve digging down through history we know the
-#    other side has
-#
-# This should result in a state where fetching from old->new would not
-# traditionally reuse the on-disk delta (because we'd have to dig to realize
-# that the client has it), but we will do so if bitmaps can tell us cheaply
-# that the other side has it.
-test_expect_success 'set up thin delta-reuse parent' '
-       # This first commit contains the buried base object.
-       test-tool genrandom delta 16384 >file &&
-       git add file &&
-       git commit -m "delta base" &&
-       base=$(git rev-parse --verify HEAD:file) &&
-
-       # These intermediate commits bury the base back in history.
-       # This becomes the "old" state.
-       for i in 1 2 3 4 5
-       do
-               echo $i >file &&
-               git commit -am "intermediate $i" || return 1
-       done &&
-       git branch delta-reuse-old &&
-
-       # And now our new history has a delta against the buried base. Note
-       # that this must be smaller than the original file, since pack-objects
-       # prefers to create deltas from smaller objects to larger.
-       test-tool genrandom delta 16300 >file &&
-       git commit -am "delta result" &&
-       delta=$(git rev-parse --verify HEAD:file) &&
-       git branch delta-reuse-new &&
-
-       # Repack with bitmaps and double check that we have the expected delta
-       # relationship.
-       git repack -adb &&
-       have_delta $delta $base
-'
-
-# Now we can sanity-check the non-bitmap behavior (that the server is not able
-# to reuse the delta). This isn't strictly something we care about, so this
-# test could be scrapped in the future. But it makes sure that the next test is
-# actually triggering the feature we want.
-#
-# Note that our tools for working with on-the-wire "thin" packs are limited. So
-# we actually perform the fetch, retain the resulting pack, and inspect the
-# result.
-test_expect_success 'fetch without bitmaps ignores delta against old base' '
-       test_config pack.usebitmaps false &&
-       test_when_finished "rm -rf client.git" &&
-       git init --bare client.git &&
-       (
-               cd client.git &&
-               git config transfer.unpackLimit 1 &&
-               git fetch .. delta-reuse-old:delta-reuse-old &&
-               git fetch .. delta-reuse-new:delta-reuse-new &&
-               have_delta $delta $ZERO_OID
-       )
-'
-
-# And do the same for the bitmap case, where we do expect to find the delta.
-test_expect_success 'fetch with bitmaps can reuse old base' '
-       test_config pack.usebitmaps true &&
-       test_when_finished "rm -rf client.git" &&
-       git init --bare client.git &&
-       (
-               cd client.git &&
-               git config transfer.unpackLimit 1 &&
-               git fetch .. delta-reuse-old:delta-reuse-old &&
-               git fetch .. delta-reuse-new:delta-reuse-new &&
-               have_delta $delta $base
-       )
-'
-
-test_expect_success 'pack.preferBitmapTips' '
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
-
-               # create enough commits that not all are receive bitmap
-               # coverage even if they are all at the tip of some reference.
-               test_commit_bulk --message="%s" 103 &&
-
-               git rev-list HEAD >commits.raw &&
-               sort <commits.raw >commits &&
-
-               git log --format="create refs/tags/%s %H" HEAD >refs &&
-               git update-ref --stdin <refs &&
-
-               git repack -adb &&
-               test-tool bitmap list-commits | sort >bitmaps &&
-
-               # remember which commits did not receive bitmaps
-               comm -13 bitmaps commits >before &&
-               test_file_not_empty before &&
-
-               # mark the commits which did not receive bitmaps as preferred,
-               # and generate the bitmap again
-               perl -pe "s{^}{create refs/tags/include/$. }" <before |
-                       git update-ref --stdin &&
-               git -c pack.preferBitmapTips=refs/tags/include repack -adb &&
-
-               # finally, check that the commit(s) without bitmap coverage
-               # are not the same ones as before
-               test-tool bitmap list-commits | sort >bitmaps &&
-               comm -13 bitmaps commits >after &&
-
-               ! test_cmp before after
-       )
-'
-
-test_expect_success 'complains about multiple pack bitmaps' '
-       rm -fr repo &&
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
-
-               test_commit base &&
-
-               git repack -adb &&
-               bitmap="$(ls .git/objects/pack/pack-*.bitmap)" &&
-               mv "$bitmap" "$bitmap.bak" &&
-
-               test_commit other &&
-               git repack -ab &&
-
-               mv "$bitmap.bak" "$bitmap" &&
-
-               find .git/objects/pack -type f -name "*.pack" >packs &&
-               find .git/objects/pack -type f -name "*.bitmap" >bitmaps &&
-               test_line_count = 2 packs &&
-               test_line_count = 2 bitmaps &&
-
-               git rev-list --use-bitmap-index HEAD 2>err &&
-               grep "ignoring extra bitmap file" err
-       )
-'
-
 test_done
index 872a95df3383742e392ef3750be3dfcbecb01ec8..9dae60f73e3253bbb4b44355535c8cf1ebc58be8 100755 (executable)
@@ -17,23 +17,40 @@ test_description='check bitmap operation with shallow repositories'
 # the tree for A. But in a shallow one, we've grafted away
 # A, and fetching A to B requires that the other side send
 # us the tree for file=1.
-test_expect_success 'setup shallow repo' '
-       echo 1 >file &&
-       git add file &&
-       git commit -m orig &&
-       echo 2 >file &&
-       git commit -a -m update &&
-       git clone --no-local --bare --depth=1 . shallow.git &&
-       echo 1 >file &&
-       git commit -a -m repeat
-'
-
-test_expect_success 'turn on bitmaps in the parent' '
-       git repack -adb
-'
-
-test_expect_success 'shallow fetch from bitmapped repo' '
-       (cd shallow.git && git fetch)
-'
+test_shallow_bitmaps () {
+       writeLookupTable=false
+
+       for i in "$@"
+       do
+               case $i in
+               "pack.writeBitmapLookupTable") writeLookupTable=true;;
+               esac
+       done
+
+       test_expect_success 'setup shallow repo' '
+               rm -rf * .git &&
+               git init &&
+               git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
+               echo 1 >file &&
+               git add file &&
+               git commit -m orig &&
+               echo 2 >file &&
+               git commit -a -m update &&
+               git clone --no-local --bare --depth=1 . shallow.git &&
+               echo 1 >file &&
+               git commit -a -m repeat
+       '
+
+       test_expect_success 'turn on bitmaps in the parent' '
+               git repack -adb
+       '
+
+       test_expect_success 'shallow fetch from bitmapped repo' '
+               (cd shallow.git && git fetch)
+       '
+}
+
+test_shallow_bitmaps
+test_shallow_bitmaps "pack.writeBitmapLookupTable"
 
 test_done
index 0aec8619e22dc97c9ec7d78c7e136fb388034fc7..73a241743aa50101c032b53566de7ba2e7a3b410 100755 (executable)
@@ -49,9 +49,9 @@ Then no matter which order we start looking at the packs in, we know that we
 will always find a delta for "file", because its lookup will always come
 immediately after the lookup for "dummy".
 '
-. ./test-lib.sh
-
 
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
 
 # Create a pack containing the tree $1 and blob $1:file, with
 # the latter stored as a delta against $2:file.
index 8bacd96275b0ac881b9202ecaff83394ca562dd2..c80ea9e8b71ee8707c8c1cc4020fb83a1b90029f 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='pack-object compression configuration'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index db89542dfb3fe93dad02b90cf5ced1b46ee96a15..049c5fc8ead328860ef7ed38dea410f4524107c9 100755 (executable)
@@ -12,12 +12,12 @@ test_expect_success 'usage' '
 
 test_expect_success 'usage shown without sub-command' '
        test_expect_code 129 git commit-graph 2>err &&
-       ! grep error: err
+       grep usage: err
 '
 
 test_expect_success 'usage shown with an error on unknown sub-command' '
        cat >expect <<-\EOF &&
-       error: unrecognized subcommand: unknown
+       error: unknown subcommand: `unknown'\''
        EOF
        test_expect_code 129 git commit-graph unknown 2>stderr &&
        grep error stderr >actual &&
@@ -361,13 +361,14 @@ test_expect_success 'replace-objects invalidates commit-graph' '
 test_expect_success 'commit grafts invalidate commit-graph' '
        cd "$TRASH_DIRECTORY" &&
        test_when_finished rm -rf graft &&
-       git clone full graft &&
+       git clone --template= full graft &&
        (
                cd graft &&
                git commit-graph write --reachable &&
                test_path_is_file .git/objects/info/commit-graph &&
                H1=$(git rev-parse --verify HEAD~1) &&
                H3=$(git rev-parse --verify HEAD~3) &&
+               mkdir .git/info &&
                echo "$H1 $H3" >.git/info/grafts &&
                git -c core.commitGraph=false log >expect &&
                git -c core.commitGraph=true log >actual &&
index 8a56d98a0e88cef9add7d5603730daf3c6e91897..70770fe274d84fca1d988c1d4dfd7457943363ee 100755 (executable)
@@ -6,6 +6,8 @@
 test_description='git pack-object with "large" deltas
 
 '
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-pack.sh
 
index 4fe57414c13caca90d862b417ad5471cab28ee12..ad6eea5fa0f20a4e9a72612eee4b3c22729a0da8 100755 (executable)
@@ -15,17 +15,24 @@ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
 sane_unset GIT_TEST_MIDX_WRITE_REV
 sane_unset GIT_TEST_MIDX_READ_RIDX
 
-midx_bitmap_core
-
 bitmap_reuse_tests() {
        from=$1
        to=$2
+       writeLookupTable=false
+
+	for i in "$@"
+       do
+               case $i in
+               "pack.writeBitmapLookupTable") writeLookupTable=true;;
+               esac
+       done
 
        test_expect_success "setup pack reuse tests ($from -> $to)" '
                rm -fr repo &&
                git init repo &&
                (
                        cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
                        test_commit_bulk 16 &&
                        git tag old-tip &&
 
@@ -43,6 +50,7 @@ bitmap_reuse_tests() {
        test_expect_success "build bitmap from existing ($from -> $to)" '
                (
                        cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
                        test_commit_bulk --id=further 16 &&
                        git tag new-tip &&
 
@@ -59,6 +67,7 @@ bitmap_reuse_tests() {
        test_expect_success "verify resulting bitmaps ($from -> $to)" '
                (
                        cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
                        git for-each-ref &&
                        git rev-list --test-bitmap refs/tags/old-tip &&
                        git rev-list --test-bitmap refs/tags/new-tip
@@ -66,244 +75,338 @@ bitmap_reuse_tests() {
        '
 }
 
-bitmap_reuse_tests 'pack' 'MIDX'
-bitmap_reuse_tests 'MIDX' 'pack'
-bitmap_reuse_tests 'MIDX' 'MIDX'
+test_midx_bitmap_cases () {
+       writeLookupTable=false
+       writeBitmapLookupTable=
+
+       for i in "$@"
+       do
+               case $i in
+               "pack.writeBitmapLookupTable")
+                       writeLookupTable=true
+                       writeBitmapLookupTable="$i"
+                       ;;
+               esac
+       done
+
+       test_expect_success 'setup test_repository' '
+               rm -rf * .git &&
+               git init &&
+               git config pack.writeBitmapLookupTable '"$writeLookupTable"'
+       '
 
-test_expect_success 'missing object closure fails gracefully' '
-       rm -fr repo &&
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
+       midx_bitmap_core
 
-               test_commit loose &&
-               test_commit packed &&
+       bitmap_reuse_tests 'pack' 'MIDX' "$writeBitmapLookupTable"
+       bitmap_reuse_tests 'MIDX' 'pack' "$writeBitmapLookupTable"
+       bitmap_reuse_tests 'MIDX' 'MIDX' "$writeBitmapLookupTable"
 
-               # Do not pass "--revs"; we want a pack without the "loose"
-               # commit.
-               git pack-objects $objdir/pack/pack <<-EOF &&
-               $(git rev-parse packed)
-               EOF
+       test_expect_success 'missing object closure fails gracefully' '
+               rm -fr repo &&
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-               test_must_fail git multi-pack-index write --bitmap 2>err &&
-               grep "doesn.t have full closure" err &&
-               test_path_is_missing $midx
-       )
-'
+                       test_commit loose &&
+                       test_commit packed &&
 
-midx_bitmap_partial_tests
+                       # Do not pass "--revs"; we want a pack without the "loose"
+                       # commit.
+                       git pack-objects $objdir/pack/pack <<-EOF &&
+                       $(git rev-parse packed)
+                       EOF
 
-test_expect_success 'removing a MIDX clears stale bitmaps' '
-       rm -fr repo &&
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
-               test_commit base &&
-               git repack &&
-               git multi-pack-index write --bitmap &&
+                       test_must_fail git multi-pack-index write --bitmap 2>err &&
+                       grep "doesn.t have full closure" err &&
+                       test_path_is_missing $midx
+               )
+       '
 
-               # Write a MIDX and bitmap; remove the MIDX but leave the bitmap.
-               stale_bitmap=$midx-$(midx_checksum $objdir).bitmap &&
-               rm $midx &&
+       midx_bitmap_partial_tests
 
-               # Then write a new MIDX.
-               test_commit new &&
-               git repack &&
-               git multi-pack-index write --bitmap &&
+       test_expect_success 'removing a MIDX clears stale bitmaps' '
+               rm -fr repo &&
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
+                       test_commit base &&
+                       git repack &&
+                       git multi-pack-index write --bitmap &&
+
+                       # Write a MIDX and bitmap; remove the MIDX but leave the bitmap.
+                       stale_bitmap=$midx-$(midx_checksum $objdir).bitmap &&
+                       rm $midx &&
+
+                       # Then write a new MIDX.
+                       test_commit new &&
+                       git repack &&
+                       git multi-pack-index write --bitmap &&
+
+                       test_path_is_file $midx &&
+                       test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+                       test_path_is_missing $stale_bitmap
+               )
+       '
 
-               test_path_is_file $midx &&
-               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
-               test_path_is_missing $stale_bitmap
-       )
-'
+       test_expect_success 'pack.preferBitmapTips' '
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-test_expect_success 'pack.preferBitmapTips' '
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
+                       test_commit_bulk --message="%s" 103 &&
 
-               test_commit_bulk --message="%s" 103 &&
+                       git log --format="%H" >commits.raw &&
+                       sort <commits.raw >commits &&
 
-               git log --format="%H" >commits.raw &&
-               sort <commits.raw >commits &&
+                       git log --format="create refs/tags/%s %H" HEAD >refs &&
+                       git update-ref --stdin <refs &&
 
-               git log --format="create refs/tags/%s %H" HEAD >refs &&
-               git update-ref --stdin <refs &&
+                       git multi-pack-index write --bitmap &&
+                       test_path_is_file $midx &&
+                       test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
 
-               git multi-pack-index write --bitmap &&
-               test_path_is_file $midx &&
-               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       comm -13 bitmaps commits >before &&
+                       test_line_count = 1 before &&
 
-               test-tool bitmap list-commits | sort >bitmaps &&
-               comm -13 bitmaps commits >before &&
-               test_line_count = 1 before &&
+                       perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \
+                               <before | git update-ref --stdin &&
 
-               perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \
-                       <before | git update-ref --stdin &&
+                       rm -fr $midx-$(midx_checksum $objdir).bitmap &&
+                       rm -fr $midx &&
 
-               rm -fr $midx-$(midx_checksum $objdir).bitmap &&
-               rm -fr $midx &&
+                       git -c pack.preferBitmapTips=refs/tags/include \
+                               multi-pack-index write --bitmap &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       comm -13 bitmaps commits >after &&
 
-               git -c pack.preferBitmapTips=refs/tags/include \
-                       multi-pack-index write --bitmap &&
-               test-tool bitmap list-commits | sort >bitmaps &&
-               comm -13 bitmaps commits >after &&
+                       ! test_cmp before after
+               )
+       '
 
-               ! test_cmp before after
-       )
-'
+       test_expect_success 'writing a bitmap with --refs-snapshot' '
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-test_expect_success 'writing a bitmap with --refs-snapshot' '
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
+                       test_commit one &&
+                       test_commit two &&
 
-               test_commit one &&
-               test_commit two &&
+                       git rev-parse one >snapshot &&
 
-               git rev-parse one >snapshot &&
+                       git repack -ad &&
 
-               git repack -ad &&
+			# First, write a MIDX which sees both refs/tags/one and
+                       # refs/tags/two (causing both of those commits to receive
+                       # bitmaps).
+                       git multi-pack-index write --bitmap &&
 
-               # First, write a MIDX which see both refs/tags/one and
-               # refs/tags/two (causing both of those commits to receive
-               # bitmaps).
-               git multi-pack-index write --bitmap &&
+                       test_path_is_file $midx &&
+                       test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
 
-               test_path_is_file $midx &&
-               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       grep "$(git rev-parse one)" bitmaps &&
+                       grep "$(git rev-parse two)" bitmaps &&
 
-               test-tool bitmap list-commits | sort >bitmaps &&
-               grep "$(git rev-parse one)" bitmaps &&
-               grep "$(git rev-parse two)" bitmaps &&
+                       rm -fr $midx-$(midx_checksum $objdir).bitmap &&
+                       rm -fr $midx &&
 
-               rm -fr $midx-$(midx_checksum $objdir).bitmap &&
-               rm -fr $midx &&
+                       # Then again, but with a refs snapshot which only sees
+                       # refs/tags/one.
+                       git multi-pack-index write --bitmap --refs-snapshot=snapshot &&
 
-               # Then again, but with a refs snapshot which only sees
-               # refs/tags/one.
-               git multi-pack-index write --bitmap --refs-snapshot=snapshot &&
+                       test_path_is_file $midx &&
+                       test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
 
-               test_path_is_file $midx &&
-               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       grep "$(git rev-parse one)" bitmaps &&
+                       ! grep "$(git rev-parse two)" bitmaps
+               )
+       '
 
-               test-tool bitmap list-commits | sort >bitmaps &&
-               grep "$(git rev-parse one)" bitmaps &&
-               ! grep "$(git rev-parse two)" bitmaps
-       )
-'
+       test_expect_success 'write a bitmap with --refs-snapshot (preferred tips)' '
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-test_expect_success 'write a bitmap with --refs-snapshot (preferred tips)' '
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
+                       test_commit_bulk --message="%s" 103 &&
 
-               test_commit_bulk --message="%s" 103 &&
+                       git log --format="%H" >commits.raw &&
+                       sort <commits.raw >commits &&
 
-               git log --format="%H" >commits.raw &&
-               sort <commits.raw >commits &&
+                       git log --format="create refs/tags/%s %H" HEAD >refs &&
+                       git update-ref --stdin <refs &&
 
-               git log --format="create refs/tags/%s %H" HEAD >refs &&
-               git update-ref --stdin <refs &&
+                       git multi-pack-index write --bitmap &&
+                       test_path_is_file $midx &&
+                       test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
 
-               git multi-pack-index write --bitmap &&
-               test_path_is_file $midx &&
-               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       comm -13 bitmaps commits >before &&
+                       test_line_count = 1 before &&
+
+                       (
+                               grep -vf before commits.raw &&
+                               # mark missing commits as preferred
+                               sed "s/^/+/" before
+                       ) >snapshot &&
+
+                       rm -fr $midx-$(midx_checksum $objdir).bitmap &&
+                       rm -fr $midx &&
 
-               test-tool bitmap list-commits | sort >bitmaps &&
-               comm -13 bitmaps commits >before &&
-               test_line_count = 1 before &&
+                       git multi-pack-index write --bitmap --refs-snapshot=snapshot &&
+                       test-tool bitmap list-commits | sort >bitmaps &&
+                       comm -13 bitmaps commits >after &&
 
+                       ! test_cmp before after
+               )
+       '
+
+       test_expect_success 'hash-cache values are propagated from pack bitmaps' '
+               rm -fr repo &&
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
                (
-                       grep -vf before commits.raw &&
-                       # mark missing commits as preferred
-                       sed "s/^/+/" before
-               ) >snapshot &&
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-               rm -fr $midx-$(midx_checksum $objdir).bitmap &&
-               rm -fr $midx &&
+                       test_commit base &&
+                       test_commit base2 &&
+                       git repack -adb &&
 
-               git multi-pack-index write --bitmap --refs-snapshot=snapshot &&
-               test-tool bitmap list-commits | sort >bitmaps &&
-               comm -13 bitmaps commits >after &&
+                       test-tool bitmap dump-hashes >pack.raw &&
+                       test_file_not_empty pack.raw &&
+                       sort pack.raw >pack.hashes &&
 
-               ! test_cmp before after
-       )
-'
+                       test_commit new &&
+                       git repack &&
+                       git multi-pack-index write --bitmap &&
 
-test_expect_success 'hash-cache values are propagated from pack bitmaps' '
-       rm -fr repo &&
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
+                       test-tool bitmap dump-hashes >midx.raw &&
+                       sort midx.raw >midx.hashes &&
 
-               test_commit base &&
-               test_commit base2 &&
-               git repack -adb &&
+                       # ensure that every namehash in the pack bitmap can be found in
+                       # the midx bitmap (i.e., that there are no oid-namehash pairs
+                       # unique to the pack bitmap).
+                       comm -23 pack.hashes midx.hashes >dropped.hashes &&
+                       test_must_be_empty dropped.hashes
+               )
+       '
 
-               test-tool bitmap dump-hashes >pack.raw &&
-               test_file_not_empty pack.raw &&
-               sort pack.raw >pack.hashes &&
+       test_expect_success 'no .bitmap is written without any objects' '
+               rm -fr repo &&
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-               test_commit new &&
-               git repack &&
-               git multi-pack-index write --bitmap &&
+                       empty="$(git pack-objects $objdir/pack/pack </dev/null)" &&
+                       cat >packs <<-EOF &&
+                       pack-$empty.idx
+                       EOF
 
-               test-tool bitmap dump-hashes >midx.raw &&
-               sort midx.raw >midx.hashes &&
+                       git multi-pack-index write --bitmap --stdin-packs \
+                               <packs 2>err &&
 
-               # ensure that every namehash in the pack bitmap can be found in
-               # the midx bitmap (i.e., that there are no oid-namehash pairs
-               # unique to the pack bitmap).
-               comm -23 pack.hashes midx.hashes >dropped.hashes &&
-               test_must_be_empty dropped.hashes
-       )
-'
+                       grep "bitmap without any objects" err &&
 
-test_expect_success 'no .bitmap is written without any objects' '
-       rm -fr repo &&
-       git init repo &&
-       test_when_finished "rm -fr repo" &&
-       (
-               cd repo &&
+                       test_path_is_file $midx &&
+                       test_path_is_missing $midx-$(midx_checksum $objdir).bitmap
+               )
+       '
+
+       test_expect_success 'graceful fallback when missing reverse index' '
+               rm -fr repo &&
+               git init repo &&
+               test_when_finished "rm -fr repo" &&
+               (
+                       cd repo &&
+                       git config pack.writeBitmapLookupTable '"$writeLookupTable"' &&
 
-               empty="$(git pack-objects $objdir/pack/pack </dev/null)" &&
-               cat >packs <<-EOF &&
-               pack-$empty.idx
-               EOF
+                       test_commit base &&
 
-               git multi-pack-index write --bitmap --stdin-packs \
-                       <packs 2>err &&
+                       # write a pack and MIDX bitmap containing base
+                       git repack -adb &&
+                       git multi-pack-index write --bitmap &&
 
-               grep "bitmap without any objects" err &&
+                       GIT_TEST_MIDX_READ_RIDX=0 \
+                               git rev-list --use-bitmap-index HEAD 2>err &&
+                       ! grep "ignoring extra bitmap file" err
+               )
+       '
+}
 
-               test_path_is_file $midx &&
-               test_path_is_missing $midx-$(midx_checksum $objdir).bitmap
-       )
-'
+test_midx_bitmap_cases
 
-test_expect_success 'graceful fallback when missing reverse index' '
+test_midx_bitmap_cases "pack.writeBitmapLookupTable"
+
+test_expect_success 'multi-pack-index write writes lookup table if enabled' '
        rm -fr repo &&
        git init repo &&
        test_when_finished "rm -fr repo" &&
        (
                cd repo &&
+               test_commit base &&
+               git config pack.writeBitmapLookupTable true &&
+               git repack -ad &&
+               GIT_TRACE2_EVENT="$(pwd)/trace" \
+                       git multi-pack-index write --bitmap &&
+               grep "\"label\":\"writing_lookup_table\"" trace
+       )
+'
+
+test_expect_success 'preferred pack change with existing MIDX bitmap' '
+       git init preferred-pack-with-existing &&
+       (
+               cd preferred-pack-with-existing &&
 
                test_commit base &&
+               test_commit other &&
+
+               git rev-list --objects --no-object-names base >p1.objects &&
+               git rev-list --objects --no-object-names other >p2.objects &&
 
-               # write a pack and MIDX bitmap containing base
-               git repack -adb &&
-               git multi-pack-index write --bitmap &&
+               p1="$(git pack-objects "$objdir/pack/pack" \
+                       --delta-base-offset <p1.objects)" &&
+               p2="$(git pack-objects "$objdir/pack/pack" \
+                       --delta-base-offset <p2.objects)" &&
+
+               # Generate a MIDX containing the first two packs,
+               # marking p1 as preferred, and ensure that it can be
+               # successfully cloned.
+               git multi-pack-index write --bitmap \
+                       --preferred-pack="pack-$p1.pack" &&
+               test_path_is_file $midx &&
+               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+               git clone --no-local . clone1 &&
+
+               # Then generate a new pack which sorts ahead of any
+               # existing pack (by tweaking the pack prefix).
+               test_commit foo &&
+               git pack-objects --all --unpacked $objdir/pack/pack0 &&
+
+               # Generate a new MIDX which changes the preferred pack
+               # to a pack contained in the existing MIDX.
+               git multi-pack-index write --bitmap \
+                       --preferred-pack="pack-$p2.pack" &&
+               test_path_is_file $midx &&
+               test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
 
-               GIT_TEST_MIDX_READ_RIDX=0 \
-                       git rev-list --use-bitmap-index HEAD 2>err &&
-               ! grep "ignoring extra bitmap file" err
+               # When the above circumstances are met, the preferred
+               # pack should change appropriately and clones should
+               # (still) succeed.
+               git clone --no-local . clone2
        )
 '
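The tests above lean on the fact that a multi-pack-index bitmap is named after the checksum of the MIDX it belongs to, which is why $midx-$(midx_checksum $objdir).bitmap appears throughout and why rewriting the MIDX can leave a stale .bitmap behind. A hand-run illustration, assuming the usual $GIT_DIR/objects/pack layout:

	midx=.git/objects/pack/multi-pack-index
	git multi-pack-index write --bitmap
	ls "$midx"-*.bitmap     # multi-pack-index-<checksum>.bitmap for the current MIDX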
 
index d30ba632c87c072b591cb085061a4d6f2dcb1c0b..e65e311cd73aec492ac0850cd6e79d126415ac36 100755 (executable)
@@ -17,7 +17,27 @@ GIT_TEST_MIDX_READ_RIDX=0
 export GIT_TEST_MIDX_WRITE_REV
 export GIT_TEST_MIDX_READ_RIDX
 
-midx_bitmap_core rev
-midx_bitmap_partial_tests rev
+test_midx_bitmap_rev () {
+       writeLookupTable=false
+
+       for i in "$@"
+       do
+               case $i in
+               "pack.writeBitmapLookupTable") writeLookupTable=true;;
+               esac
+       done
+
+       test_expect_success 'setup bitmap config' '
+               rm -rf * .git &&
+               git init &&
+               git config pack.writeBitmapLookupTable '"$writeLookupTable"'
+       '
+
+       midx_bitmap_core rev
+       midx_bitmap_partial_tests rev
+}
+
+test_midx_bitmap_rev
+test_midx_bitmap_rev "pack.writeBitmapLookupTable"
 
 test_done
index 8968f7a08d8700bb22aec956e3f8e50ab2f376bd..303f7a5d842d36edb191bb05ec61df9a6af8d7c9 100755 (executable)
@@ -29,7 +29,8 @@ basic_cruft_pack_tests () {
                                while read oid
                                do
                                        path="$objdir/$(test_oid_to_path "$oid")" &&
-                                       printf "%s %d\n" "$oid" "$(test-tool chmtime --get "$path")"
+                                       printf "%s %d\n" "$oid" "$(test-tool chmtime --get "$path")" ||
+                                       echo "object list generation failed for $oid"
                                done |
                                sort -k1
                        ) >expect &&
@@ -232,7 +233,7 @@ test_expect_success 'cruft tags rescue tagged objects' '
                while read oid
                do
                        test-tool chmtime -1000 \
-                               "$objdir/$(test_oid_to_path $oid)"
+                               "$objdir/$(test_oid_to_path $oid)" || exit 1
                done <objects &&
 
                test-tool chmtime -500 \
@@ -272,7 +273,7 @@ test_expect_success 'cruft commits rescue parents, trees' '
                while read object
                do
                        test-tool chmtime -1000 \
-                               "$objdir/$(test_oid_to_path $object)"
+                               "$objdir/$(test_oid_to_path $object)" || exit 1
                done <objects &&
                test-tool chmtime +500 "$objdir/$(test_oid_to_path \
                        $(git rev-parse HEAD))" &&
@@ -345,7 +346,7 @@ test_expect_success 'expired objects are pruned' '
                while read object
                do
                        test-tool chmtime -1000 \
-                               "$objdir/$(test_oid_to_path $object)"
+                               "$objdir/$(test_oid_to_path $object)" || exit 1
                done <objects &&
 
                keep="$(basename "$(ls $packdir/pack-*.pack)")" &&
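The hunks above append "|| exit 1" to the command run in each while-read loop: a failing iteration does not break the enclosing &&-chain by itself, so the failure has to be propagated explicitly, and inside a subshell "exit 1" aborts only that subshell. A minimal sketch of the pattern, with do_something standing in for any per-object command:

	(
		cd repo &&
		while read oid
		do
			do_something "$oid" || exit 1    # stop the subshell on the first failure
		done <objects
	)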
diff --git a/t/t5351-unpack-large-objects.sh b/t/t5351-unpack-large-objects.sh
new file mode 100755 (executable)
index 0000000..8c8af99
--- /dev/null
@@ -0,0 +1,103 @@
+#!/bin/sh
+#
+# Copyright (c) 2022 Han Xin
+#
+
+test_description='git unpack-objects with large objects'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+prepare_dest () {
+       test_when_finished "rm -rf dest.git" &&
+       git init --bare dest.git &&
+       git -C dest.git config core.bigFileThreshold "$1"
+}
+
+test_expect_success "create large objects (1.5 MB) and PACK" '
+       test-tool genrandom foo 1500000 >big-blob &&
+       test_commit --append foo big-blob &&
+       test-tool genrandom bar 1500000 >big-blob &&
+       test_commit --append bar big-blob &&
+       PACK=$(echo HEAD | git pack-objects --revs pack) &&
+       git verify-pack -v pack-$PACK.pack >out &&
+       sed -n -e "s/^\([0-9a-f][0-9a-f]*\).*\(commit\|tree\|blob\).*/\1/p" \
+               <out >obj-list
+'
+
+test_expect_success 'set memory limitation to 1MB' '
+       GIT_ALLOC_LIMIT=1m &&
+       export GIT_ALLOC_LIMIT
+'
+
+test_expect_success 'unpack-objects failed under memory limitation' '
+       prepare_dest 2m &&
+       test_must_fail git -C dest.git unpack-objects <pack-$PACK.pack 2>err &&
+       grep "fatal: attempting to allocate" err
+'
+
+test_expect_success 'unpack-objects works with memory limitation in dry-run mode' '
+       prepare_dest 2m &&
+       git -C dest.git unpack-objects -n <pack-$PACK.pack &&
+       test_stdout_line_count = 0 find dest.git/objects -type f &&
+       test_dir_is_empty dest.git/objects/pack
+'
+
+test_expect_success 'unpack big object in stream' '
+       prepare_dest 1m &&
+       git -C dest.git unpack-objects <pack-$PACK.pack &&
+       test_dir_is_empty dest.git/objects/pack
+'
+
+check_fsync_events () {
+       local trace="$1" &&
+       shift &&
+
+       cat >expect &&
+       sed -n \
+               -e '/^{"event":"data",.*"category":"fsync",/ {
+                       s/.*"category":"fsync",//;
+                       s/}$//;
+                       p;
+               }' \
+               <"$trace" >actual &&
+       test_cmp expect actual
+}
+
+BATCH_CONFIGURATION='-c core.fsync=loose-object -c core.fsyncmethod=batch'
+
+test_expect_success 'unpack big object in stream (core.fsyncmethod=batch)' '
+       prepare_dest 1m &&
+       GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
+       GIT_TEST_FSYNC=true \
+               git -C dest.git $BATCH_CONFIGURATION unpack-objects <pack-$PACK.pack &&
+       if grep "core.fsyncMethod = batch is unsupported" trace2.txt
+       then
+               flush_count=7
+       else
+               flush_count=1
+       fi &&
+       check_fsync_events trace2.txt <<-EOF &&
+       "key":"fsync/writeout-only","value":"6"
+       "key":"fsync/hardware-flush","value":"$flush_count"
+       EOF
+
+       test_dir_is_empty dest.git/objects/pack &&
+       git -C dest.git cat-file --batch-check="%(objectname)" <obj-list >current &&
+       cmp obj-list current
+'
+
+test_expect_success 'do not unpack existing large objects' '
+       prepare_dest 1m &&
+       git -C dest.git index-pack --stdin <pack-$PACK.pack &&
+       git -C dest.git unpack-objects <pack-$PACK.pack &&
+
+       # The destination came up with the exact same pack...
+       DEST_PACK=$(echo dest.git/objects/pack/pack-*.pack) &&
+       cmp pack-$PACK.pack $DEST_PACK &&
+
+       # ...and wrote no loose objects
+       test_stdout_line_count = 0 find dest.git/objects -type f ! -name "pack-*"
+'
+
+test_done
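The new script above exercises streaming of large blobs: GIT_ALLOC_LIMIT (a testing knob) caps allocations, and blobs larger than core.bigFileThreshold are streamed straight to loose objects instead of being buffered in memory. A trimmed sketch along the lines of the tests, with the pack name taken from the setup step:

	git -C dest.git config core.bigFileThreshold 1m
	GIT_ALLOC_LIMIT=1m git -C dest.git unpack-objects <pack-$PACK.pack
	test_dir_is_empty dest.git/objects/pack    # nothing kept as a pack; all loose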
index 915af2de95e162a9581ca23a6efb229737e665a6..46ebdfbeebaf522281e8e4aa4c68e9fb33ca8513 100755 (executable)
@@ -7,6 +7,7 @@ test_description='Test the post-merge hook.'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index ee6d2dde9f35677fb9c83a228839bb7ad405a4fa..d18f2823d86e8b94c5331b1087f962469fc91f2c 100755 (executable)
@@ -407,6 +407,7 @@ test_expect_success 'in_vain not triggered before first ACK' '
 '
 
 test_expect_success 'in_vain resetted upon ACK' '
+       test_when_finished rm -f log trace2 &&
        rm -rf myserver myclient &&
        git init myserver &&
 
@@ -432,7 +433,8 @@ test_expect_success 'in_vain resetted upon ACK' '
        # first. The 256th commit is common between the client and the server,
        # and should reset in_vain. This allows negotiation to continue until
        # the client reports that first_anotherbranch_commit is common.
-       git -C myclient fetch --progress origin main 2>log &&
+       GIT_TRACE2_EVENT="$(pwd)/trace2" git -C myclient fetch --progress origin main 2>log &&
+       grep \"key\":\"total_rounds\",\"value\":\"6\" trace2 &&
        test_i18ngrep "Total 3 " log
 '
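This hunk, like several others in the series, pulls the "total_rounds" counter out of a trace2 event stream to assert how many negotiation rounds were needed. The same check can be run by hand; the remote and branch below are placeholders:

	GIT_TRACE2_EVENT="$(pwd)/trace2" git fetch origin main
	grep "\"key\":\"total_rounds\"" trace2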
 
index 195fc64dd44ae74c1546698e111f1a19c07dbb04..5ebbaa489689dce28d1b47e16b73e19e25183c5f 100755 (executable)
@@ -5,6 +5,7 @@ test_description='test automatic tag following'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 # End state of the repository:
index fff14e13ed43b385f1ce7c0d01c0bea815c954d5..9006196ac601e8f93f046b54eafe11aa9df58563 100755 (executable)
@@ -241,6 +241,26 @@ test_expect_success 'add invalid foreign_vcs remote' '
        test_cmp expect actual
 '
 
+test_expect_success 'without subcommand' '
+       echo origin >expect &&
+       git -C test remote >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'without subcommand accepts -v' '
+       cat >expect <<-EOF &&
+       origin  $(pwd)/one (fetch)
+       origin  $(pwd)/one (push)
+       EOF
+       git -C test remote -v >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'without subcommand does not take arguments' '
+       test_expect_code 129 git -C test remote origin 2>err &&
+       grep "^error: unknown subcommand:" err
+'
+
 cat >test/expect <<EOF
 * remote origin
   Fetch URL: $(pwd)/one
@@ -302,6 +322,52 @@ test_expect_success 'show' '
        )
 '
 
+cat >expect <<EOF
+* remote origin
+  Fetch URL: $(pwd)/one
+  Push  URL: $(pwd)/one
+  HEAD branch: main
+  Remote branches:
+    main skipped
+    side tracked
+  Local branches configured for 'git pull':
+    ahead merges with remote main
+    main  merges with remote main
+  Local refs configured for 'git push':
+    main pushes to main     (local out of date)
+    main pushes to upstream (create)
+EOF
+
+test_expect_success 'show with negative refspecs' '
+       test_when_finished "git -C test config --unset-all --fixed-value remote.origin.fetch ^refs/heads/main" &&
+       git -C test config --add remote.origin.fetch ^refs/heads/main &&
+       git -C test remote show origin >output &&
+       test_cmp expect output
+'
+
+cat >expect <<EOF
+* remote origin
+  Fetch URL: $(pwd)/one
+  Push  URL: $(pwd)/one
+  HEAD branch: main
+  Remote branches:
+    main new (next fetch will store in remotes/origin)
+    side stale (use 'git remote prune' to remove)
+  Local branches configured for 'git pull':
+    ahead merges with remote main
+    main  merges with remote main
+  Local refs configured for 'git push':
+    main pushes to main     (local out of date)
+    main pushes to upstream (create)
+EOF
+
+test_expect_failure 'show stale with negative refspecs' '
+       test_when_finished "git -C test config --unset-all --fixed-value remote.origin.fetch ^refs/heads/side" &&
+       git -C test config --add remote.origin.fetch ^refs/heads/side &&
+       git -C test remote show origin >output &&
+       test_cmp expect output
+'
+
 cat >test/expect <<EOF
 * remote origin
   Fetch URL: $(pwd)/one
@@ -957,11 +1023,12 @@ test_expect_success 'migrate a remote from named file in $GIT_DIR/remotes' '
 '
 
 test_expect_success 'migrate a remote from named file in $GIT_DIR/branches' '
-       git clone one six &&
+       git clone --template= one six &&
        origin_url=$(pwd)/one &&
        (
                cd six &&
                git remote rm origin &&
+               mkdir .git/branches &&
                echo "$origin_url#main" >.git/branches/origin &&
                git remote rename origin origin &&
                test_path_is_missing .git/branches/origin &&
@@ -972,10 +1039,11 @@ test_expect_success 'migrate a remote from named file in $GIT_DIR/branches' '
 '
 
 test_expect_success 'migrate a remote from named file in $GIT_DIR/branches (2)' '
-       git clone one seven &&
+       git clone --template= one seven &&
        (
                cd seven &&
                git remote rm origin &&
+               mkdir .git/branches &&
                echo "quux#foom" > .git/branches/origin &&
                git remote rename origin origin &&
                test_path_is_missing .git/branches/origin &&
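The clones above pass --template= so that no template content is copied, which is why the tests now have to create .git/branches (and elsewhere .git/info) themselves before writing into them. The pattern, reusing the names from the test above:

	git clone --template= one six
	mkdir six/.git/branches                    # absent when the template is empty
	echo "$origin_url#main" >six/.git/branches/origin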
index 541adbb31034f11820c77972719258ef49131353..3211002d466867fb2abb99eeff897029a7fdfa71 100755 (executable)
@@ -18,6 +18,7 @@ This test checks the following functionality:
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 D=$(pwd)
@@ -26,7 +27,8 @@ mk_empty () {
        repo_name="$1"
        test_when_finished "rm -rf \"$repo_name\"" &&
        test_path_is_missing "$repo_name" &&
-       git init "$repo_name" &&
+       git init --template= "$repo_name" &&
+       mkdir "$repo_name"/.git/hooks &&
        git -C "$repo_name" config receive.denyCurrentBranch warn
 }
 
@@ -78,7 +80,7 @@ mk_test_with_hooks() {
 
 mk_child() {
        test_when_finished "rm -rf \"$2\"" &&
-       git clone "$1" "$2"
+       git clone --template= "$1" "$2"
 }
 
 check_push_result () {
@@ -198,7 +200,10 @@ test_expect_success 'push with negotiation' '
        test_commit -C testrepo unrelated_commit &&
        git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
        test_when_finished "rm event" &&
-       GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main &&
+       GIT_TRACE2_EVENT="$(pwd)/event" \
+               git -c protocol.version=2 -c push.negotiate=1 \
+               push testrepo refs/heads/main:refs/remotes/origin/main &&
+       grep \"key\":\"total_rounds\",\"value\":\"1\" event &&
        grep_wrote 2 event # 1 commit, 1 tree
 '
 
@@ -222,7 +227,10 @@ test_expect_success 'push with negotiation does not attempt to fetch submodules'
        git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
        test_commit -C testrepo unrelated_commit &&
        git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
-       git -c submodule.recurse=true -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
+       GIT_TRACE2_EVENT="$(pwd)/event"  git -c submodule.recurse=true \
+               -c protocol.version=2 -c push.negotiate=1 \
+               push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
+       grep \"key\":\"total_rounds\",\"value\":\"1\" event &&
        ! grep "Fetching submodule" err
 '
 
@@ -937,6 +945,7 @@ test_expect_success 'fetch with branches' '
        mk_empty testrepo &&
        git branch second $the_first_commit &&
        git checkout second &&
+       mkdir testrepo/.git/branches &&
        echo ".." > testrepo/.git/branches/branch1 &&
        (
                cd testrepo &&
@@ -950,6 +959,7 @@ test_expect_success 'fetch with branches' '
 
 test_expect_success 'fetch with branches containing #' '
        mk_empty testrepo &&
+       mkdir testrepo/.git/branches &&
        echo "..#second" > testrepo/.git/branches/branch2 &&
        (
                cd testrepo &&
@@ -964,7 +974,11 @@ test_expect_success 'fetch with branches containing #' '
 test_expect_success 'push with branches' '
        mk_empty testrepo &&
        git checkout second &&
+
+       test_when_finished "rm -rf .git/branches" &&
+       mkdir .git/branches &&
        echo "testrepo" > .git/branches/branch1 &&
+
        git push branch1 &&
        (
                cd testrepo &&
@@ -976,7 +990,11 @@ test_expect_success 'push with branches' '
 
 test_expect_success 'push with branches containing #' '
        mk_empty testrepo &&
+
+       test_when_finished "rm -rf .git/branches" &&
+       mkdir .git/branches &&
        echo "testrepo#branch3" > .git/branches/branch2 &&
+
        git push branch2 &&
        (
                cd testrepo &&
@@ -1865,4 +1883,26 @@ test_expect_success LIBCURL 'push warns or fails when using username:password' '
        test_line_count = 1 warnings
 '
 
+test_expect_success 'push with config push.useBitmaps' '
+       mk_test testrepo heads/main &&
+       git checkout main &&
+       test_unconfig push.useBitmaps &&
+       GIT_TRACE2_EVENT="$PWD/default" \
+       git push testrepo main:test &&
+       test_subcommand git pack-objects --all-progress-implied --revs --stdout \
+               --thin --delta-base-offset -q <default &&
+
+       test_config push.useBitmaps true &&
+       GIT_TRACE2_EVENT="$PWD/true" \
+       git push testrepo main:test2 &&
+       test_subcommand git pack-objects --all-progress-implied --revs --stdout \
+               --thin --delta-base-offset -q <true &&
+
+       test_config push.useBitmaps false &&
+       GIT_TRACE2_EVENT="$PWD/false" \
+       git push testrepo main:test3 &&
+       test_subcommand git pack-objects --all-progress-implied --revs --stdout \
+               --thin --delta-base-offset -q --no-use-bitmap-index <false
+'
+
 test_done
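The push.useBitmaps test above relies on trace2 child events recording the full argv of the spawned pack-objects process, which is what test_subcommand matches against. A rough hand-run equivalent, checking only for the --no-use-bitmap-index flag:

	GIT_TRACE2_EVENT="$PWD/trace" \
		git -c push.useBitmaps=false push testrepo main:test3
	grep -- --no-use-bitmap-index trace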
index b2be3605f5a3f0649879a31803b5fc7cfe40d8a1..56716e29ddf1c3ded4cfae0443ee60ffe4eb0945 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git pull message generation'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 dollar='$Dollar'
index 2f09ff4fac60e832ea86c4158740ce36e58d3132..fbad2d5ff5e9e3a7d23a9117b48edf0c9a2b59d8 100755 (executable)
@@ -80,6 +80,25 @@ test_expect_success 'push to remote repository (standard)' '
         test $HEAD = $(git rev-parse --verify HEAD))
 '
 
+test_expect_success 'push to remote repository (standard) with sending Accept-Language' '
+       cat >exp <<-\EOF &&
+       => Send header: Accept-Language: ko-KR, *;q=0.9
+       => Send header: Accept-Language: ko-KR, *;q=0.9
+       EOF
+
+       cd "$ROOT_PATH"/test_repo_clone &&
+       : >path_lang &&
+       git add path_lang &&
+       test_tick &&
+       git commit -m path_lang &&
+       HEAD=$(git rev-parse --verify HEAD) &&
+       GIT_TRACE_CURL=true LANGUAGE="ko_KR.UTF-8" git push -v -v 2>err &&
+       ! grep "Expect: 100-continue" err &&
+
+       grep "=> Send header: Accept-Language:" err >err.language &&
+       test_cmp exp err.language
+'
+
 test_expect_success 'push already up-to-date' '
        git push
 '
index dd5f44d986f2f774313ca11e7a1bfecf87060e7e..54f54f8d2ebde39c846e1c193e93e13e8b509618 100755 (executable)
@@ -56,7 +56,12 @@ test_expect_success 'hook does not run from repo config' '
        ! grep "hook running" stderr &&
        test_path_is_missing .git/hook.args &&
        test_path_is_missing .git/hook.stdin &&
-       test_path_is_missing .git/hook.stdout
+       test_path_is_missing .git/hook.stdout &&
+
+       # check that global config is used instead
+       test_config_global uploadpack.packObjectsHook ./hook &&
+       git clone --no-local . dst2.git 2>stderr &&
+       grep "hook running" stderr
 '
 
 test_expect_success 'hook works with partial clone' '
index f0d9cd584d3b1865490994f6ce85a7917e3bf129..d7cf85ffeadefcc62e051cc61f9a0fbeae58cd53 100755 (executable)
@@ -369,7 +369,7 @@ ja;q=0.95, zh;q=0.94, sv;q=0.93, pt;q=0.92, nb;q=0.91, *;q=0.90" \
                ko_KR.EUC-KR:en_US.UTF-8:fr_CA:de.UTF-8@euro:sr@latin:ja:zh:sv:pt:nb
 '
 
-test_expect_success 'git client does not send an empty Accept-Language' '
+test_expect_success 'git client send an empty Accept-Language' '
        GIT_TRACE_CURL=true LANGUAGE= git ls-remote "$HTTPD_URL/dumb/repo.git" 2>stderr &&
        ! grep "^=> Send header: Accept-Language:" stderr
 '
@@ -422,7 +422,8 @@ test_expect_success 'set up evil alternates scheme' '
        sha1=$(git -C "$victim" rev-parse HEAD) &&
 
        evil=$HTTPD_DOCUMENT_ROOT_PATH/evil.git &&
-       git init --bare "$evil" &&
+       git init --template= --bare "$evil" &&
+       mkdir "$evil/info" &&
        # do this by hand to avoid object existence check
        printf "%s\\t%s\\n" $sha1 refs/heads/main >"$evil/info/refs"
 '
index b9351a732f6ef0267b2f4698f458471e54a40a93..6a38294a47671dccdc81849e1385a8c6e0310488 100755 (executable)
@@ -31,6 +31,7 @@ test_expect_success 'clone http repository' '
        > GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1
        > Accept: */*
        > Accept-Encoding: ENCODINGS
+       > Accept-Language: ko-KR, *;q=0.9
        > Pragma: no-cache
        < HTTP/1.1 200 OK
        < Pragma: no-cache
@@ -40,13 +41,15 @@ test_expect_success 'clone http repository' '
        > Accept-Encoding: ENCODINGS
        > Content-Type: application/x-git-upload-pack-request
        > Accept: application/x-git-upload-pack-result
+       > Accept-Language: ko-KR, *;q=0.9
        > Content-Length: xxx
        < HTTP/1.1 200 OK
        < Pragma: no-cache
        < Cache-Control: no-cache, max-age=0, must-revalidate
        < Content-Type: application/x-git-upload-pack-result
        EOF
-       GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION=0 \
+
+       GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION=0 LANGUAGE="ko_KR.UTF-8" \
                git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
        test_cmp file clone/file &&
        tr '\''\015'\'' Q <err |
@@ -94,7 +97,10 @@ test_expect_success 'clone http repository' '
                test_cmp exp actual.smudged &&
 
                grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
-               test_line_count = 2 actual.gzip
+               test_line_count = 2 actual.gzip &&
+
+               grep "Accept-Language: ko-KR, *" actual >actual.language &&
+               test_line_count = 2 actual.language
        fi
 '
 
@@ -175,8 +181,8 @@ test_expect_success 'no-op half-auth fetch does not require a password' '
        # This is not possible with protocol v2, since both objects and refs
        # are obtained from the "git-upload-pack" path. A solution to this is
        # to teach the server and client to be able to inline ls-refs requests
-       # as an Extra Parameter (see pack-protocol.txt), so that "info/refs"
-       # can serve refs, just like it does in protocol v0.
+       # as an Extra Parameter (see "git help gitformat-pack-protocol"), so that
+       # "info/refs" can serve refs, just like it does in protocol v0.
        GIT_TEST_PROTOCOL_VERSION=0 git --git-dir=half-auth fetch &&
        expect_askpass none
 '
diff --git a/t/t5557-http-get.sh b/t/t5557-http-get.sh
new file mode 100755 (executable)
index 0000000..76a4bbd
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+test_description='test downloading a file by URL'
+
+TEST_PASSES_SANITIZE_LEAK=true
+
+. ./test-lib.sh
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'get by URL: 404' '
+       test_when_finished "rm -f file.temp" &&
+       url="$HTTPD_URL/none.txt" &&
+       cat >input <<-EOF &&
+       capabilities
+       get $url file1
+       EOF
+
+       test_must_fail git remote-http $url <input 2>err &&
+       test_path_is_missing file1 &&
+       grep "failed to download file at URL" err
+'
+
+test_expect_success 'get by URL: 200' '
+       echo data >"$HTTPD_DOCUMENT_ROOT_PATH/exists.txt" &&
+
+       url="$HTTPD_URL/exists.txt" &&
+       cat >input <<-EOF &&
+       capabilities
+       get $url file2
+
+       EOF
+
+       git remote-http $url <input &&
+       test_cmp "$HTTPD_DOCUMENT_ROOT_PATH/exists.txt" file2
+'
+
+test_done
diff --git a/t/t5558-clone-bundle-uri.sh b/t/t5558-clone-bundle-uri.sh
new file mode 100755 (executable)
index 0000000..ad666a2
--- /dev/null
@@ -0,0 +1,81 @@
+#!/bin/sh
+
+test_description='test fetching bundles with --bundle-uri'
+
+. ./test-lib.sh
+
+test_expect_success 'fail to clone from non-existent file' '
+       test_when_finished rm -rf test &&
+       git clone --bundle-uri="$(pwd)/does-not-exist" . test 2>err &&
+       grep "failed to download bundle from URI" err
+'
+
+test_expect_success 'fail to clone from non-bundle file' '
+       test_when_finished rm -rf test &&
+       echo bogus >bogus &&
+       git clone --bundle-uri="$(pwd)/bogus" . test 2>err &&
+       grep "is not a bundle" err
+'
+
+test_expect_success 'create bundle' '
+       git init clone-from &&
+       git -C clone-from checkout -b topic &&
+       test_commit -C clone-from A &&
+       test_commit -C clone-from B &&
+       git -C clone-from bundle create B.bundle topic
+'
+
+test_expect_success 'clone with path bundle' '
+       git clone --bundle-uri="clone-from/B.bundle" \
+               clone-from clone-path &&
+       git -C clone-path rev-parse refs/bundles/topic >actual &&
+       git -C clone-from rev-parse topic >expect &&
+       test_cmp expect actual
+'
+
+test_expect_success 'clone with file:// bundle' '
+       git clone --bundle-uri="file://$(pwd)/clone-from/B.bundle" \
+               clone-from clone-file &&
+       git -C clone-file rev-parse refs/bundles/topic >actual &&
+       git -C clone-from rev-parse topic >expect &&
+       test_cmp expect actual
+'
+
+#########################################################################
+# HTTP tests begin here
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fail to fetch from non-existent HTTP URL' '
+       test_when_finished rm -rf test &&
+       git clone --bundle-uri="$HTTPD_URL/does-not-exist" . test 2>err &&
+       grep "failed to download bundle from URI" err
+'
+
+test_expect_success 'fail to fetch from non-bundle HTTP URL' '
+       test_when_finished rm -rf test &&
+       echo bogus >"$HTTPD_DOCUMENT_ROOT_PATH/bogus" &&
+       git clone --bundle-uri="$HTTPD_URL/bogus" . test 2>err &&
+       grep "is not a bundle" err
+'
+
+test_expect_success 'clone HTTP bundle' '
+       cp clone-from/B.bundle "$HTTPD_DOCUMENT_ROOT_PATH/B.bundle" &&
+
+       git clone --no-local --mirror clone-from \
+               "$HTTPD_DOCUMENT_ROOT_PATH/fetch.git" &&
+
+       git clone --bundle-uri="$HTTPD_URL/B.bundle" \
+               "$HTTPD_URL/smart/fetch.git" clone-http &&
+       git -C clone-http rev-parse refs/bundles/topic >actual &&
+       git -C clone-from rev-parse topic >expect &&
+       test_cmp expect actual &&
+
+       test_config -C clone-http log.excludedecoration refs/bundle/
+'
+
+# Do not add tests here unless they use the HTTP server, as they will
+# not run unless the HTTP dependencies exist.
+
+test_done
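The new script drives git clone --bundle-uri: the bundle is downloaded first, its refs are stashed under refs/bundles/, and the remaining objects come from the origin. A trimmed usage sketch based on the tests above:

	git -C clone-from bundle create B.bundle topic
	git clone --bundle-uri="clone-from/B.bundle" clone-from clone-path
	git -C clone-path rev-parse refs/bundles/topic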
index cf3be0584f40d13ba187cd8c04efc358910714a7..2e57de9c12a39a63b870079c17b014ad24fb93c3 100755 (executable)
@@ -743,7 +743,11 @@ test_expect_success 'batch missing blob request during checkout' '
 
        # Ensure that there is only one negotiation by checking that there is
        # only "done" line sent. ("done" marks the end of negotiation.)
-       GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
+       GIT_TRACE_PACKET="$(pwd)/trace" \
+               GIT_TRACE2_EVENT="$(pwd)/trace2_event" \
+               git -C client -c trace2.eventNesting=5 checkout HEAD^ &&
+       grep \"key\":\"total_rounds\",\"value\":\"1\" trace2_event >trace_lines &&
+       test_line_count = 1 trace_lines &&
        grep "fetch> done" trace >done_lines &&
        test_line_count = 1 done_lines
 '
index 8f676d6b0c0e2f6f72b2c0285afaca5b16be3017..f6bb02ab947c1de61484e95e0f4a6228a16da103 100755 (executable)
@@ -58,6 +58,14 @@ test_expect_success 'disallows --bare with --separate-git-dir' '
 
 '
 
+test_expect_success 'disallows --bundle-uri with shallow options' '
+       for option in --depth=1 --shallow-since=01-01-2000 --shallow-exclude=HEAD
+       do
+               test_must_fail git clone --bundle-uri=bundle $option from to 2>err &&
+               grep "bundle-uri is incompatible" err || return 1
+       done
+'
+
 test_expect_success 'reject cloning shallow repository' '
        test_when_finished "rm -rf repo" &&
        test_must_fail git clone --reject-shallow shallow-repo out 2>err &&
index 4a3778d04a82df6322048e34864578860b96f859..9aeacc2f6a5267cfdf8a05f2c924f4ecd04fe319 100755 (executable)
@@ -49,6 +49,13 @@ test_expect_success 'do partial clone 1' '
        test "$(git -C pc1 config --local remote.origin.partialclonefilter)" = "blob:none"
 '
 
+test_expect_success 'rev-list --missing=allow-promisor on partial clone' '
+       git -C pc1 rev-list --objects --missing=allow-promisor HEAD >actual &&
+       git -C pc1 rev-list --objects --missing=print HEAD >expect.raw &&
+       grep -v "^?" expect.raw >expect &&
+       test_cmp expect actual
+'
+
 test_expect_success 'verify that .promisor file contains refs fetched' '
        ls pc1/.git/objects/pack/pack-*.promisor >promisorlist &&
        test_line_count = 1 promisorlist &&
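The new rev-list test above distinguishes two --missing modes in a partial clone: --missing=print emits each missing object with a leading "?", while --missing=allow-promisor silently tolerates objects the promisor remote can supply. Stripping the "?" lines from the former should therefore reproduce the latter, which is what the test asserts:

	git -C pc1 rev-list --objects --missing=print HEAD | grep "^?"    # the missing ones
	git -C pc1 rev-list --objects --missing=allow-promisor HEAD       # no "?" lines at all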
index 9d6cd7d98649c0fe0f40c474d5ed88528ac9ba8c..df74f80061c564b7f69961f0f3d665b1afca460f 100755 (executable)
@@ -229,14 +229,16 @@ test_expect_success 'setup repos for fetching with ref-in-want tests' '
 '
 
 test_expect_success 'fetching with exact OID' '
-       test_when_finished "rm -f log" &&
+       test_when_finished "rm -f log trace2" &&
 
        rm -rf local &&
        cp -r "$LOCAL_PRISTINE" local &&
        oid=$(git -C "$REPO" rev-parse d) &&
-       GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin \
+       GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE2_EVENT="$(pwd)/trace2" \
+               git -C local fetch origin \
                "$oid":refs/heads/actual &&
 
+       grep \"key\":\"total_rounds\",\"value\":\"2\" trace2 &&
        git -C "$REPO" rev-parse "d" >expected &&
        git -C local rev-parse refs/heads/actual >actual &&
        test_cmp expected actual &&
index 7294147334a92e6be082ad57177adb6e0857facc..16635ecc33e5e67ab56985a5e4a071492ed22cc9 100755 (executable)
@@ -99,6 +99,7 @@ do
        "
 
        test_expect_success 'with grafts' "
+               mkdir -p .git/info &&
                echo '$B0 $A2' >.git/info/grafts &&
                check $type $B2 -- $B2 $B1 $B0 $A2 $A1 $A0
        "
index 3153a0d891046421f53ea6543182c47ca750e07a..12e67e187ef21456f79a2ebdcc19c641d8ec81a6 100755 (executable)
@@ -8,6 +8,7 @@ test_description='git rev-list involving submodules that this repo has'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index af57a04b7ffa01644768ee507714ad5f5d5b79c6..738da23628b12b4660105195e2bbcedf68416f33 100755 (executable)
@@ -8,8 +8,13 @@ test_description='--ancestry-path'
 #   /                     \
 #  A-------K---------------L--M
 #
-#  D..M                 == E F G H I J K L M
-#  --ancestry-path D..M == E F H I J L M
+#  D..M                                     == E F G H I J K L M
+#  --ancestry-path                     D..M == E F   H I J   L M
+#  --ancestry-path=F                   D..M == E F       J   L M
+#  --ancestry-path=G                   D..M ==     G H I J   L M
+#  --ancestry-path=H                   D..M == E   G H I J   L M
+#  --ancestry-path=K                   D..M ==             K L M
+#  --ancestry-path=K --ancestry-path=F D..M == E F       J K L M
 #
 #  D..M -- M.t                 == M
 #  --ancestry-path D..M -- M.t == M
@@ -50,73 +55,41 @@ test_expect_success setup '
        test_commit M
 '
 
-test_expect_success 'rev-list D..M' '
-       test_write_lines E F G H I J K L M >expect &&
-       git rev-list --format=%s D..M |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'rev-list --ancestry-path D..M' '
-       test_write_lines E F H I J L M >expect &&
-       git rev-list --ancestry-path --format=%s D..M |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'rev-list D..M -- M.t' '
-       echo M >expect &&
-       git rev-list --format=%s D..M -- M.t |
-       sed -e "/^commit /d" >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'rev-list --ancestry-path D..M -- M.t' '
-       echo M >expect &&
-       git rev-list --ancestry-path --format=%s D..M -- M.t |
-       sed -e "/^commit /d" >actual &&
-       test_cmp expect actual
-'
+test_ancestry () {
+       args=$1
+       expected=$2
+       test_expect_success "log $args" "
+               test_write_lines $expected >expect &&
+               git log --format=%s $args >raw &&
+
+               if test -n \"$expected\"
+               then
+                       sort raw >actual &&
+                       test_cmp expect actual
+               else
+                       test_must_be_empty raw
+               fi
+       "
+}
 
-test_expect_success 'rev-list F...I' '
-       test_write_lines F G H I >expect &&
-       git rev-list --format=%s F...I |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
+test_ancestry "D..M" "E F G H I J K L M"
 
-test_expect_success 'rev-list --ancestry-path F...I' '
-       test_write_lines F H I >expect &&
-       git rev-list --ancestry-path --format=%s F...I |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
+test_ancestry "--ancestry-path D..M" "E F H I J L M"
+test_ancestry "--ancestry-path=F D..M" "E F J L M"
+test_ancestry "--ancestry-path=G D..M" "G H I J L M"
+test_ancestry "--ancestry-path=H D..M" "E G H I J L M"
+test_ancestry "--ancestry-path=K D..M" "K L M"
+test_ancestry "--ancestry-path=F --ancestry-path=K D..M" "E F J K L M"
 
-# G.t is dropped in an "-s ours" merge
-test_expect_success 'rev-list G..M -- G.t' '
-       git rev-list --format=%s G..M -- G.t |
-       sed -e "/^commit /d" >actual &&
-       test_must_be_empty actual
-'
+test_ancestry "D..M -- M.t" "M"
+test_ancestry "--ancestry-path D..M -- M.t" "M"
 
-test_expect_success 'rev-list --ancestry-path G..M -- G.t' '
-       echo L >expect &&
-       git rev-list --ancestry-path --format=%s G..M -- G.t |
-       sed -e "/^commit /d" >actual &&
-       test_cmp expect actual
-'
+test_ancestry "F...I" "F G H I"
+test_ancestry "--ancestry-path F...I" "F H I"
 
-test_expect_success 'rev-list --ancestry-path --simplify-merges G^..M -- G.t' '
-       test_write_lines G L >expect &&
-       git rev-list --ancestry-path --simplify-merges --format=%s G^..M -- G.t |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
+test_ancestry "G..M -- G.t" ""
+test_ancestry "--ancestry-path G..M -- G.t" "L"
+test_ancestry "--ancestry-path --simplify-merges G^..M -- G.t" "G L"
 
 #   b---bc
 #  / \ /
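
Based on the expectation table at the top of the hunk, a rough sketch of the parameterized option that the new test_ancestry helper exercises (commit names refer to the test graph in the comments):

        # commits in D..M that are ancestors of F, descendants of F, or F itself
        git log --format=%s --ancestry-path=F D..M

        # bounding commits may be given more than once
        git log --format=%s --ancestry-path=K --ancestry-path=F D..M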
index a3a41c7a3e47899521d63c056532604a7d224c97..d20723d627629f5379f35c33c74ff22d1082152e 100755 (executable)
@@ -9,6 +9,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
 TEST_PASSES_SANITIZE_LEAK=true
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 test_cmp_rev_output () {
@@ -26,6 +27,7 @@ test_expect_success 'setup' '
        git merge -m next --allow-unrelated-histories start2 &&
        test_commit final &&
 
+       mkdir .git/info &&
        test_seq 40 |
        while read i
        do
index cf0195e8263c6fdc25977f421252c41238f03419..4a9a4436e219c856095784324e4b097b3402ba29 100755 (executable)
@@ -17,7 +17,7 @@ test_expect_success 'setup unexpected non-blob entry' '
        broken_tree="$(git hash-object -w --literally -t tree broken-tree)"
 '
 
-test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob entry (lone)' '
+test_expect_success 'TODO (should fail!): traverse unexpected non-blob entry (lone)' '
        sed "s/Z$//" >expect <<-EOF &&
        $broken_tree Z
        $tree foo
@@ -121,7 +121,7 @@ test_expect_success 'setup unexpected non-blob tag' '
        tag=$(git hash-object -w --literally -t tag broken-tag)
 '
 
-test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob tag (lone)' '
+test_expect_success 'TODO (should fail!): traverse unexpected non-blob tag (lone)' '
        git rev-list --objects $tag
 '
 
index b4aef32b713ca00f1d8ad4a098f02ca9dc9be126..d59111dedec8020a138296928649eaff4de45694 100755 (executable)
@@ -48,4 +48,26 @@ check_du HEAD
 check_du --objects HEAD
 check_du --objects HEAD^..HEAD
 
+# As mentioned above, don't hardcode sizes as the expected values; use the
+# output from git cat-file instead.
+test_expect_success 'rev-list --disk-usage=human' '
+       git rev-list --objects HEAD --disk-usage=human >actual &&
+       disk_usage_slow --objects HEAD >actual_size &&
+       grep "$(cat actual_size) bytes" actual
+'
+
+test_expect_success 'rev-list --disk-usage=human with bitmaps' '
+       git rev-list --objects HEAD --use-bitmap-index --disk-usage=human >actual &&
+       disk_usage_slow --objects HEAD >actual_size &&
+       grep "$(cat actual_size) bytes" actual
+'
+
+test_expect_success 'rev-list rejects an improper --disk-usage value' '
+       test_must_fail git rev-list --objects HEAD --disk-usage=typo 2>err &&
+       cat >expect <<-\EOF &&
+       fatal: invalid value for '\''--disk-usage=<format>'\'': '\''typo'\'', the only allowed format is '\''human'\''
+       EOF
+       test_cmp expect err
+'
+
 test_done
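
For reference, a rough sketch of the two output forms these new tests compare (the sample values are illustrative):

        # raw byte count, as before
        git rev-list --objects --disk-usage HEAD

        # the same total rendered for humans, e.g. "1.96 MiB" or "446 bytes"
        git rev-list --objects --disk-usage=human HEAD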
index 9fdafeb1e907f4381f2b1e74c6c00771d188d136..cada952f9aee35fe795af5cd99af222f1a8b599f 100755 (executable)
@@ -293,7 +293,11 @@ test_expect_success 'add with all negative' '
        test_cmp expect actual
 '
 
-test_expect_success 'add -p with all negative' '
+test_lazy_prereq ADD_I_USE_BUILTIN_OR_PERL '
+       test_have_prereq ADD_I_USE_BUILTIN || test_have_prereq PERL
+'
+
+test_expect_success ADD_I_USE_BUILTIN_OR_PERL 'add -p with all negative' '
        H=$(git rev-parse HEAD) &&
        git reset --hard $H &&
        git clean -f &&
index 0f1cb49cedc645c5a9b0c56257d3aca4740d5cd3..3a241f259de157185ba2733504e5ed1de552c7c5 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='test case exclude pathspec'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup a submodule' '
index 57a67cf362730be7d2ec695310346cb15b276823..3de4ef6bd9e640d2f82deb111dd307e9a000db91 100755 (executable)
@@ -126,7 +126,7 @@ test_expect_success 'Simple merge in repo with interesting pathnames' '
        #     foo/bar-2/baz
        # The fact that foo/bar-2 appears between foo/bar and foo/bar/baz
        # can trip up some codepaths, and is the point of this test.
-       test_create_repo name-ordering &&
+       git init name-ordering &&
        (
                cd name-ordering &&
 
index 3a32b1a45cf8e4bf2a7b42eeec98923262b3e470..772238e582c6f11b81c12a519f81bbe48cecf635 100755 (executable)
@@ -210,7 +210,7 @@ test_expect_success 'updated working tree file should prevent the merge' '
        echo >>M one line addition &&
        cat M >M.saved &&
        git update-index M &&
-       test_expect_code 128 git pull --no-rebase . yellow &&
+       test_expect_code 2 git pull --no-rebase . yellow &&
        test_cmp M M.saved &&
        rm -f M.saved
 '
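
Restated outside the harness (the branch and file names come from the test above), the updated expectation amounts to:

        git pull --no-rebase . yellow       # refused
        test $? -eq 2                       # no longer a die()-style 128
        cmp M M.saved                       # the local edit to M is left intact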
index 2f421d967abefbef96eb6f2c0d1201184b9b5d57..1a7082323dddfce66b2d419efe60c87314c5f8e1 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='RCS merge replacement: merge-file'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index b8735c6db4d7c9cc556231a5b0dab091f2f2eb61..36215518b6eb1a746ded0f9b475a7d3029b807a6 100755 (executable)
@@ -4,6 +4,7 @@ test_description='Test merge without common ancestors'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 # This scenario is based on a real-world repository of Shawn Pearce.
index 7435fce71e004095c3a9fe181b038ae77a800192..29e2b25ce5de25f6f97aa6ac967332e7603eb91e 100755 (executable)
@@ -11,6 +11,7 @@ if core.symlinks is false.'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
index 99abefd44b96e6528900bb672ba7de38c6bb9135..8650a88c40a2a58182ee39f36cdc59315ae1a35c 100755 (executable)
@@ -162,8 +162,8 @@ test_expect_success 'custom merge backend' '
 '
 
 test_expect_success 'up-to-date merge without common ancestor' '
-       test_create_repo repo1 &&
-       test_create_repo repo2 &&
+       git init repo1 &&
+       git init repo2 &&
        test_tick &&
        (
                cd repo1 &&
index 0753fc95f45efb642543f9f23191d3430d4d6cde..e8a28717cece3248c2d9996e797dcfdb9af128eb 100755 (executable)
@@ -5,7 +5,6 @@ test_description='ask merge-recursive to merge binary files'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
-TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 7763c1ba98080d5d1d68e1009fb70f6c80cf479a..8a1ba6d23a7dc4a7ca7a56988f3d945d9af38619 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='merge fast-forward and up to date'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 6ae2489286c278f978c3a87f1015f16fe2bb005f..b6182723aae158acb4e56ab9018c9b30f4f86cd6 100755 (executable)
@@ -4,6 +4,7 @@ test_description='merge: handle file mode'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'set up mode change in one branch' '
index affea255fe92ca134a553d2a69362a8f8f7a8ab1..b4f4a313f486a583ca62268f2106948cca8d7266 100755 (executable)
@@ -11,6 +11,7 @@ test_description='merge conflict in crlf repo
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 690c8482b13a3763b223be19799ce22197cd383c..17b54d625d0e468047377c378faae25c50633c9b 100755 (executable)
@@ -19,7 +19,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 #
 
 test_expect_success 'setup basic criss-cross + rename with no modifications' '
-       test_create_repo basic-rename &&
+       git init basic-rename &&
        (
                cd basic-rename &&
 
@@ -85,7 +85,7 @@ test_expect_success 'merge simple rename+criss-cross with no modifications' '
 #
 
 test_expect_success 'setup criss-cross + rename merges with basic modification' '
-       test_create_repo rename-modify &&
+       git init rename-modify &&
        (
                cd rename-modify &&
 
@@ -160,7 +160,7 @@ test_expect_success 'merge criss-cross + rename merges with basic modification'
 #
 
 test_expect_success 'setup differently handled merges of rename/add conflict' '
-       test_create_repo rename-add &&
+       git init rename-add &&
        (
                cd rename-add &&
 
@@ -324,7 +324,7 @@ test_expect_success 'git detects differently handled merges conflict, swapped' '
 # Merging commits D & E should result in modify/delete conflict.
 
 test_expect_success 'setup criss-cross + modify/delete resolved differently' '
-       test_create_repo modify-delete &&
+       git init modify-delete &&
        (
                cd modify-delete &&
 
@@ -499,7 +499,7 @@ test_expect_success 'git detects conflict merging criss-cross+modify/delete, rev
 #
 
 test_expect_success 'setup differently handled merges of directory/file conflict' '
-       test_create_repo directory-file &&
+       git init directory-file &&
        (
                cd directory-file &&
 
@@ -867,7 +867,7 @@ test_expect_failure 'merge of D2 & E4 merges a2s & reports conflict for a/file'
 # but that may cancel out at the final merge stage".
 
 test_expect_success 'setup rename/rename(1to2)/modify followed by what looks like rename/rename(2to1)/modify' '
-       test_create_repo rename-squared-squared &&
+       git init rename-squared-squared &&
        (
                cd rename-squared-squared &&
 
@@ -944,7 +944,7 @@ test_expect_success 'handle rename/rename(1to2)/modify followed by what looks li
 # content merge handled.
 
 test_expect_success 'setup criss-cross + rename/rename/add-source + modify/modify' '
-       test_create_repo rename-rename-add-source &&
+       git init rename-rename-add-source &&
        (
                cd rename-rename-add-source &&
 
@@ -1032,7 +1032,7 @@ test_expect_failure 'detect rename/rename/add-source for virtual merge-base' '
 # base of B & C needs to not delete B:c for that to work, though...
 
 test_expect_success 'setup criss-cross+rename/rename/add-dest + simple modify' '
-       test_create_repo rename-rename-add-dest &&
+       git init rename-rename-add-dest &&
        (
                cd rename-rename-add-dest &&
 
@@ -1111,7 +1111,7 @@ test_expect_success 'virtual merge base handles rename/rename(1to2)/add-dest' '
 # git detect it?
 
 test_expect_success 'setup symlink modify/modify' '
-       test_create_repo symlink-modify-modify &&
+       git init symlink-modify-modify &&
        (
                cd symlink-modify-modify &&
 
@@ -1178,7 +1178,7 @@ test_expect_merge_algorithm failure success 'check symlink modify/modify' '
 # git detect it?
 
 test_expect_success 'setup symlink add/add' '
-       test_create_repo symlink-add-add &&
+       git init symlink-add-add &&
        (
                cd symlink-add-add &&
 
@@ -1244,11 +1244,11 @@ test_expect_merge_algorithm failure success 'check symlink add/add' '
 # git detect it?
 
 test_expect_success 'setup submodule modify/modify' '
-       test_create_repo submodule-modify-modify &&
+       git init submodule-modify-modify &&
        (
                cd submodule-modify-modify &&
 
-               test_create_repo submod &&
+               git init submod &&
                (
                        cd submod &&
                        touch file-A &&
@@ -1332,11 +1332,11 @@ test_expect_merge_algorithm failure success 'check submodule modify/modify' '
 # git detect it?
 
 test_expect_success 'setup submodule add/add' '
-       test_create_repo submodule-add-add &&
+       git init submodule-add-add &&
        (
                cd submodule-add-add &&
 
-               test_create_repo submod &&
+               git init submod &&
                (
                        cd submod &&
                        touch file-A &&
@@ -1419,11 +1419,11 @@ test_expect_merge_algorithm failure success 'check submodule add/add' '
 # This is an obvious add/add conflict for 'path'.  Can git detect it?
 
 test_expect_success 'setup conflicting entry types (submodule vs symlink)' '
-       test_create_repo submodule-symlink-add-add &&
+       git init submodule-symlink-add-add &&
        (
                cd submodule-symlink-add-add &&
 
-               test_create_repo path &&
+               git init path &&
                (
                        cd path &&
                        touch file-B &&
@@ -1494,7 +1494,7 @@ test_expect_merge_algorithm failure success 'check conflicting entry types (subm
 # This is an obvious add/add mode conflict.  Can git detect it?
 
 test_expect_success 'setup conflicting modes for regular file' '
-       test_create_repo regular-file-mode-conflict &&
+       git init regular-file-mode-conflict &&
        (
                cd regular-file-mode-conflict &&
 
@@ -1571,7 +1571,7 @@ test_expect_failure 'check conflicting modes for regular file' '
 #   to ensure that we handle it as well as practical.
 
 test_expect_success 'setup nested conflicts' '
-       test_create_repo nested_conflicts &&
+       git init nested_conflicts &&
        (
                cd nested_conflicts &&
 
@@ -1757,7 +1757,7 @@ test_expect_success 'check nested conflicts' '
 #   have three levels of conflict markers.  Can we distinguish all three?
 
 test_expect_success 'setup virtual merge base with nested conflicts' '
-       test_create_repo virtual_merge_base_has_nested_conflicts &&
+       git init virtual_merge_base_has_nested_conflicts &&
        (
                cd virtual_merge_base_has_nested_conflicts &&
 
index 62d1406119e8c2b08a1a93e3d8fb95167ad16e7d..482b73a998ff2aa6651fc3fc7bd845fffd2c80c3 100755 (executable)
@@ -4,6 +4,7 @@ test_description='Merge-recursive ours and theirs variants'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 36bcd7c3280152f16a4150d64afb7621e17e2ee5..5413e5dd9d68c36b95ecf9ccf5ca9e438ff0413b 100755 (executable)
@@ -31,7 +31,7 @@ test_description="limiting blob downloads when merging with partial clones"
 
 test_setup_repo () {
        test -d server && return
-       test_create_repo server &&
+       git init server &&
        (
                cd server &&
 
index bf4ce3c63d4c86f5f549ffa087fd81bb4fb7e327..346253c7c88ee58ebf6735b6c06ae69f18ce5861 100755 (executable)
@@ -6,11 +6,12 @@ test_description="recursive merge corner cases w/ renames but not criss-crosses"
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-merge.sh
 
 test_setup_rename_delete_untracked () {
-       test_create_repo rename-delete-untracked &&
+       git init rename-delete-untracked &&
        (
                cd rename-delete-untracked &&
 
@@ -55,7 +56,7 @@ test_expect_success "Does git preserve Gollum's precious artifact?" '
 # We should be able to merge B & C cleanly
 
 test_setup_rename_modify_add_source () {
-       test_create_repo rename-modify-add-source &&
+       git init rename-modify-add-source &&
        (
                cd rename-modify-add-source &&
 
@@ -95,7 +96,7 @@ test_expect_failure 'rename/modify/add-source conflict resolvable' '
 '
 
 test_setup_break_detection_1 () {
-       test_create_repo break-detection-1 &&
+       git init break-detection-1 &&
        (
                cd break-detection-1 &&
 
@@ -143,7 +144,7 @@ test_expect_failure 'conflict caused if rename not detected' '
 '
 
 test_setup_break_detection_2 () {
-       test_create_repo break-detection-2 &&
+       git init break-detection-2 &&
        (
                cd break-detection-2 &&
 
@@ -191,7 +192,7 @@ test_expect_failure 'missed conflict if rename not detected' '
 #   Commit C: rename a->b, add unrelated a
 
 test_setup_break_detection_3 () {
-       test_create_repo break-detection-3 &&
+       git init break-detection-3 &&
        (
                cd break-detection-3 &&
 
@@ -267,7 +268,7 @@ test_expect_failure 'detect rename/add-source and preserve all data, merge other
 '
 
 test_setup_rename_directory () {
-       test_create_repo rename-directory-$1 &&
+       git init rename-directory-$1 &&
        (
                cd rename-directory-$1 &&
 
@@ -385,7 +386,7 @@ test_expect_success 'rename/directory conflict + content merge conflict' '
 '
 
 test_setup_rename_directory_2 () {
-       test_create_repo rename-directory-2 &&
+       git init rename-directory-2 &&
        (
                cd rename-directory-2 &&
 
@@ -444,7 +445,7 @@ test_expect_success 'disappearing dir in rename/directory conflict handled' '
 #   Commit B: modify a, add different b
 
 test_setup_rename_with_content_merge_and_add () {
-       test_create_repo rename-with-content-merge-and-add-$1 &&
+       git init rename-with-content-merge-and-add-$1 &&
        (
                cd rename-with-content-merge-and-add-$1 &&
 
@@ -569,7 +570,7 @@ test_expect_success 'handle rename-with-content-merge vs. add, merge other way'
 #   * Nothing else should be present.  Is anything?
 
 test_setup_rename_rename_2to1 () {
-       test_create_repo rename-rename-2to1 &&
+       git init rename-rename-2to1 &&
        (
                cd rename-rename-2to1 &&
 
@@ -641,7 +642,7 @@ test_expect_success 'handle rename/rename (2to1) conflict correctly' '
 #   Commit B: rename a->b
 #   Commit C: rename a->c
 test_setup_rename_rename_1to2 () {
-       test_create_repo rename-rename-1to2 &&
+       git init rename-rename-1to2 &&
        (
                cd rename-rename-1to2 &&
 
@@ -699,7 +700,7 @@ test_expect_success 'merge has correct working tree contents' '
 # Merging of B & C should NOT be clean; there's a rename/rename conflict
 
 test_setup_rename_rename_1to2_add_source_1 () {
-       test_create_repo rename-rename-1to2-add-source-1 &&
+       git init rename-rename-1to2-add-source-1 &&
        (
                cd rename-rename-1to2-add-source-1 &&
 
@@ -747,7 +748,7 @@ test_expect_failure 'detect conflict with rename/rename(1to2)/add-source merge'
 '
 
 test_setup_rename_rename_1to2_add_source_2 () {
-       test_create_repo rename-rename-1to2-add-source-2 &&
+       git init rename-rename-1to2-add-source-2 &&
        (
                cd rename-rename-1to2-add-source-2 &&
 
@@ -793,7 +794,7 @@ test_expect_failure 'rename/rename/add-source still tracks new a file' '
 '
 
 test_setup_rename_rename_1to2_add_dest () {
-       test_create_repo rename-rename-1to2-add-dest &&
+       git init rename-rename-1to2-add-dest &&
        (
                cd rename-rename-1to2-add-dest &&
 
@@ -873,7 +874,7 @@ test_expect_success 'rename/rename/add-dest merge still knows about conflicting
 #   Expected: CONFLICT (rename/add/delete), two-way merged bar
 
 test_setup_rad () {
-       test_create_repo rad &&
+       git init rad &&
        (
                cd rad &&
                echo "original file" >foo &&
@@ -945,7 +946,7 @@ test_expect_merge_algorithm failure success 'rad-check: rename/add/delete confli
 #   Expected: CONFLICT (rename/rename/delete/delete), two-way merged baz
 
 test_setup_rrdd () {
-       test_create_repo rrdd &&
+       git init rrdd &&
        (
                cd rrdd &&
                echo foo >foo &&
@@ -1021,7 +1022,7 @@ test_expect_merge_algorithm failure success 'rrdd-check: rename/rename(2to1)/del
 #             multi-way merged contents found in two, four, six
 
 test_setup_mod6 () {
-       test_create_repo mod6 &&
+       git init mod6 &&
        (
                cd mod6 &&
                test_seq 11 19 >one &&
@@ -1159,7 +1160,7 @@ test_conflicts_with_adds_and_renames() {
        #      tree
        test_setup_collision_conflict () {
        #test_expect_success "setup simple $sideL/$sideR conflict" '
-               test_create_repo simple_${sideL}_${sideR} &&
+               git init simple_${sideL}_${sideR} &&
                (
                        cd simple_${sideL}_${sideR} &&
 
@@ -1307,7 +1308,7 @@ test_conflicts_with_adds_and_renames add    add
 #   So, we have four different conflicting files that all end up at path
 #   'three'.
 test_setup_nested_conflicts_from_rename_rename () {
-       test_create_repo nested_conflicts_from_rename_rename &&
+       git init nested_conflicts_from_rename_rename &&
        (
                cd nested_conflicts_from_rename_rename &&
 
@@ -1416,7 +1417,7 @@ test_expect_success 'check nested conflicts from rename/rename(2to1)' '
 #   Expected: CONFLICT(rename/rename) message, three unstaged entries in the
 #             index, and contents of orig-[AB] at path orig-[AB]
 test_setup_rename_rename_1_to_2_binary () {
-       test_create_repo rename_rename_1_to_2_binary &&
+       git init rename_rename_1_to_2_binary &&
        (
                cd rename_rename_1_to_2_binary &&
 
index 99baf77cbfdce0ea13345a21ad11575d52e7de59..a4941878fe2a693910cca1f226277747706a5017 100755 (executable)
@@ -40,7 +40,7 @@ test_description="recursive merge with directory renames"
 #   Expected: y/{b,c,d,e/f}
 
 test_setup_1a () {
-       test_create_repo 1a &&
+       git init 1a &&
        (
                cd 1a &&
 
@@ -106,7 +106,7 @@ test_expect_success '1a: Simple directory rename detection' '
 #   Expected: y/{b,c,d,e}
 
 test_setup_1b () {
-       test_create_repo 1b &&
+       git init 1b &&
        (
                cd 1b &&
 
@@ -169,7 +169,7 @@ test_expect_success '1b: Merge a directory with another' '
 #   Expected: y/{b,c,d}  (because x/d -> z/d -> y/d)
 
 test_setup_1c () {
-       test_create_repo 1c &&
+       git init 1c &&
        (
                cd 1c &&
 
@@ -232,7 +232,7 @@ test_expect_success '1c: Transitive renaming' '
 #         y/wham_1 & z/wham_2 should too...giving us a conflict.
 
 test_setup_1d () {
-       test_create_repo 1d &&
+       git init 1d &&
        (
                cd 1d &&
 
@@ -328,7 +328,7 @@ test_expect_success '1d: Directory renames cause a rename/rename(2to1) conflict'
 #   Expected: y/{newb,newc,d}
 
 test_setup_1e () {
-       test_create_repo 1e &&
+       git init 1e &&
        (
                cd 1e &&
 
@@ -387,7 +387,7 @@ test_expect_success '1e: Renamed directory, with all files being renamed too' '
 #   Expected: y/{b,c}, x/{d,e,f,g}
 
 test_setup_1f () {
-       test_create_repo 1f &&
+       git init 1f &&
        (
                cd 1f &&
 
@@ -476,7 +476,7 @@ test_expect_success '1f: Split a directory into two other directories' '
 #   Commit B: z/{b,c,d}
 #   Expected: y/b, w/c, z/d, with warning about z/ -> (y/ vs. w/) conflict
 test_setup_2a () {
-       test_create_repo 2a &&
+       git init 2a &&
        (
                cd 2a &&
 
@@ -538,7 +538,7 @@ test_expect_success '2a: Directory split into two on one side, with equal number
 #   Commit B: z/{b,c}, x/d
 #   Expected: y/b, w/c, x/d; No warning about z/ -> (y/ vs. w/) conflict
 test_setup_2b () {
-       test_create_repo 2b &&
+       git init 2b &&
        (
                cd 2b &&
 
@@ -620,7 +620,7 @@ test_expect_success '2b: Directory split into two on one side, with equal number
 #   Commit B: y/{b,c}, x/d
 #   Expected: y/{b,c}, x/d
 test_setup_3a () {
-       test_create_repo 3a &&
+       git init 3a &&
        (
                cd 3a &&
 
@@ -684,7 +684,7 @@ test_expect_success '3a: Avoid implicit rename if involved as source on other si
 #         end up with CONFLICT:(z/d -> y/d vs. x/d vs. w/d), i.e. a
 #         rename/rename/rename(1to3) conflict, which is just weird.
 test_setup_3b () {
-       test_create_repo 3b &&
+       git init 3b &&
        (
                cd 3b &&
 
@@ -807,7 +807,7 @@ test_expect_success '3b: Avoid implicit rename if involved as source on current
 #   NOTE: Even though most files from z moved to y, we don't want f to follow.
 
 test_setup_4a () {
-       test_create_repo 4a &&
+       git init 4a &&
        (
                cd 4a &&
 
@@ -896,7 +896,7 @@ test_expect_success '4a: Directory split, with original directory still present'
 #         index.
 
 test_setup_5a () {
-       test_create_repo 5a &&
+       git init 5a &&
        (
                cd 5a &&
 
@@ -971,7 +971,7 @@ test_expect_success '5a: Merge directories, other side adds files to original an
 #         back to git behavior without the directory rename detection.
 
 test_setup_5b () {
-       test_create_repo 5b &&
+       git init 5b &&
        (
                cd 5b &&
 
@@ -1048,7 +1048,7 @@ test_expect_success '5b: Rename/delete in order to get add/add/add conflict' '
 #             though, because it doesn't have anything in the way.
 
 test_setup_5c () {
-       test_create_repo 5c &&
+       git init 5c &&
        (
                cd 5c &&
 
@@ -1138,7 +1138,7 @@ test_expect_success '5c: Transitive rename would cause rename/rename/rename/add/
 #         directory rename detection for z/f -> y/f.
 
 test_setup_5d () {
-       test_create_repo 5d &&
+       git init 5d &&
        (
                cd 5d &&
 
@@ -1239,7 +1239,7 @@ test_expect_success '5d: Directory/file/file conflict due to directory rename' '
 #         it is also involved in a rename/delete conflict.
 
 test_setup_6a () {
-       test_create_repo 6a &&
+       git init 6a &&
        (
                cd 6a &&
 
@@ -1337,7 +1337,7 @@ test_expect_success '6a: Tricky rename/delete' '
 #         the behavior on testcases 6b2 and 8e, and introduced this 6b1 testcase.
 
 test_setup_6b1 () {
-       test_create_repo 6b1 &&
+       git init 6b1 &&
        (
                cd 6b1 &&
 
@@ -1415,7 +1415,7 @@ test_expect_merge_algorithm failure success '6b1: Same renames done on both side
 #         the z/ -> y/ rename.
 
 test_setup_6b2 () {
-       test_create_repo 6b2 &&
+       git init 6b2 &&
        (
                cd 6b2 &&
 
@@ -1479,7 +1479,7 @@ test_expect_merge_algorithm failure success '6b2: Same rename done on both sides
 #         "accidentally detect a rename" and give us y/{b,c,d}.
 
 test_setup_6c () {
-       test_create_repo 6c &&
+       git init 6c &&
        (
                cd 6c &&
 
@@ -1542,7 +1542,7 @@ test_expect_success '6c: Rename only done on same side' '
 #         doesn't "accidentally detect a rename" and give us y/{b,c,d}.
 
 test_setup_6d () {
-       test_create_repo 6d &&
+       git init 6d &&
        (
                cd 6d &&
 
@@ -1605,7 +1605,7 @@ test_expect_success '6d: We do not always want transitive renaming' '
 #         add/add conflict on y/d_1 vs y/d_2.
 
 test_setup_6e () {
-       test_create_repo 6e &&
+       git init 6e &&
        (
                cd 6e &&
 
@@ -1700,7 +1700,7 @@ test_expect_success '6e: Add/add from one side' '
 #   NOTE: There's a rename of z/ here, y/ has more renames, so z/d -> y/d.
 
 test_setup_7a () {
-       test_create_repo 7a &&
+       git init 7a &&
        (
                cd 7a &&
 
@@ -1772,7 +1772,7 @@ test_expect_success '7a: rename-dir vs. rename-dir (NOT split evenly) PLUS add-o
 #   Expected: y/{b,c}, CONFLICT(rename/rename(2to1): x/d_1, w/d_2 -> y_d)
 
 test_setup_7b () {
-       test_create_repo 7b &&
+       git init 7b &&
        (
                cd 7b &&
 
@@ -1861,7 +1861,7 @@ test_expect_success '7b: rename/rename(2to1), but only due to transitive rename'
 #         nor CONFLICT x/d -> w/d vs. y/d vs. z/d)
 
 test_setup_7c () {
-       test_create_repo 7c &&
+       git init 7c &&
        (
                cd 7c &&
 
@@ -1926,7 +1926,7 @@ test_expect_success '7c: rename/rename(1to...2or3); transitive rename may add co
 #   NOTE: z->y so NOT CONFLICT(delete x/d vs rename to z/d)
 
 test_setup_7d () {
-       test_create_repo 7d &&
+       git init 7d &&
        (
                cd 7d &&
 
@@ -2027,7 +2027,7 @@ test_expect_success '7d: transitive rename involved in rename/delete; how is it
 #         how it's resolved.
 
 test_setup_7e () {
-       test_create_repo 7e &&
+       git init 7e &&
        (
                cd 7e &&
 
@@ -2137,7 +2137,7 @@ test_expect_success '7e: transitive rename in rename/delete AND dirs in the way'
 # we potentially could.
 
 test_setup_8a () {
-       test_create_repo 8a &&
+       git init 8a &&
        (
                cd 8a &&
 
@@ -2216,7 +2216,7 @@ test_expect_success '8a: Dual-directory rename, one into the others way' '
 # e_1 and e_2.
 
 test_setup_8b () {
-       test_create_repo 8b &&
+       git init 8b &&
        (
                cd 8b &&
 
@@ -2290,7 +2290,7 @@ test_expect_success '8b: Dual-directory rename, one into the others way, with co
 #         notes in 8d.
 
 test_setup_8c () {
-       test_create_repo 8c &&
+       git init 8c &&
        (
                cd 8c &&
 
@@ -2370,7 +2370,7 @@ test_expect_success '8c: modify/delete or rename+modify/delete' '
 #   differently.
 
 test_setup_8d () {
-       test_create_repo 8d &&
+       git init 8d &&
        (
                cd 8d &&
 
@@ -2453,7 +2453,7 @@ test_expect_success '8d: rename/delete...or not?' '
 #        the behavior, and predict it without computing as many details.
 
 test_setup_8e () {
-       test_create_repo 8e &&
+       git init 8e &&
        (
                cd 8e &&
 
@@ -2537,7 +2537,7 @@ test_expect_success '8e: Both sides rename, one side adds to original directory'
 #         of that could take the new file in commit B at z/i to x/w/i or x/i.
 
 test_setup_9a () {
-       test_create_repo 9a &&
+       git init 9a &&
        (
                cd 9a &&
 
@@ -2609,7 +2609,7 @@ test_expect_success '9a: Inner renamed directory within outer renamed directory'
 #   Expected: y/{b,c,d_merged}
 
 test_setup_9b () {
-       test_create_repo 9b &&
+       git init 9b &&
        (
                cd 9b &&
 
@@ -2697,7 +2697,7 @@ test_expect_success '9b: Transitive rename with content merge' '
 #         history for any implicit directory renames.
 
 test_setup_9c () {
-       test_create_repo 9c &&
+       git init 9c &&
        (
                cd 9c &&
 
@@ -2786,7 +2786,7 @@ test_expect_success '9c: Doubly transitive rename?' '
 #   testcases and simplifies things for the user.
 
 test_setup_9d () {
-       test_create_repo 9d &&
+       git init 9d &&
        (
                cd 9d &&
 
@@ -2861,7 +2861,7 @@ test_expect_success '9d: N-way transitive rename?' '
 #             dir1/yo, dir2/yo, dir3/yo, dirN/yo
 
 test_setup_9e () {
-       test_create_repo 9e &&
+       git init 9e &&
        (
                cd 9e &&
 
@@ -2954,7 +2954,7 @@ test_expect_success '9e: N-to-1 whammo' '
 #   Expected: priority/{a,b}/$more_files, priority/c
 
 test_setup_9f () {
-       test_create_repo 9f &&
+       git init 9f &&
        (
                cd 9f &&
 
@@ -3027,7 +3027,7 @@ test_expect_success '9f: Renamed directory that only contained immediate subdirs
 # viewpoint...
 
 test_setup_9g () {
-       test_create_repo 9g &&
+       git init 9g &&
        (
                cd 9g &&
 
@@ -3096,7 +3096,7 @@ test_expect_failure '9g: Renamed directory that only contained immediate subdirs
 #   NOTE: If we applied the z/ -> y/ rename to z/d, then we'd end up with
 #         a rename/rename(1to2) conflict (z/d -> y/d vs. x/d)
 test_setup_9h () {
-       test_create_repo 9h &&
+       git init 9h &&
        (
                cd 9h &&
 
@@ -3177,7 +3177,7 @@ test_expect_success '9h: Avoid dir rename on merely modified path' '
 #       ERROR_MSG(untracked working tree files would be overwritten by merge)
 
 test_setup_10a () {
-       test_create_repo 10a &&
+       git init 10a &&
        (
                cd 10a &&
 
@@ -3243,7 +3243,7 @@ test_expect_success '10a: Overwrite untracked with normal rename/delete' '
 #       ERROR_MSG(refusing to lose untracked file at 'y/d')
 
 test_setup_10b () {
-       test_create_repo 10b &&
+       git init 10b &&
        (
                cd 10b &&
 
@@ -3334,7 +3334,7 @@ test_expect_success '10b: Overwrite untracked with dir rename + delete' '
 #             ERROR_MSG(Refusing to lose untracked file at y/c)
 
 test_setup_10c () {
-       test_create_repo 10c_$1 &&
+       git init 10c_$1 &&
        (
                cd 10c_$1 &&
 
@@ -3472,7 +3472,7 @@ test_expect_success '10c2: Overwrite untracked with dir rename/rename(1to2), oth
 #             ERROR_MSG(Refusing to lose untracked file at y/wham)
 
 test_setup_10d () {
-       test_create_repo 10d &&
+       git init 10d &&
        (
                cd 10d &&
 
@@ -3568,7 +3568,7 @@ test_expect_success '10d: Delete untracked with dir rename/rename(2to1)' '
 #   Expected: y/{a,b,c} + untracked z/c
 
 test_setup_10e () {
-       test_create_repo 10e &&
+       git init 10e &&
        (
                cd 10e &&
 
@@ -3650,7 +3650,7 @@ test_expect_merge_algorithm failure success '10e: Does git complain about untrac
 #             z/c with uncommitted mods on top of A:z/c_v1
 
 test_setup_11a () {
-       test_create_repo 11a &&
+       git init 11a &&
        (
                cd 11a &&
 
@@ -3728,7 +3728,7 @@ test_expect_success '11a: Avoid losing dirty contents with simple rename' '
 
 
 test_setup_11b () {
-       test_create_repo 11b &&
+       git init 11b &&
        (
                cd 11b &&
 
@@ -3810,7 +3810,7 @@ test_expect_success '11b: Avoid losing dirty file involved in directory rename'
 #             y/c left untouched (still has uncommitted mods)
 
 test_setup_11c () {
-       test_create_repo 11c &&
+       git init 11c &&
        (
                cd 11c &&
 
@@ -3883,7 +3883,7 @@ test_expect_success '11c: Avoid losing not-uptodate with rename + D/F conflict'
 #             y/{a,c~HEAD,c/d}, x/b, now-untracked z/c_v1 with uncommitted mods
 
 test_setup_11d () {
-       test_create_repo 11d &&
+       git init 11d &&
        (
                cd 11d &&
 
@@ -3968,7 +3968,7 @@ test_expect_success '11d: Avoid losing not-uptodate with rename + D/F conflict'
 #             y/c has dirty file from before merge
 
 test_setup_11e () {
-       test_create_repo 11e &&
+       git init 11e &&
        (
                cd 11e &&
 
@@ -4060,7 +4060,7 @@ test_expect_success '11e: Avoid deleting not-uptodate with dir rename/rename(1to
 #             ERROR_MSG(Refusing to lose dirty file at y/wham)
 
 test_setup_11f () {
-       test_create_repo 11f &&
+       git init 11f &&
        (
                cd 11f &&
 
@@ -4155,7 +4155,7 @@ test_expect_success '11f: Avoid deleting not-uptodate with dir rename/rename(2to
 #   Expected: node1/{leaf1,leaf2,leaf5,node2/{leaf3,leaf4,leaf6}}
 
 test_setup_12a () {
-       test_create_repo 12a &&
+       git init 12a &&
        (
                cd 12a &&
 
@@ -4238,7 +4238,7 @@ test_expect_success '12a: Moving one directory hierarchy into another' '
 #             node2/node1/{leaf1, leaf2}
 
 test_setup_12b1 () {
-       test_create_repo 12b1 &&
+       git init 12b1 &&
        (
                cd 12b1 &&
 
@@ -4327,7 +4327,7 @@ test_expect_merge_algorithm failure success '12b1: Moving two directory hierarch
 #         even simple rules give weird results when given weird inputs.
 
 test_setup_12b2 () {
-       test_create_repo 12b2 &&
+       git init 12b2 &&
        (
                cd 12b2 &&
 
@@ -4402,7 +4402,7 @@ test_expect_success '12b2: Moving two directory hierarchies into each other' '
 #         each side of the merge.
 
 test_setup_12c1 () {
-       test_create_repo 12c1 &&
+       git init 12c1 &&
        (
                cd 12c1 &&
 
@@ -4492,7 +4492,7 @@ test_expect_merge_algorithm failure success '12c1: Moving one directory hierarch
 #         on each side of the merge.
 
 test_setup_12c2 () {
-       test_create_repo 12c2 &&
+       git init 12c2 &&
        (
                cd 12c2 &&
 
@@ -4584,7 +4584,7 @@ test_expect_success '12c2: Moving one directory hierarchy into another w/ conten
 #   Expected: subdir/foo, bar
 
 test_setup_12d () {
-       test_create_repo 12d &&
+       git init 12d &&
        (
                cd 12d &&
 
@@ -4642,7 +4642,7 @@ test_expect_success '12d: Rename/merge subdir into the root, variant 1' '
 #   Expected: foo, bar
 
 test_setup_12e () {
-       test_create_repo 12e &&
+       git init 12e &&
        (
                cd 12e &&
 
@@ -4743,7 +4743,7 @@ test_expect_success '12e: Rename/merge subdir into the root, variant 2' '
 #      pick and re-applying them in the subsequent one.
 
 test_setup_12f () {
-       test_create_repo 12f &&
+       git init 12f &&
        (
                cd 12f &&
 
@@ -4902,7 +4902,7 @@ test_expect_merge_algorithm failure success '12f: Trivial directory resolve, cac
 #   Expected: newfile_{merged}, newdir/{a_B,b_B,c_A}
 
 test_setup_12g () {
-       test_create_repo 12g &&
+       git init 12g &&
        (
                cd 12g &&
 
@@ -4973,7 +4973,7 @@ test_expect_success '12g: Testcase with two kinds of "relevant" renames' '
 #   Expected: newdir/{alpha_2, b}
 
 test_setup_12h () {
-       test_create_repo 12h &&
+       git init 12h &&
        (
                cd 12h &&
 
@@ -5032,7 +5032,7 @@ test_expect_failure '12h: renaming a file within a renamed directory' '
 #                source/bar vs. source/subdir/bar
 
 test_setup_12i () {
-       test_create_repo 12i &&
+       git init 12i &&
        (
                cd 12i &&
 
@@ -5090,7 +5090,7 @@ test_expect_success '12i: Directory rename causes rename-to-self' '
 #   Expected: {foo, bar, baz_2}, with conflicts on bar vs. subdir/bar
 
 test_setup_12j () {
-       test_create_repo 12j &&
+       git init 12j &&
        (
                cd 12j &&
 
@@ -5148,7 +5148,7 @@ test_expect_success '12j: Directory rename to root causes rename-to-self' '
 #   Expected: dirA/{foo, bar, baz_2}, with conflicts on dirA/bar vs. dirB/bar
 
 test_setup_12k () {
-       test_create_repo 12k &&
+       git init 12k &&
        (
                cd 12k &&
 
@@ -5218,7 +5218,7 @@ test_expect_success '12k: Directory rename with sibling causes rename-to-self' '
 #   is needed for there to be a sub1/ -> sub3/ rename.
 
 test_setup_12l () {
-       test_create_repo 12l_$1 &&
+       git init 12l_$1 &&
        (
                cd 12l_$1 &&
 
@@ -5322,7 +5322,7 @@ test_expect_merge_algorithm failure success '12l (A into B): Rename into each ot
 #   Expected: y/{b,c,d,e/f}, with notices/conflicts for both y/d and y/e/f
 
 test_setup_13a () {
-       test_create_repo 13a_$1 &&
+       git init 13a_$1 &&
        (
                cd 13a_$1 &&
 
@@ -5409,7 +5409,7 @@ test_expect_success '13a(info): messages for newly added files' '
 #             one about content, and one about file location
 
 test_setup_13b () {
-       test_create_repo 13b_$1 &&
+       git init 13b_$1 &&
        (
                cd 13b_$1 &&
 
@@ -5496,7 +5496,7 @@ test_expect_success '13b(info): messages for transitive rename with conflicted c
 #             shown in testcase 13d.
 
 test_setup_13c () {
-       test_create_repo 13c_$1 &&
+       git init 13c_$1 &&
        (
                cd 13c_$1 &&
 
@@ -5584,7 +5584,7 @@ test_expect_success '13c(info): messages for rename/rename(1to1) via transitive
 #               No conflict in where a/y ends up, so put it in d/y.
 
 test_setup_13d () {
-       test_create_repo 13d_$1 &&
+       git init 13d_$1 &&
        (
                cd 13d_$1 &&
 
@@ -5710,7 +5710,7 @@ test_expect_success '13d(info): messages for rename/rename(1to1) via dual transi
 #          least avoids hitting a BUG().
 #
 test_setup_13e () {
-       test_create_repo 13e &&
+       git init 13e &&
        (
                cd 13e &&
 
index b6e424a427b5566b2edf48b3d0a29ad6a927f67e..a61f20c22fe62031da12af45bd8ca427782043bf 100755 (executable)
@@ -114,6 +114,39 @@ test_expect_success 'resolve, non-trivial' '
        test_path_is_missing .git/MERGE_HEAD
 '
 
+test_expect_success 'resolve, trivial, related file removed' '
+       git reset --hard &&
+       git checkout B^0 &&
+
+       git rm a &&
+       test_path_is_missing a &&
+
+       test_must_fail git merge -s resolve C^0 &&
+
+       test_path_is_missing a &&
+       test_path_is_missing .git/MERGE_HEAD
+'
+
+test_expect_success 'resolve, non-trivial, related file removed' '
+       git reset --hard &&
+       git checkout B^0 &&
+
+       git rm a &&
+       test_path_is_missing a &&
+
+       # We also ask for recursive in order to turn off the "allow_trivial"
+       # setting in builtin/merge.c, so that the resolve strategy itself
+       # (rather than the trivial merge codepath) is what gets exercised and
+       # correctly fails the merge.  As a side effect this also checks that
+       # recursive fails the merge, but resolve is the main target here.
+       test_must_fail git merge -s resolve -s recursive D^0 &&
+
+       test_path_is_missing a &&
+       test_path_is_missing .git/MERGE_HEAD
+'
+
 test_expect_success 'recursive' '
        git reset --hard &&
        git checkout B^0 &&
@@ -242,4 +275,36 @@ test_expect_success 'subtree' '
        test_path_is_missing .git/MERGE_HEAD
 '
 
+test_expect_success 'avoid failure due to stat-dirty files' '
+       git reset --hard &&
+       git checkout B^0 &&
+
+       # Make "a" be stat-dirty
+       test-tool chmtime =+1 a &&
+
+       # stat-dirty file should not prevent stash creation in builtin/merge.c
+       git merge -s resolve -s recursive D^0
+'
+
+test_expect_success 'with multiple strategies, recursive or ort failure does not abort early' '
+       git reset --hard &&
+       git checkout B^0 &&
+
+       test_seq 0 10 >a &&
+       git add a &&
+       git rev-parse :a >expect &&
+
+       sane_unset GIT_TEST_MERGE_ALGORITHM &&
+       test_must_fail git merge -s recursive -s ort -s octopus C^0 >output 2>&1 &&
+
+       grep "Trying merge strategy recursive..." output &&
+       grep "Trying merge strategy ort..." output &&
+       grep "Trying merge strategy octopus..." output &&
+       grep "No merge strategy handled the merge." output &&
+
+       # Changes to "a" should remain staged
+       git rev-parse :a >actual &&
+       test_cmp expect actual
+'
+
 test_done
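
The "Trying merge strategy ..." lines that the last test greps for are printed by git merge as it cycles through the strategies listed on the command line; a rough sketch of the same flow outside the suite (the branch name topic is illustrative):

        git merge -s recursive -s ort -s octopus topic
        # Trying merge strategy recursive...
        # Trying merge strategy ort...
        # Trying merge strategy octopus...
        # No merge strategy handled the merge.
        git diff --cached --stat    # changes staged before the merge are still staged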
index 459b431a60d83c9f1e7c052b9c41491854f9997d..93cd2869b12897b5246a3a977f0cdaf1b2911663 100755 (executable)
@@ -4,6 +4,7 @@ test_description='Merge-recursive rename/delete conflict message'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'rename/delete' '
index 7b5f1c1dcd1f68c74646a5983b6b0d53bd9b4a92..2bb8e7f09bb5897fa670aaf808e84ac7bc3516d3 100755 (executable)
@@ -38,7 +38,7 @@ test_description="merge cases"
 #   Expected: b_2
 
 test_setup_1a () {
-       test_create_repo 1a_$1 &&
+       git init 1a_$1 &&
        (
                cd 1a_$1 &&
 
@@ -136,7 +136,7 @@ test_expect_success '1a-R: Modify(A)/Modify(B), change on B subset of A' '
 #   Expected: c_2
 
 test_setup_2a () {
-       test_create_repo 2a_$1 &&
+       git init 2a_$1 &&
        (
                cd 2a_$1 &&
 
@@ -229,7 +229,7 @@ test_expect_success '2a-R: Modify/rename, merge into rename side' '
 #   Expected: c_2
 
 test_setup_2b () {
-       test_create_repo 2b_$1 &&
+       git init 2b_$1 &&
        (
                cd 2b_$1 &&
 
@@ -336,7 +336,7 @@ test_expect_success '2b-R: Rename+Mod(A)/Mod(B), B mods subset of A' '
 #         not make that particular mistake.
 
 test_setup_2c () {
-       test_create_repo 2c &&
+       git init 2c &&
        (
                cd 2c &&
 
@@ -437,7 +437,7 @@ test_expect_success '2c: Modify b & add c VS rename b->c' '
 #   Expected: bar/{bq_2, whatever}
 
 test_setup_3a () {
-       test_create_repo 3a_$1 &&
+       git init 3a_$1 &&
        (
                cd 3a_$1 &&
 
@@ -537,7 +537,7 @@ test_expect_success '3a-R: bq_1->foo/bq_2 on A, foo/->bar/ on B' '
 #   Expected: bar/{bq_2, whatever}
 
 test_setup_3b () {
-       test_create_repo 3b_$1 &&
+       git init 3b_$1 &&
        (
                cd 3b_$1 &&
 
@@ -642,7 +642,7 @@ test_expect_success '3b-R: bq_1->foo/bq_2 on A, foo/->bar/ on B' '
 #   Expected: b_2 for merge, b_4 in working copy
 
 test_setup_4a () {
-       test_create_repo 4a &&
+       git init 4a &&
        (
                cd 4a &&
 
@@ -714,7 +714,7 @@ test_expect_merge_algorithm failure success '4a: Change on A, change on B subset
 #   Expected: c_2
 
 test_setup_4b () {
-       test_create_repo 4b &&
+       git init 4b &&
        (
                cd 4b &&
 
index a9ee4cb207a140eb3c93614f25b9625f43553d59..dd5fe6a402196270fba03fadff55d1e39fa64f02 100755 (executable)
@@ -19,7 +19,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 #
 
 test_expect_success 'setup no merge base' '
-       test_create_repo no_merge_base &&
+       git init no_merge_base &&
        (
                cd no_merge_base &&
 
@@ -55,7 +55,7 @@ test_expect_success 'check no merge base' '
 #
 
 test_expect_success 'setup unique merge base' '
-       test_create_repo unique_merge_base &&
+       git init unique_merge_base &&
        (
                cd unique_merge_base &&
 
@@ -116,7 +116,7 @@ test_expect_success 'check unique merge base' '
 #
 
 test_expect_success 'setup multiple merge bases' '
-       test_create_repo multiple_merge_bases &&
+       git init multiple_merge_bases &&
        (
                cd multiple_merge_bases &&
 
@@ -190,7 +190,7 @@ test_expect_success 'check multiple merge bases' '
 '
 
 test_expect_success 'rebase --merge describes parent of commit being picked' '
-       test_create_repo rebase &&
+       git init rebase &&
        (
                cd rebase &&
                test_commit base file &&
@@ -212,7 +212,7 @@ test_expect_success 'rebase --apply describes fake ancestor base' '
 '
 
 test_setup_zdiff3 () {
-       test_create_repo zdiff3 &&
+       git init zdiff3 &&
        (
                cd zdiff3 &&
 
index 064be1b629e318bb571f1dca2d8ac9c26f86adf9..9919c3fa7cd4359ebe36e3b4b1cbbe99da227905 100755 (executable)
@@ -29,7 +29,7 @@ test_description="merge cases"
 # Testcase basic, conflicting changes in 'numerals'
 
 test_setup_numerals () {
-       test_create_repo numerals_$1 &&
+       git init numerals_$1 &&
        (
                cd numerals_$1 &&
 
index e1ce9199164806abc23955530e4681d1f3cf671e..d02fa16614e0622585a7732942ee9d5b515e6afa 100755 (executable)
@@ -35,7 +35,7 @@ test_description="remember regular & dir renames in sequence of merges"
 # preventing us from finding new renames.
 #
 test_expect_success 'caching renames does not preclude finding new ones' '
-       test_create_repo caching-renames-and-new-renames &&
+       git init caching-renames-and-new-renames &&
        (
                cd caching-renames-and-new-renames &&
 
@@ -106,7 +106,7 @@ test_expect_success 'caching renames does not preclude finding new ones' '
 # should be able to only run rename detection on the upstream side one
 # time.)
 test_expect_success 'cherry-pick both a commit and its immediate revert' '
-       test_create_repo pick-commit-and-its-immediate-revert &&
+       git init pick-commit-and-its-immediate-revert &&
        (
                cd pick-commit-and-its-immediate-revert &&
 
@@ -162,7 +162,7 @@ test_expect_success 'cherry-pick both a commit and its immediate revert' '
 # could cause a spurious rename/add conflict.
 #
 test_expect_success 'rename same file identically, then reintroduce it' '
-       test_create_repo rename-rename-1to1-then-add-old-filename &&
+       git init rename-rename-1to1-then-add-old-filename &&
        (
                cd rename-rename-1to1-then-add-old-filename &&
 
@@ -229,7 +229,7 @@ test_expect_success 'rename same file identically, then reintroduce it' '
 # cached, the directory rename could put newfile in the wrong directory.
 #
 test_expect_success 'rename same file identically, then add file to old dir' '
-       test_create_repo rename-rename-1to1-then-add-file-to-old-dir &&
+       git init rename-rename-1to1-then-add-file-to-old-dir &&
        (
                cd rename-rename-1to1-then-add-file-to-old-dir &&
 
@@ -311,7 +311,7 @@ test_expect_success 'rename same file identically, then add file to old dir' '
 # should avoid the need to re-detect upstream renames.)
 #
 test_expect_success 'cached dir rename does not prevent noticing later conflict' '
-       test_create_repo dir-rename-cache-not-occluding-later-conflict &&
+       git init dir-rename-cache-not-occluding-later-conflict &&
        (
                cd dir-rename-cache-not-occluding-later-conflict &&
 
@@ -365,7 +365,7 @@ test_expect_success 'cached dir rename does not prevent noticing later conflict'
 
 # Helper for the next two tests
 test_setup_upstream_rename () {
-       test_create_repo $1 &&
+       git init $1 &&
        (
                cd $1 &&
 
@@ -537,7 +537,7 @@ test_expect_success 'dir rename unneeded, then rename existing file into old dir
 
 # Helper for the next two tests
 test_setup_topic_rename () {
-       test_create_repo $1 &&
+       git init $1 &&
        (
                cd $1 &&
 
@@ -725,7 +725,7 @@ test_expect_success 'avoid assuming we detected renames' '
                mkdir unrelated &&
                for i in $(test_seq 1 10)
                do
-                       >unrelated/$i
+                       >unrelated/$i || exit 1
                done &&
                test_seq  2 10 >numbers &&
                test_seq 12 20 >values &&
index 3824756a02ec31c228e2eeda92fc13a33b1e4b75..3fe14cd73e895fde51b4f711865553e7a142a2ea 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='merge-recursive backend test'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 #         A      <- create some files
index 74562e1235666ffe49e0978b78dbfb06e416cd2e..fde4aa3cd1ab66a13c61234155da0c2bacb851a9 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='merge with sparse files'
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 # test_file $filename $content
@@ -26,6 +27,7 @@ test_expect_success 'setup' '
        git rm modify_delete &&
        test_commit_this ours &&
        git config core.sparseCheckout true &&
+       mkdir .git/info &&
        echo "/checked-out" >.git/info/sparse-checkout &&
        git reset --hard &&
        test_must_fail git merge theirs
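
With TEST_CREATE_REPO_NO_TEMPLATE=1 the trash repository is created without the template directory, so .git/info does not exist until the test creates it; the pattern the hunk above relies on, shown in isolation:

        TEST_CREATE_REPO_NO_TEMPLATE=1
        . ./test-lib.sh

        test_expect_success 'sparse checkout setup' '
                git config core.sparseCheckout true &&
                mkdir .git/info &&
                echo "/checked-out" >.git/info/sparse-checkout &&
                git reset --hard
        '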
index 178413c22f0dc1dd03152ec1d84843e070d1d843..c9a86f2e947e4a2943438cc5adcaaeab39a465d9 100755 (executable)
@@ -103,8 +103,25 @@ test_expect_success 'setup for merge search' '
         echo "file-c" > file-c &&
         git add file-c &&
         git commit -m "sub-c") &&
-       git commit -a -m "c" &&
+       git commit -a -m "c")
+'
 
+test_expect_success 'merging should conflict for non fast-forward' '
+       test_when_finished "git -C merge-search reset --hard" &&
+       (cd merge-search &&
+        git checkout -b test-nonforward-a b &&
+         if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+         then
+               test_must_fail git merge c >actual &&
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" &&
+               grep "$sub_expect" actual
+         else
+               test_must_fail git merge c 2> actual
+         fi)
+'
+
+test_expect_success 'finish setup for merge-search' '
+       (cd merge-search &&
        git checkout -b d a &&
        (cd sub &&
         git checkout -b sub-d sub-b &&
@@ -129,14 +146,16 @@ test_expect_success 'merge with one side as a fast-forward of the other' '
         test_cmp expect actual)
 '
 
-test_expect_success 'merging should conflict for non fast-forward' '
+test_expect_success 'merging should conflict for non fast-forward (resolution exists)' '
        (cd merge-search &&
-        git checkout -b test-nonforward b &&
+        git checkout -b test-nonforward-b b &&
         (cd sub &&
-         git rev-parse sub-d > ../expect) &&
+         git rev-parse --short sub-d > ../expect) &&
          if test "$GIT_TEST_MERGE_ALGORITHM" = ort
          then
-               test_must_fail git merge c >actual
+               test_must_fail git merge c >actual &&
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" &&
+               grep "$sub_expect" actual
          else
                test_must_fail git merge c 2> actual
          fi &&
@@ -161,7 +180,9 @@ test_expect_success 'merging should fail for ambiguous common parent' '
         ) &&
         if test "$GIT_TEST_MERGE_ALGORITHM" = ort
         then
-               test_must_fail git merge c >actual
+               test_must_fail git merge c >actual &&
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" &&
+               grep "$sub_expect" actual
         else
                test_must_fail git merge c 2> actual
         fi &&
@@ -205,7 +226,12 @@ test_expect_success 'merging should fail for changes that are backwards' '
        git commit -a -m "f" &&
 
        git checkout -b test-backward e &&
-       test_must_fail git merge f)
+       test_must_fail git merge f >actual &&
+       if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+       then
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-d)" &&
+               grep "$sub_expect" actual
+       fi)
 '
 
 
@@ -310,7 +336,7 @@ test_expect_success 'recursive merge with submodule' '
 #   Expected: path/ is submodule and file contents for B's path are somewhere
 
 test_expect_success 'setup file/submodule conflict' '
-       test_create_repo file-submodule &&
+       git init file-submodule &&
        (
                cd file-submodule &&
 
@@ -325,7 +351,7 @@ test_expect_success 'setup file/submodule conflict' '
                git commit -m B &&
 
                git checkout A &&
-               test_create_repo path &&
+               git init path &&
                test_commit -C path world &&
                git submodule add ./path &&
                git commit -m A
@@ -385,7 +411,7 @@ test_expect_success 'file/submodule conflict; merge --abort works afterward' '
 #     under the submodule to be treated as untracked or in the way.
 
 test_expect_success 'setup directory/submodule conflict' '
-       test_create_repo directory-submodule &&
+       git init directory-submodule &&
        (
                cd directory-submodule &&
 
@@ -408,7 +434,7 @@ test_expect_success 'setup directory/submodule conflict' '
                git commit -m B2 &&
 
                git checkout A &&
-               test_create_repo path &&
+               git init path &&
                test_commit -C path hello world &&
                git submodule add ./path &&
                git commit -m A
@@ -476,4 +502,44 @@ test_expect_failure 'directory/submodule conflict; merge --abort works afterward
        )
 '
 
+# Setup:
+#   - Submodule has 2 commits: a and b
+#   - Superproject branch 'a' adds and commits submodule pointing to 'commit a'
+#   - Superproject branch 'b' adds and commits submodule pointing to 'commit b'
+# If these two branches are now merged, there is no merge base
+test_expect_success 'setup for null merge base' '
+       mkdir no-merge-base &&
+       (cd no-merge-base &&
+       git init &&
+       mkdir sub &&
+       (cd sub &&
+        git init &&
+        echo "file-a" > file-a &&
+        git add file-a &&
+        git commit -m "commit a") &&
+       git commit --allow-empty -m init &&
+       git branch init &&
+       git checkout -b a init &&
+       git add sub &&
+       git commit -m "a" &&
+       git switch main &&
+       (cd sub &&
+        echo "file-b" > file-b &&
+        git add file-b &&
+        git commit -m "commit b"))
+'
+
+test_expect_success 'merging should fail with no merge base' '
+       (cd no-merge-base &&
+       git checkout -b b init &&
+       git add sub &&
+       git commit -m "b" &&
+       test_must_fail git merge a >actual &&
+       if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+       then
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short HEAD^1)" &&
+               grep "$sub_expect" actual
+       fi)
+'
+
 test_done
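
The advice fragment these tests grep for ("go to submodule (sub), and either merge commit ...") points at a manual resolution; a hedged sketch of what that looks like, with <commit> standing in for the submodule commit named in the message:

        git -C sub merge <commit>    # or check out a commit that contains both sides
        git add sub                  # record the resolved submodule in the superproject
        git merge --continue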
index 5bfb027099a6d22849739075de82f3c0af7ca513..52cf0c87690be83e758b97c12b0ab00fa77613a2 100755 (executable)
@@ -47,6 +47,7 @@ test_expect_success 'untracked files overwritten by merge (fast and non-fast for
                export GIT_MERGE_VERBOSITY &&
                test_must_fail git merge branch 2>out2
        ) &&
+       echo "Merge with strategy ${GIT_TEST_MERGE_ALGORITHM:-ort} failed." >>expect &&
        test_cmp out2 expect &&
        git reset --hard HEAD^
 '
index f0f7cbfcdb7fa523ee3693501324cde8591a25d9..71fe29690fd1226082f14df5b8e17cef76659e3f 100755 (executable)
@@ -4,6 +4,18 @@ test_description='git mv in sparse working trees'
 
 . ./test-lib.sh
 
+setup_sparse_checkout () {
+       mkdir folder1 &&
+       touch folder1/file1 &&
+       git add folder1 &&
+       git sparse-checkout set --cone sub
+}
+
+cleanup_sparse_checkout () {
+       git sparse-checkout disable &&
+       git reset --hard
+}
+
 test_expect_success 'setup' "
        mkdir -p sub/dir sub/dir2 &&
        touch a b c sub/d sub/dir/e sub/dir2/e &&
@@ -196,6 +208,7 @@ test_expect_success 'can move files to non-sparse dir' '
 '
 
 test_expect_success 'refuse to move file to non-skip-worktree sparse path' '
+       test_when_finished "cleanup_sparse_checkout" &&
        git reset --hard &&
        git sparse-checkout init --no-cone &&
        git sparse-checkout set a !/x y/ !x/y/z &&
@@ -206,4 +219,75 @@ test_expect_success 'refuse to move file to non-skip-worktree sparse path' '
        test_cmp expect stderr
 '
 
+test_expect_success 'refuse to move out-of-cone directory without --sparse' '
+       test_when_finished "cleanup_sparse_checkout" &&
+       setup_sparse_checkout &&
+
+       test_must_fail git mv folder1 sub 2>stderr &&
+       cat sparse_error_header >expect &&
+       echo folder1/file1 >>expect &&
+       cat sparse_hint >>expect &&
+       test_cmp expect stderr
+'
+
+test_expect_success 'can move out-of-cone directory with --sparse' '
+       test_when_finished "cleanup_sparse_checkout" &&
+       setup_sparse_checkout &&
+
+       git mv --sparse folder1 sub 2>stderr &&
+       test_must_be_empty stderr &&
+
+       test_path_is_dir sub/folder1 &&
+       test_path_is_file sub/folder1/file1
+'
+
+test_expect_success 'refuse to move out-of-cone file without --sparse' '
+       test_when_finished "cleanup_sparse_checkout" &&
+       setup_sparse_checkout &&
+
+       test_must_fail git mv folder1/file1 sub 2>stderr &&
+       cat sparse_error_header >expect &&
+       echo folder1/file1 >>expect &&
+       cat sparse_hint >>expect &&
+       test_cmp expect stderr
+'
+
+test_expect_success 'can move out-of-cone file with --sparse' '
+       test_when_finished "cleanup_sparse_checkout" &&
+       setup_sparse_checkout &&
+
+       git mv --sparse folder1/file1 sub 2>stderr &&
+       test_must_be_empty stderr &&
+
+       test_path_is_file sub/file1
+'
+
+test_expect_success 'refuse to move sparse file to existing destination' '
+       test_when_finished "cleanup_sparse_checkout" &&
+       mkdir folder1 &&
+       touch folder1/file1 &&
+       touch sub/file1 &&
+       git add folder1 sub/file1 &&
+       git sparse-checkout set --cone sub &&
+
+       test_must_fail git mv --sparse folder1/file1 sub 2>stderr &&
+       echo "fatal: destination exists, source=folder1/file1, destination=sub/file1" >expect &&
+       test_cmp expect stderr
+'
+
+test_expect_success 'move sparse file to existing destination with --force and --sparse' '
+       test_when_finished "cleanup_sparse_checkout" &&
+       mkdir folder1 &&
+       touch folder1/file1 &&
+       touch sub/file1 &&
+       echo "overwrite" >folder1/file1 &&
+       git add folder1 sub/file1 &&
+       git sparse-checkout set --cone sub &&
+
+       git mv --sparse --force folder1/file1 sub 2>stderr &&
+       test_must_be_empty stderr &&
+       echo "overwrite" >expect &&
+       test_cmp expect sub/file1
+'
+
 test_done
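
The new tests above cover "git mv" for paths outside the sparse-checkout cone: without
"--sparse" the move is refused with a hint, with "--sparse" it is allowed, and "--force" is
additionally needed to overwrite an existing destination.  A short sketch mirroring the tested
behaviour (the "sub" cone and "folder1" layout are the ones the tests create):

    git sparse-checkout set --cone sub
    git mv folder1/file1 sub            # refused: source is outside the cone
    git mv --sparse folder1/file1 sub   # allowed; add --force to overwrite an existing sub/file1
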
index d6cc69e0f2cbd576ff6ff81720de63f1e9cf86bf..f908a4d1abc5dc6608b4473937a614267013f3a2 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git show'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 0f4344c55e6421d605ab7364bc2fedfe9165b02b..aaeb4a533440df495d99e3b123f1640afe4374e7 100755 (executable)
@@ -5,6 +5,7 @@ test_description='basic work tree status reporting'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index 73709dbeee287932b3d925aad78f5c2d3f216a52..caf372a3d42ac362c96bbbdf340f27688a565130 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git-status with core.ignorecase=true'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'status with hash collisions' '
index c1f0d950363d311a4b8da3b6fde00b69cb2bdd74..8929ef481f926ce8eee1414b852c3e14538afa9c 100755 (executable)
@@ -86,7 +86,7 @@ test_expect_success 'core.untrackedCache is unset' '
 '
 
 test_expect_success 'setup' '
-       git init worktree &&
+       git init --template= worktree &&
        cd worktree &&
        mkdir done dtwo dthree &&
        touch one two three done/one dtwo/two dthree/three &&
@@ -94,6 +94,7 @@ test_expect_success 'setup' '
        test-tool chmtime =-300 done dtwo dthree &&
        test-tool chmtime =-300 . &&
        git add one two done/one &&
+       mkdir .git/info &&
        : >.git/info/exclude &&
        git update-index --untracked-cache &&
        test_oid_cache <<-EOF
index 3d62e10b53fe16fd2eae0d0fa0363d8839f97b95..eb881be95b615f53867dbe8f56c6b0f9cc7a9d3c 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description='Tests for "git reset" with "--merge" and "--keep" options'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success setup '
index ce421ad5ac4b3a17d0e347a7d86a386e2b14547b..78f25c1c7ead9820ed647711e4749d7e9d444c52 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description='Tests to check that "reset" options follow a known table'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 
index e7cec2e457af7767a1fbda11535ed4c855f09db7..b50db3f1031339b62071bd28b52094f90c644cb5 100755 (executable)
@@ -14,6 +14,32 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
 . ./test-lib.sh
 
+test_expect_success 'submodule usage: -h' '
+       git submodule -h >out 2>err &&
+       grep "^usage: git submodule" out &&
+       test_must_be_empty err
+'
+
+test_expect_success 'submodule usage: --recursive' '
+       test_expect_code 1 git submodule --recursive >out 2>err &&
+       grep "^usage: git submodule" err &&
+       test_must_be_empty out
+'
+
+test_expect_success 'submodule usage: status --' '
+       test_expect_code 1 git submodule -- &&
+       test_expect_code 1 git submodule --end-of-options
+'
+
+for opt in '--quiet' '--cached'
+do
+       test_expect_success "submodule usage: status $opt" '
+               git submodule $opt &&
+               git submodule status $opt &&
+               git submodule $opt status
+       '
+done
+
 test_expect_success 'submodule deinit works on empty repository' '
        git submodule deinit --all
 '
@@ -152,6 +178,11 @@ test_expect_success 'submodule add' '
        test_must_be_empty untracked
 '
 
+test_expect_success !WINDOWS 'submodule add (absolute path)' '
+       test_when_finished "git reset --hard" &&
+       git submodule add "$submodurl" "$submodurl/add-abs"
+'
+
 test_expect_success 'setup parent and one repository' '
        test_create_repo parent &&
        test_commit -C parent one
@@ -1224,31 +1255,6 @@ test_expect_success 'submodule add clone shallow submodule' '
        )
 '
 
-test_expect_success 'submodule helper list is not confused by common prefixes' '
-       mkdir -p dir1/b &&
-       (
-               cd dir1/b &&
-               git init &&
-               echo hi >testfile2 &&
-               git add . &&
-               git commit -m "test1"
-       ) &&
-       mkdir -p dir2/b &&
-       (
-               cd dir2/b &&
-               git init &&
-               echo hello >testfile1 &&
-               git add .  &&
-               git commit -m "test2"
-       ) &&
-       git submodule add /dir1/b dir1/b &&
-       git submodule add /dir2/b dir2/b &&
-       git commit -m "first submodule commit" &&
-       git submodule--helper list dir1/b | cut -f 2 >actual &&
-       echo "dir1/b" >expect &&
-       test_cmp expect actual
-'
-
 test_expect_success 'setup superproject with submodules' '
        git init sub1 &&
        test_commit -C sub1 test &&
index 9c3cc4cf4046befd76e0239c831d61b33196ee86..542b3331a78f4db27dc5d99d3364f780f4db4548 100755 (executable)
@@ -17,6 +17,7 @@ This test script tries to verify the sanity of summary subcommand of git submodu
 # various reasons, one of them being that there are lots of commands taking place
 # outside of 'test_expect_success' block, which is no longer in good-style.
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 add_file () {
index 8e32f190077474274dc5046df5a64f837ee696f3..ebeca12a71115f60d10fa3ffa4725b1c7080f6f0 100755 (executable)
@@ -104,7 +104,7 @@ test_expect_success 'rebasing submodule that should conflict' '
        test_tick &&
        git commit -m fourth &&
 
-       test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 &&
+       test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 >actual_output &&
        git ls-files -s submodule >actual &&
        (
                cd submodule &&
@@ -112,7 +112,12 @@ test_expect_success 'rebasing submodule that should conflict' '
                echo "160000 $(git rev-parse HEAD^^) 2  submodule" &&
                echo "160000 $(git rev-parse HEAD) 3    submodule"
        ) >expect &&
-       test_cmp expect actual
+       test_cmp expect actual &&
+       if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+       then
+               sub_expect="go to submodule (submodule), and either merge commit $(git -C submodule rev-parse --short HEAD^0)" &&
+               grep "$sub_expect" actual_output
+       fi
 '
 
 test_done
index 43f779d751cfe2645b038811e69b6f67fee2ed34..c5f5dbe55e0a91e24589c8c9d85ba53995091b36 100755 (executable)
@@ -769,7 +769,7 @@ test_expect_success 'submodule update continues after recursive checkout error'
           echo "" > file
          )
         ) &&
-        test_must_fail git submodule update --recursive &&
+        test_expect_code 1 git submodule update --recursive &&
         (cd submodule2 &&
          git rev-parse --verify HEAD >../actual
         ) &&
@@ -1074,7 +1074,7 @@ test_expect_success 'submodule update --quiet passes quietness to merge/rebase'
         git submodule update --rebase --quiet >out 2>err &&
         test_must_be_empty out &&
         test_must_be_empty err &&
-        git submodule update --rebase -v >out 2>err &&
+        git submodule update --rebase >out 2>err &&
         test_file_not_empty out &&
         test_must_be_empty err
        )
@@ -1116,4 +1116,66 @@ test_expect_success 'submodule update --filter sets partial clone settings' '
        test_cmp_config -C super-filter/submodule blob:none remote.origin.partialclonefilter
 '
 
+# NEEDSWORK: Clean up the tests so that we can reuse the test setup.
+# Don't reuse the existing repos because the earlier tests have
+# intentionally disruptive configurations.
+test_expect_success 'setup clean recursive superproject' '
+       git init bottom &&
+       test_commit -C bottom "bottom" &&
+       git init middle &&
+       git -C middle submodule add ../bottom bottom &&
+       git -C middle commit -m "middle" &&
+       git init top &&
+       git -C top submodule add ../middle middle &&
+       git -C top commit -m "top" &&
+       git clone --recurse-submodules top top-clean
+'
+
+test_expect_success 'submodule update should skip unmerged submodules' '
+       test_when_finished "rm -fr top-cloned" &&
+       cp -r top-clean top-cloned &&
+
+       # Create an upstream commit in each repo, starting with bottom
+       test_commit -C bottom upstream_commit &&
+       # Create middle commit
+       git -C middle/bottom fetch &&
+       git -C middle/bottom checkout -f FETCH_HEAD &&
+       git -C middle add bottom &&
+       git -C middle commit -m "upstream_commit" &&
+       # Create top commit
+       git -C top/middle fetch &&
+       git -C top/middle checkout -f FETCH_HEAD &&
+       git -C top add middle &&
+       git -C top commit -m "upstream_commit" &&
+
+       # Create a downstream conflict
+       test_commit -C top-cloned/middle/bottom downstream_commit &&
+       git -C top-cloned/middle add bottom &&
+       git -C top-cloned/middle commit -m "downstream_commit" &&
+       git -C top-cloned/middle fetch --recurse-submodules origin &&
+       test_must_fail git -C top-cloned/middle merge origin/main &&
+
+       # Make the update of "middle" a no-op, otherwise we error out
+       # because of its unmerged state
+       test_config -C top-cloned submodule.middle.update !true &&
+       git -C top-cloned submodule update --recursive 2>actual.err &&
+       cat >expect.err <<-\EOF &&
+       Skipping unmerged submodule middle/bottom
+       EOF
+       test_cmp expect.err actual.err
+'
+
+test_expect_success 'submodule update --recursive skip submodules with strategy=none' '
+       test_when_finished "rm -fr top-cloned" &&
+       cp -r top-clean top-cloned &&
+
+       test_commit -C top-cloned/middle/bottom downstream_commit &&
+       git -C top-cloned/middle config submodule.bottom.update none &&
+       git -C top-cloned submodule update --recursive 2>actual.err &&
+       cat >expect.err <<-\EOF &&
+       Skipping submodule '\''middle/bottom'\''
+       EOF
+       test_cmp expect.err actual.err
+'
+
 test_done
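
The two tests above lean on the "submodule.<name>.update" configuration to keep
"git submodule update --recursive" away from particular submodules: the value "none" skips the
submodule outright, while a "!command" value runs that command instead of checking anything out.
A hedged sketch of the configuration the tests set up (the paths are the ones used above):

    # skip "bottom" entirely on update
    git -C top-cloned/middle config submodule.bottom.update none
    # turn the update of "middle" into a no-op by running "true" instead
    git -C top-cloned config submodule.middle.update '!true'
    git -C top-cloned submodule update --recursive
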
index 1cfa150768d7f813ba9943b4bd17887469cb1c04..2859695c6d208341f5ba1de01e1cfe4131388a2b 100755 (executable)
@@ -6,6 +6,7 @@ This test verifies that `git submodue absorbgitdirs` moves a submodules git
 directory into the superproject.
 '
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup a real submodule' '
index c8e7e983317610b54da89101bea8f7c59a4dcec3..4dc7d089423be4acd3c83ceb4055d77fbc11ba70 100755 (executable)
@@ -1,11 +1,15 @@
 #!/bin/sh
 
-test_description='Test submodule--helper is-active
+test_description='Test with test-tool submodule is-active
 
-This test verifies that `git submodue--helper is-active` correctly identifies
+This test verifies that `test-tool submodule is-active` correctly identifies
 submodules which are "active" and interesting to the user.
+
+This is a unit test of the submodule.c is_submodule_active() function,
+which is also indirectly tested elsewhere.
 '
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup' '
@@ -25,13 +29,13 @@ test_expect_success 'setup' '
 '
 
 test_expect_success 'is-active works with urls' '
-       git -C super submodule--helper is-active sub1 &&
-       git -C super submodule--helper is-active sub2 &&
+       test-tool -C super submodule is-active sub1 &&
+       test-tool -C super submodule is-active sub2 &&
 
        git -C super config --unset submodule.sub1.URL &&
-       test_must_fail git -C super submodule--helper is-active sub1 &&
+       test_must_fail test-tool -C super submodule is-active sub1 &&
        git -C super config submodule.sub1.URL ../sub &&
-       git -C super submodule--helper is-active sub1
+       test-tool -C super submodule is-active sub1
 '
 
 test_expect_success 'is-active works with submodule.<name>.active config' '
@@ -39,11 +43,11 @@ test_expect_success 'is-active works with submodule.<name>.active config' '
        test_when_finished "git -C super config submodule.sub1.URL ../sub" &&
 
        git -C super config --bool submodule.sub1.active "false" &&
-       test_must_fail git -C super submodule--helper is-active sub1 &&
+       test_must_fail test-tool -C super submodule is-active sub1 &&
 
        git -C super config --bool submodule.sub1.active "true" &&
        git -C super config --unset submodule.sub1.URL &&
-       git -C super submodule--helper is-active sub1
+       test-tool -C super submodule is-active sub1
 '
 
 test_expect_success 'is-active works with basic submodule.active config' '
@@ -53,17 +57,17 @@ test_expect_success 'is-active works with basic submodule.active config' '
        git -C super config --add submodule.active "." &&
        git -C super config --unset submodule.sub1.URL &&
 
-       git -C super submodule--helper is-active sub1 &&
-       git -C super submodule--helper is-active sub2
+       test-tool -C super submodule is-active sub1 &&
+       test-tool -C super submodule is-active sub2
 '
 
 test_expect_success 'is-active correctly works with paths that are not submodules' '
        test_when_finished "git -C super config --unset-all submodule.active" &&
 
-       test_must_fail git -C super submodule--helper is-active not-a-submodule &&
+       test_must_fail test-tool -C super submodule is-active not-a-submodule &&
 
        git -C super config --add submodule.active "." &&
-       test_must_fail git -C super submodule--helper is-active not-a-submodule
+       test_must_fail test-tool -C super submodule is-active not-a-submodule
 '
 
 test_expect_success 'is-active works with exclusions in submodule.active config' '
@@ -72,8 +76,8 @@ test_expect_success 'is-active works with exclusions in submodule.active config'
        git -C super config --add submodule.active "." &&
        git -C super config --add submodule.active ":(exclude)sub1" &&
 
-       test_must_fail git -C super submodule--helper is-active sub1 &&
-       git -C super submodule--helper is-active sub2
+       test_must_fail test-tool -C super submodule is-active sub1 &&
+       test-tool -C super submodule is-active sub2
 '
 
 test_expect_success 'is-active with submodule.active and submodule.<name>.active' '
@@ -85,8 +89,8 @@ test_expect_success 'is-active with submodule.active and submodule.<name>.active
        git -C super config --bool submodule.sub1.active "false" &&
        git -C super config --bool submodule.sub2.active "true" &&
 
-       test_must_fail git -C super submodule--helper is-active sub1 &&
-       git -C super submodule--helper is-active sub2
+       test_must_fail test-tool -C super submodule is-active sub1 &&
+       test-tool -C super submodule is-active sub2
 '
 
 test_expect_success 'is-active, submodule.active and submodule add' '
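
These conversions drive "test-tool submodule is-active", which (per the updated description)
exercises is_submodule_active() directly; the answer comes from the "submodule.active" pathspecs
and the per-submodule "submodule.<name>.active" override.  The configuration shapes the tests
toggle look roughly like this (submodule names follow the tests):

    git config --add submodule.active "."               # every submodule is active ...
    git config --add submodule.active ":(exclude)sub1"  # ... except sub1
    git config --bool submodule.sub1.active true        # a per-name override wins over the pathspecs
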
index f2e7df59cf24c254440a8bc1c7d5c195c8bfcd5b..3269298197c2c2dcf92a780d401d212101c8475f 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='handling of common mistakes people may make with submodules'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'create embedded repository' '
index f87e524d6d467fd723c083c105f092850051f566..57d7ab3eced2970791021caa9855e1ed0c1d2712 100755 (executable)
@@ -31,8 +31,9 @@ test_expect_success 'sparse checkout setup which hides .gitmodules' '
                test_tick &&
                git commit -m "Add submodule"
        ) &&
-       git clone upstream super &&
+       git clone --template= upstream super &&
        (cd super &&
+               mkdir .git/info &&
                cat >.git/info/sparse-checkout <<-\EOF &&
                /*
                !/.gitmodules
index 3b925c302fc4e2194a2e0ad10c0b48823f2cf021..96e984232144c60743d59150c68461e5b1e779eb 100755 (executable)
@@ -9,6 +9,7 @@ This test verifies that the set-branch subcommand of git-submodule is working
 as expected.
 '
 
+TEST_PASSES_SANITIZE_LEAK=true
 TEST_NO_CREATE_REPO=1
 . ./test-lib.sh
 
index 41706c1c9ff91d4ac11d9af9f8acdd7f587b896d..2c24f120da3591305b073d247e21e3c07cff9386 100755 (executable)
@@ -21,7 +21,7 @@ test_expect_success 'check names' '
        valid/with/paths
        EOF
 
-       git submodule--helper check-name >actual <<-\EOF &&
+       test-tool submodule check-name >actual <<-\EOF &&
        valid
        valid/with/paths
 
index ad1eb64ba0db16d4f1452961c0a226fdf2e5cce0..aa004b70a8d1f1ab093b962a232524eaf4e851a1 100755 (executable)
@@ -5,6 +5,7 @@ test_description='pre-commit and pre-merge-commit hooks'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'root commit' '
index 3fcb44767f51052f637683a34cbb8a2f21c777cc..f5426a8e589fb6a0beadd9ef793ed6be66b69355 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='git status for submodule'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_create_repo_with_commit () {
index ed2653d46fe6cd0ed1dd5dc2dfd73f7340b8fe31..92462a22374042603a885c4b52fb49b3d16e0373 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='verbose commit template'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 write_script "check-for-diff" <<\EOF &&
index f0f6fda150bc29695dc8b56cb3c204de251e6e1d..7c3f6ed99431839abc42533d520cd342f25dafd1 100755 (executable)
@@ -255,6 +255,15 @@ test_expect_success 'merge --squash c3 with c7' '
        test_cmp expect actual
 '
 
+test_expect_success 'merge --squash --autostash conflict does not attempt to apply autostash' '
+       git reset --hard c3 &&
+       >unrelated &&
+       git add unrelated &&
+       test_must_fail git merge --squash c7 --autostash >out 2>err &&
+       ! grep "Applying autostash resulted in conflicts." err &&
+       grep "When finished, apply stashed changes with \`git stash pop\`" out
+'
+
 test_expect_success 'merge c3 with c7 with commit.cleanup = scissors' '
        git config commit.cleanup scissors &&
        git reset --hard c3 &&
diff --git a/t/t7607-merge-state.sh b/t/t7607-merge-state.sh
new file mode 100755 (executable)
index 0000000..89a62ac
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+test_description="Test that merge state is as expected after failed merge"
+
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+. ./test-lib.sh
+
+test_expect_success 'Ensure we restore original state if no merge strategy handles it' '
+       test_commit --no-tag "Initial" base base &&
+
+       for b in branch1 branch2 branch3
+       do
+               git checkout -b $b main &&
+               test_commit --no-tag "Change on $b" base $b || return 1
+       done &&
+
+       git checkout branch1 &&
+       # This is a merge that octopus cannot handle.  Note that it does not
+       # just hit conflicts; it completely fails and says that it cannot
+       # handle this type of merge.
+       test_expect_code 2 git merge branch2 branch3 >output 2>&1 &&
+       grep "fatal: merge program failed" output &&
+       grep "Should not be doing an octopus" output &&
+
+       # Make sure we did not leave stray changes around when no appropriate
+       # merge strategy was found
+       git diff --exit-code --name-status &&
+       test_path_is_missing .git/MERGE_HEAD
+'
+
+test_done
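
The new script asserts that a merge which no strategy can handle leaves the index and worktree
untouched.  Outside the test suite the same state can be inspected after any failed merge
attempt, for example (branch names are placeholders):

    git merge topic1 topic2 || echo "merge exited with $?"
    git diff --exit-code --name-status     # expect no stray changes
    test -e .git/MERGE_HEAD || echo "no merge in progress"
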
index 330d6d603d77236788ee932cdcc288731a7aa388..8b1c3bd39f2249417099ea2f24218268925909af 100755 (executable)
@@ -4,6 +4,7 @@ test_description='git mergetool
 
 Testing basic merge tools options'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'mergetool --tool=vimdiff creates the expected layout' '
index 69356011713fad1add8044471f01f9d6d3dee892..0f937990a062e30b5f11baa296c88472019ac338 100755 (executable)
@@ -77,6 +77,7 @@ test_expect_success setup '
        # Say hello.
        function hello() {
          echo "Hello world."
+         echo "Hello again."
        } # hello
 
        # Still a no-op.
@@ -595,6 +596,92 @@ test_expect_success 'grep --files-without-match --quiet' '
        test_must_be_empty actual
 '
 
+test_expect_success 'grep --max-count 0 (must exit with non-zero)' '
+       test_must_fail git grep --max-count 0 foo >actual &&
+       test_must_be_empty actual
+'
+
+test_expect_success 'grep --max-count 3' '
+       cat >expected <<-EOF &&
+       file:foo mmap bar
+       file:foo_mmap bar
+       file:foo_mmap bar mmap
+       EOF
+       git grep --max-count 3 foo >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count -1 (no limit)' '
+       cat >expected <<-EOF &&
+       file:foo mmap bar
+       file:foo_mmap bar
+       file:foo_mmap bar mmap
+       file:foo mmap bar_mmap
+       file:foo_mmap bar mmap baz
+       EOF
+       git grep --max-count -1 foo >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count 1 --context 1' '
+       cat >expected <<-EOF &&
+       file-foo mmap bar
+       file:foo_mmap bar
+       file-foo_mmap bar mmap
+       EOF
+       git grep --max-count 1 --context 1 foo_mmap >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count 1 --show-function' '
+       cat >expected <<-EOF &&
+       hello.ps1=function hello() {
+       hello.ps1:  echo "Hello world."
+       EOF
+       git grep --max-count 1 --show-function Hello hello.ps1 >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count 2 --show-function' '
+       cat >expected <<-EOF &&
+       hello.ps1=function hello() {
+       hello.ps1:  echo "Hello world."
+       hello.ps1:  echo "Hello again."
+       EOF
+       git grep --max-count 2 --show-function Hello hello.ps1 >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count 1 --count' '
+       cat >expected <<-EOF &&
+       hello.ps1:1
+       EOF
+       git grep --max-count 1 --count Hello hello.ps1 >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count 1 (multiple files)' '
+       cat >expected <<-EOF &&
+       hello.c:#include <stdio.h>
+       hello.ps1:# No-op.
+       EOF
+       git grep --max-count 1 -e o -- hello.\* >actual &&
+       test_cmp expected actual
+'
+
+test_expect_success 'grep --max-count 1 --context 1 (multiple files)' '
+       cat >expected <<-EOF &&
+       hello.c-#include <assert.h>
+       hello.c:#include <stdio.h>
+       hello.c-
+       --
+       hello.ps1:# No-op.
+       hello.ps1-function dummy() {}
+       EOF
+       git grep --max-count 1 --context 1 -e o -- hello.\* >actual &&
+       test_cmp expected actual
+'
+
 cat >expected <<EOF
 file:foo mmap bar_mmap
 EOF
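
The block of tests above exercises the new "git grep --max-count" option: a non-negative value
caps the number of matching lines reported per file, "-1" means no limit, and "0" makes the
command exit non-zero without output; the cap also interacts with --count, --context and
--show-function as shown.  A couple of invocations in the same spirit:

    git grep --max-count 1 foo           # at most one matching line per file
    git grep --max-count 1 --count foo   # per-file counts are capped as well
    git grep --max-count -1 foo          # explicitly unlimited
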
index ac7be5471452a0a8b2123dc39be80caf3520e9db..31c66b63c2cb008979cc64c3f9df203ce5ed1aeb 100755 (executable)
@@ -2,6 +2,7 @@
 
 test_description='grep icase on non-English locales'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./lib-gettext.sh
 
 doalarm () {
index a4476dc492204297d3b91b3d3d79a7769f4d38b9..3ad80526c4c426b1ebeb94ee0730ff9ec79a4ef2 100755 (executable)
@@ -6,6 +6,7 @@ This test verifies the recurse-submodules feature correctly greps across
 submodules.
 '
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=1
@@ -471,8 +472,10 @@ test_expect_failure 'grep --textconv: superproject .gitattributes (from index) d
 test_expect_failure 'grep --textconv: superproject .git/info/attributes does not affect submodules' '
        reset_and_clean &&
        test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
-       super_attr="$(git rev-parse --git-path info/attributes)" &&
+       super_info="$(git rev-parse --git-path info)" &&
+       super_attr="$super_info/attributes" &&
        test_when_finished "rm -f \"$super_attr\"" &&
+       mkdir "$super_info" &&
        echo "a diff=d2x" >"$super_attr" &&
 
        cat >expect <<-\EOF &&
@@ -516,7 +519,8 @@ test_expect_failure 'grep --textconv correctly reads submodule .git/info/attribu
        reset_and_clean &&
        test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
 
-       submodule_attr="$(git -C submodule rev-parse --path-format=absolute --git-path info/attributes)" &&
+       submodule_info="$(git -C submodule rev-parse --path-format=absolute --git-path info)" &&
+       submodule_attr="$submodule_info/attributes" &&
        test_when_finished "rm -f \"$submodule_attr\"" &&
        echo "a diff=d2x" >"$submodule_attr" &&
 
index 74aa6384755ec6d53cf061a48dbe2cdfc93725f9..2724a44fe3ef240aa09077f770706ed022a99b7e 100755 (executable)
@@ -32,11 +32,13 @@ test_systemd_analyze_verify () {
 }
 
 test_expect_success 'help text' '
-       test_expect_code 129 git maintenance -h 2>err &&
-       test_i18ngrep "usage: git maintenance <subcommand>" err &&
-       test_expect_code 128 git maintenance barf 2>err &&
-       test_i18ngrep "invalid subcommand: barf" err &&
+       test_expect_code 129 git maintenance -h >actual &&
+       test_i18ngrep "usage: git maintenance <subcommand>" actual &&
+       test_expect_code 129 git maintenance barf 2>err &&
+       test_i18ngrep "unknown subcommand: \`barf'\''" err &&
+       test_i18ngrep "usage: git maintenance" err &&
        test_expect_code 129 git maintenance 2>err &&
+       test_i18ngrep "error: need a subcommand" err &&
        test_i18ngrep "usage: git maintenance" err
 '
 
@@ -162,7 +164,6 @@ test_expect_success 'prefetch multiple remotes' '
        test_cmp_rev refs/remotes/remote1/one refs/prefetch/remotes/remote1/one &&
        test_cmp_rev refs/remotes/remote2/two refs/prefetch/remotes/remote2/two &&
 
-       test_cmp_config refs/prefetch/ log.excludedecoration &&
        git log --oneline --decorate --all >log &&
        ! grep "prefetch" log &&
 
@@ -173,26 +174,6 @@ test_expect_success 'prefetch multiple remotes' '
        test_subcommand git fetch remote2 $fetchargs <skip-remote1.txt
 '
 
-test_expect_success 'prefetch and existing log.excludeDecoration values' '
-       git config --unset-all log.excludeDecoration &&
-       git config log.excludeDecoration refs/remotes/remote1/ &&
-       git maintenance run --task=prefetch &&
-
-       git config --get-all log.excludeDecoration >out &&
-       grep refs/remotes/remote1/ out &&
-       grep refs/prefetch/ out &&
-
-       git log --oneline --decorate --all >log &&
-       ! grep "prefetch" log &&
-       ! grep "remote1" log &&
-       grep "remote2" log &&
-
-       # a second run does not change the config
-       git maintenance run --task=prefetch &&
-       git log --oneline --decorate --all >log2 &&
-       test_cmp log log2
-'
-
 test_expect_success 'loose-objects task' '
        # Repack everything so we know the state of the object dir
        git repack -adk &&
index a536a621b24a4ee0a010fd5a3e565d2711d67806..d7167f55397f57f35a75eccff662c13b4f712ec8 100755 (executable)
@@ -4,6 +4,7 @@ test_description='git annotate'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 PROG='git annotate'
index ee4fdd8f18d572f82392b6ed2e4b4da9837b63b5..0147de304b4d104cc7f05ea1f8d68f1a07ceb80d 100755 (executable)
@@ -4,6 +4,7 @@ test_description='git blame'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 PROG='git blame -c'
index b067983ba1c6adf152131c96a67fc7a709e6666b..c8266f17f14af3c1af34661ccf9c039d04cfbe31 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='git cat-file textconv support'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 cat >helper <<'EOF'
index 31de4b64dc06c1aad760dfc4ad585d11b46a65d8..ca04242ca016368a5644ef7d04c8b3dab0569260 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
 test_description='git cat-file filters support'
+
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_expect_success 'setup ' '
index 90c75dbb283fce796847f51546a04691e0521de8..c3a5f6d01ffe5cff12b868e7cbe5a09dacbef011 100755 (executable)
@@ -4,6 +4,7 @@ test_description='colored git blame'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_CREATE_REPO_NO_TEMPLATE=1
 . ./test-lib.sh
 
 PROG='git blame -c'
index 7c5b847f58424dc62c330674971d2d849f77ac4e..fea41b3c3606df1fc6d8111cdd74522532ca8b0b 100755 (executable)
@@ -8,7 +8,6 @@ test_description='git svn basic tests'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
-TEST_FAILS_SANITIZE_LEAK=true
 . ./lib-git-svn.sh
 
 prepare_utf8_locale
index d043e80fc349f85e1cd4168c9bf4d343dfeee0d5..52046e60d5150d0ee71bcbb515277b4bf1e6969d 100755 (executable)
@@ -5,7 +5,6 @@
 
 test_description='git svn property tests'
 
-TEST_FAILS_SANITIZE_LEAK=true
 . ./lib-git-svn.sh
 
 mkdir import
index 5cf2ef4b8b0fc3a9a587baea4e8421893c90bd14..85d735861fc9f500561e9f5fb4baa131657752c0 100755 (executable)
@@ -5,7 +5,6 @@
 
 test_description='git svn fetching'
 
-TEST_FAILS_SANITIZE_LEAK=true
 . ./lib-git-svn.sh
 
 test_expect_success 'initialize repo' '
index 527ba3d29322a14d609ea57e3740672f144f3182..0fc289ae0f02c064586b5ff9f1cab0ff0976814f 100755 (executable)
@@ -2,7 +2,6 @@
 
 test_description='git svn authorship'
 
-TEST_FAILS_SANITIZE_LEAK=true
 . ./lib-git-svn.sh
 
 test_expect_success 'setup svn repository' '
index 4d8d0584b795d21fab8ba7dcbd47b50c178c2757..aeceffaf7b0824b8da673261f523589d3b141619 100755 (executable)
@@ -2,7 +2,6 @@
 
 test_description='test that git handles an svn repository with empty symlinks'
 
-TEST_FAILS_SANITIZE_LEAK=true
 . ./lib-git-svn.sh
 test_expect_success 'load svn dumpfile' '
        svnadmin load "$rawsvnrepo" <<EOF
index e2aa8ed88a96ec0b50fb33f31e2e6d232895067c..b3ce033a0d3dec9c27b3a5e9114e07f18f20f04d 100755 (executable)
@@ -4,7 +4,6 @@
 
 test_description='git svn dcommit --interactive series'
 
-TEST_FAILS_SANITIZE_LEAK=true
 . ./lib-git-svn.sh
 
 test_expect_success 'initialize repo' '
index 1ae4d7c0d37db2d10cbc08ce90505b291e70171b..58413221e6a73dcb464f0f226616b0bba4ea30dd 100755 (executable)
@@ -7,6 +7,7 @@ test_description='test git fast-import of notes objects'
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 
index 102c133112c7149258d123b95acec807006890b7..4aa5d90d328aca4adcac5f0d3a2ee4946a393812 100755 (executable)
@@ -4,17 +4,12 @@
 #
 
 test_description='perl interface (Git.pm)'
-. ./test-lib.sh
 
-if ! test_have_prereq PERL; then
-       skip_all='skipping perl interface tests, perl not available'
-       test_done
-fi
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-perl.sh
 
-perl -MTest::More -e 0 2>/dev/null || {
-       skip_all="Perl Test::More unavailable, skipping test"
-       test_done
-}
+skip_all_if_no_Test_More
 
 # set up test repository
 
@@ -50,11 +45,9 @@ test_expect_success \
      git config --add test.pathmulti bar
      '
 
-# The external test will outputs its own plan
-test_external_has_tap=1
-
-test_external_without_stderr \
-    'Perl API' \
-    perl "$TEST_DIRECTORY"/t9700/test.pl
+test_expect_success 'use t9700/test.pl to test Git.pm' '
+       "$PERL_PATH" "$TEST_DIRECTORY"/t9700/test.pl 2>stderr &&
+       test_must_be_empty stderr
+'
 
 test_done
index de7152f82713bf797aecd3f53bce6ef006524b7b..19f56e5680f678c22fc80a63907c804448c4b445 100755 (executable)
@@ -5,6 +5,7 @@ test_description='git web--browse basic tests
 
 This test checks that git web--browse can handle various valid URLs.'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 test_web_browse () {
index 6a30f5719c33260cb57fca2112d77bc1f9515375..d459fae6551bd7b5812f8b76a5faae8ebd4075df 100755 (executable)
@@ -759,4 +759,20 @@ test_expect_success 'prompt - hide if pwd ignored - inside gitdir' '
        test_cmp expected "$actual"
 '
 
+test_expect_success 'prompt - conflict indicator' '
+       printf " (main|CONFLICT)" >expected &&
+       echo "stash" >file &&
+       git stash &&
+       test_when_finished "git stash drop" &&
+       echo "commit" >file &&
+       git commit -m "commit" file &&
+       test_when_finished "git reset --hard HEAD~" &&
+       test_must_fail git stash apply &&
+       (
+               GIT_PS1_SHOWCONFLICTSTATE="yes" &&
+               __git_ps1 >"$actual"
+       ) &&
+       test_cmp expected "$actual"
+'
+
 test_done
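
The added prompt test sets GIT_PS1_SHOWCONFLICTSTATE=yes so that __git_ps1 appends "|CONFLICT"
to the branch name while unmerged paths exist.  A sketch of enabling the same indicator in an
interactive shell; the git-prompt.sh path is the usual contrib location and will differ between
installs:

    . ~/git/contrib/completion/git-prompt.sh
    GIT_PS1_SHOWCONFLICTSTATE=yes
    PS1='\w$(__git_ps1 " (%s)")\$ '
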
index 8c44856eaec0d8fed84f5a4959edce275c8b79eb..c6479f24eb5ac291a29664903b3b6393d04748c6 100644 (file)
@@ -633,7 +633,7 @@ test_hook () {
 # - Explicitly using test_have_prereq.
 #
 # - Implicitly by specifying the prerequisite tag in the calls to
-#   test_expect_{success,failure} and test_external{,_without_stderr}.
+#   test_expect_{success,failure}
 #
 # The single parameter is the prerequisite tag (a simple word, in all
 # capital letters by convention).
@@ -835,93 +835,6 @@ test_expect_success () {
        test_finish_
 }
 
-# test_external runs external test scripts that provide continuous
-# test output about their progress, and succeeds/fails on
-# zero/non-zero exit code.  It outputs the test output on stdout even
-# in non-verbose mode, and announces the external script with "# run
-# <n>: ..." before running it.  When providing relative paths, keep in
-# mind that all scripts run in "trash directory".
-# Usage: test_external description command arguments...
-# Example: test_external 'Perl API' perl ../path/to/test.pl
-test_external () {
-       test "$#" = 4 && { test_prereq=$1; shift; } || test_prereq=
-       test "$#" = 3 ||
-       BUG "not 3 or 4 parameters to test_external"
-       descr="$1"
-       shift
-       test_verify_prereq
-       export test_prereq
-       if ! test_skip "$descr" "$@"
-       then
-               # Announce the script to reduce confusion about the
-               # test output that follows.
-               say_color "" "# run $test_count: $descr ($*)"
-               # Export TEST_DIRECTORY, TRASH_DIRECTORY and GIT_TEST_LONG
-               # to be able to use them in script
-               export TEST_DIRECTORY TRASH_DIRECTORY GIT_TEST_LONG
-               # Run command; redirect its stderr to &4 as in
-               # test_run_, but keep its stdout on our stdout even in
-               # non-verbose mode.
-               "$@" 2>&4
-               if test "$?" = 0
-               then
-                       if test $test_external_has_tap -eq 0; then
-                               test_ok_ "$descr"
-                       else
-                               say_color "" "# test_external test $descr was ok"
-                               test_success=$(($test_success + 1))
-                       fi
-               else
-                       if test $test_external_has_tap -eq 0; then
-                               test_failure_ "$descr" "$@"
-                       else
-                               say_color error "# test_external test $descr failed: $@"
-                               test_failure=$(($test_failure + 1))
-                       fi
-               fi
-       fi
-}
-
-# Like test_external, but in addition tests that the command generated
-# no output on stderr.
-test_external_without_stderr () {
-       # The temporary file has no (and must have no) security
-       # implications.
-       tmp=${TMPDIR:-/tmp}
-       stderr="$tmp/git-external-stderr.$$.tmp"
-       test_external "$@" 4> "$stderr"
-       test -f "$stderr" || error "Internal error: $stderr disappeared."
-       descr="no stderr: $1"
-       shift
-       say >&3 "# expecting no stderr from previous command"
-       if test ! -s "$stderr"
-       then
-               rm "$stderr"
-
-               if test $test_external_has_tap -eq 0; then
-                       test_ok_ "$descr"
-               else
-                       say_color "" "# test_external_without_stderr test $descr was ok"
-                       test_success=$(($test_success + 1))
-               fi
-       else
-               if test "$verbose" = t
-               then
-                       output=$(echo; echo "# Stderr is:"; cat "$stderr")
-               else
-                       output=
-               fi
-               # rm first in case test_failure exits.
-               rm "$stderr"
-               if test $test_external_has_tap -eq 0; then
-                       test_failure_ "$descr" "$@" "$output"
-               else
-                       say_color error "# test_external_without_stderr test $descr failed: $@: $output"
-                       test_failure=$(($test_failure + 1))
-               fi
-       fi
-}
-
 # debugging-friendly alternatives to "test [-f|-d|-e]"
 # The commands test the existence or non-existence of $1
 test_path_is_file () {
index 120f11812c364ae6467e61cf8eac1f92cd73a385..377cc1c1203d6fdfc7418f4ce189bb313cfb0b75 100644 (file)
@@ -238,6 +238,9 @@ parse_option () {
                        ;;
                esac
                ;;
+       --invert-exit-code)
+               invert_exit_code=t
+               ;;
        *)
                echo "error: unknown test option '$opt'" >&2; exit 1 ;;
        esac
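
This hunk teaches the option parser the new --invert-exit-code switch, which the code further
down uses to flip a test script's exit status (and which the SANITIZE=leak checks reuse).
Invoking it directly looks like this, assuming a built tree and an arbitrary script name:

    cd t
    ./t0000-basic.sh --invert-exit-code   # exits 0 when the script fails, 1 when it passes
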
@@ -302,6 +305,11 @@ TEST_NUMBER="${TEST_NAME%%-*}"
 TEST_NUMBER="${TEST_NUMBER#t}"
 TEST_RESULTS_DIR="$TEST_OUTPUT_DIRECTORY/test-results"
 TEST_RESULTS_BASE="$TEST_RESULTS_DIR/$TEST_NAME$TEST_STRESS_JOB_SFX"
+TEST_RESULTS_SAN_FILE_PFX=trace
+TEST_RESULTS_SAN_DIR_SFX=leak
+TEST_RESULTS_SAN_FILE=
+TEST_RESULTS_SAN_DIR="$TEST_RESULTS_DIR/$TEST_NAME.$TEST_RESULTS_SAN_DIR_SFX"
+TEST_RESULTS_SAN_DIR_NR_LEAKS_STARTUP=
 TRASH_DIRECTORY="trash directory.$TEST_NAME$TEST_STRESS_JOB_SFX"
 test -n "$root" && TRASH_DIRECTORY="$root/$TRASH_DIRECTORY"
 case "$TRASH_DIRECTORY" in
@@ -309,6 +317,16 @@ case "$TRASH_DIRECTORY" in
  *) TRASH_DIRECTORY="$TEST_OUTPUT_DIRECTORY/$TRASH_DIRECTORY" ;;
 esac
 
+# Utility functions using $TEST_RESULTS_* variables
+nr_san_dir_leaks_ () {
+       # stderr piped to /dev/null because the directory may have
+       # been "rmdir"'d already.
+       find "$TEST_RESULTS_SAN_DIR" \
+               -type f \
+               -name "$TEST_RESULTS_SAN_FILE_PFX.*" 2>/dev/null |
+       wc -l
+}
+
 # If --stress was passed, run this test repeatedly in several parallel loops.
 if test "$GIT_TEST_STRESS_STARTED" = "done"
 then
@@ -793,15 +811,31 @@ test_ok_ () {
        finalize_test_case_output ok "$@"
 }
 
+_invert_exit_code_failure_end_blurb () {
+       say_color warn "# faked up failures as TODO & now exiting with 0 due to --invert-exit-code"
+}
+
 test_failure_ () {
        failure_label=$1
        test_failure=$(($test_failure + 1))
-       say_color error "not ok $test_count - $1"
+       local pfx=""
+       if test -n "$invert_exit_code" # && test -n "$HARNESS_ACTIVE"
+       then
+               pfx="# TODO induced breakage (--invert-exit-code):"
+       fi
+       say_color error "not ok $test_count - ${pfx:+$pfx }$1"
        shift
        printf '%s\n' "$*" | sed -e 's/^/#      /'
        if test -n "$immediate"
        then
                say_color error "1..$test_count"
+               if test -n "$invert_exit_code"
+               then
+                       finalize_test_output
+                       _invert_exit_code_failure_end_blurb
+                       GIT_EXIT_OK=t
+                       exit 0
+               fi
                _error_exit
        fi
        finalize_test_case_output failure "$failure_label" "$@"
@@ -809,14 +843,14 @@ test_failure_ () {
 
 test_known_broken_ok_ () {
        test_fixed=$(($test_fixed+1))
-       say_color error "ok $test_count - $@ # TODO known breakage vanished"
-       finalize_test_case_output fixed "$@"
+       say_color error "ok $test_count - $1 # TODO known breakage vanished"
+       finalize_test_case_output fixed "$1"
 }
 
 test_known_broken_failure_ () {
        test_broken=$(($test_broken+1))
-       say_color warn "not ok $test_count - $@ # TODO known breakage"
-       finalize_test_case_output broken "$@"
+       say_color warn "not ok $test_count - $1 # TODO known breakage"
+       finalize_test_case_output broken "$1"
 }
 
 test_debug () {
@@ -1173,9 +1207,67 @@ test_atexit_handler () {
        teardown_malloc_check
 }
 
-test_done () {
-       GIT_EXIT_OK=t
+sanitize_leak_log_message_ () {
+       local new="$1" &&
+       local old="$2" &&
+       local file="$3" &&
+
+       printf "With SANITIZE=leak at exit we have %d leak logs, but started with %d
+
+This means that we have a blind spot where git is leaking but we're
+losing the exit code somewhere, or not propagating it appropriately
+upwards!
+
+See the logs at \"%s.*\";
+those logs are reproduced below." \
+              "$new" "$old" "$file"
+}
 
+check_test_results_san_file_ () {
+       if test -z "$TEST_RESULTS_SAN_FILE"
+       then
+               return
+       fi &&
+       local old="$TEST_RESULTS_SAN_DIR_NR_LEAKS_STARTUP" &&
+       local new="$(nr_san_dir_leaks_)" &&
+
+       if test $new -le $old
+       then
+               return
+       fi &&
+       local out="$(sanitize_leak_log_message_ "$new" "$old" "$TEST_RESULTS_SAN_FILE")" &&
+       say_color error "$out" &&
+       if test "$old" != 0
+       then
+               echo &&
+               say_color error "The logs include output from past runs to avoid" &&
+               say_color error "that remove 'test-results' between runs."
+       fi &&
+       say_color error "$(cat "$TEST_RESULTS_SAN_FILE".*)" &&
+
+       if test -n "$passes_sanitize_leak" && test "$test_failure" = 0
+       then
+               say "As TEST_PASSES_SANITIZE_LEAK=true and our logs show we're leaking, exit non-zero!" &&
+               invert_exit_code=t
+       elif test -n "$passes_sanitize_leak"
+       then
+               say "As TEST_PASSES_SANITIZE_LEAK=true and our logs show we're leaking, and we're failing for other reasons too..." &&
+               invert_exit_code=
+       elif test -n "$sanitize_leak_check" && test "$test_failure" = 0
+       then
+               say "As TEST_PASSES_SANITIZE_LEAK=true isn't set the above leak is 'ok' with GIT_TEST_PASSING_SANITIZE_LEAK=check" &&
+               invert_exit_code=
+       elif test -n "$sanitize_leak_check"
+       then
+               say "As TEST_PASSES_SANITIZE_LEAK=true isn't set the above leak is 'ok' with GIT_TEST_PASSING_SANITIZE_LEAK=check" &&
+               invert_exit_code=t
+       else
+               say "With GIT_TEST_SANITIZE_LEAK_LOG=true our logs revealed a memory leak, exit non-zero!" &&
+               invert_exit_code=t
+       fi
+}
+
+test_done () {
        # Run the atexit commands _before_ the trash directory is
        # removed, so the commands can access pidfiles and socket files.
        test_atexit_handler
@@ -1215,28 +1307,32 @@ test_done () {
        fi
        case "$test_failure" in
        0)
-               if test $test_external_has_tap -eq 0
+               if test $test_remaining -gt 0
                then
-                       if test $test_remaining -gt 0
-                       then
-                               say_color pass "# passed all $msg"
-                       fi
-
-                       # Maybe print SKIP message
-                       test -z "$skip_all" || skip_all="# SKIP $skip_all"
-                       case "$test_count" in
-                       0)
-                               say "1..$test_count${skip_all:+ $skip_all}"
-                               ;;
-                       *)
-                               test -z "$skip_all" ||
-                               say_color warn "$skip_all"
-                               say "1..$test_count"
-                               ;;
-                       esac
+                       say_color pass "# passed all $msg"
                fi
 
-               if test -z "$debug" && test -n "$remove_trash"
+               # Maybe print SKIP message
+               test -z "$skip_all" || skip_all="# SKIP $skip_all"
+               case "$test_count" in
+               0)
+                       say "1..$test_count${skip_all:+ $skip_all}"
+                       ;;
+               *)
+                       test -z "$skip_all" ||
+                       say_color warn "$skip_all"
+                       say "1..$test_count"
+                       ;;
+               esac
+
+               if test -n "$stress" && test -n "$invert_exit_code"
+               then
+                       # We're about to move our "$TRASH_DIRECTORY"
+                       # to "$TRASH_DIRECTORY.stress-failed" if
+                       # --stress is combined with
+                       # --invert-exit-code.
+                       say "with --stress and --invert-exit-code we're not removing '$TRASH_DIRECTORY'"
+               elif test -z "$debug" && test -n "$remove_trash"
                then
                        test -d "$TRASH_DIRECTORY" ||
                        error "Tests passed but trash directory already removed before test cleanup; aborting"
@@ -1249,17 +1345,35 @@ test_done () {
                        } ||
                        error "Tests passed but test cleanup failed; aborting"
                fi
+
+               check_test_results_san_file_ "$test_failure"
+
+               if test -z "$skip_all" && test -n "$invert_exit_code"
+               then
+                       say_color warn "# faking up non-zero exit with --invert-exit-code"
+                       GIT_EXIT_OK=t
+                       exit 1
+               fi
+
                test_at_end_hook_
 
+               GIT_EXIT_OK=t
                exit 0 ;;
 
        *)
-               if test $test_external_has_tap -eq 0
+               say_color error "# failed $test_failure among $msg"
+               say "1..$test_count"
+
+               check_test_results_san_file_ "$test_failure"
+
+               if test -n "$invert_exit_code"
                then
-                       say_color error "# failed $test_failure among $msg"
-                       say "1..$test_count"
+                       _invert_exit_code_failure_end_blurb
+                       GIT_EXIT_OK=t
+                       exit 0
                fi
 
+               GIT_EXIT_OK=t
                exit 1 ;;
 
        esac
@@ -1392,14 +1506,12 @@ fi
 GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib
 export GITPERLLIB
 test -d "$GIT_BUILD_DIR"/templates/blt || {
-       error "You haven't built things yet, have you?"
+       BAIL_OUT "You haven't built things yet, have you?"
 }
 
 if ! test -x "$GIT_BUILD_DIR"/t/helper/test-tool$X
 then
-       echo >&2 'You need to build test-tool:'
-       echo >&2 'Run "make t/helper/test-tool" in the source (toplevel) directory'
-       exit 1
+       BAIL_OUT 'You need to build test-tool; Run "make t/helper/test-tool" in the source (toplevel) directory'
 fi
 
 # Are we running this test at all?
@@ -1413,24 +1525,70 @@ then
        test_done
 fi
 
-# skip non-whitelisted tests when compiled with SANITIZE=leak
+BAIL_OUT_ENV_NEEDS_SANITIZE_LEAK () {
+       BAIL_OUT "$1 has no effect except when compiled with SANITIZE=leak"
+}
+
 if test -n "$SANITIZE_LEAK"
 then
-       if test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false
+       # Normalize with test_bool_env
+       passes_sanitize_leak=
+
+       # We need to see TEST_PASSES_SANITIZE_LEAK in "git
+       # env--helper" (via test_bool_env)
+       export TEST_PASSES_SANITIZE_LEAK
+       if test_bool_env TEST_PASSES_SANITIZE_LEAK false
        then
-               # We need to see it in "git env--helper" (via
-               # test_bool_env)
-               export TEST_PASSES_SANITIZE_LEAK
+               passes_sanitize_leak=t
+       fi
 
-               if ! test_bool_env TEST_PASSES_SANITIZE_LEAK false
+       if test "$GIT_TEST_PASSING_SANITIZE_LEAK" = "check"
+       then
+               sanitize_leak_check=t
+               if test -n "$invert_exit_code"
                then
-                       skip_all="skipping $this_test under GIT_TEST_PASSING_SANITIZE_LEAK=true"
-                       test_done
+                       BAIL_OUT "cannot use --invert-exit-code under GIT_TEST_PASSING_SANITIZE_LEAK=check"
                fi
+
+               if test -z "$passes_sanitize_leak"
+               then
+                       say "in GIT_TEST_PASSING_SANITIZE_LEAK=check mode, setting --invert-exit-code for TEST_PASSES_SANITIZE_LEAK != true"
+                       invert_exit_code=t
+               fi
+       elif test -z "$passes_sanitize_leak" &&
+            test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false
+       then
+               skip_all="skipping $this_test under GIT_TEST_PASSING_SANITIZE_LEAK=true"
+               test_done
+       fi
+
+       if test_bool_env GIT_TEST_SANITIZE_LEAK_LOG false
+       then
+               if ! mkdir -p "$TEST_RESULTS_SAN_DIR"
+               then
+                       BAIL_OUT "cannot create $TEST_RESULTS_SAN_DIR"
+               fi &&
+               TEST_RESULTS_SAN_FILE="$TEST_RESULTS_SAN_DIR/$TEST_RESULTS_SAN_FILE_PFX"
+
+               # In case "test-results" is left over from a previous
+               # run: Only report if new leaks show up.
+               TEST_RESULTS_SAN_DIR_NR_LEAKS_STARTUP=$(nr_san_dir_leaks_)
+
+               # Don't litter *.leak dirs if there was nothing to report
+               test_atexit "rmdir \"$TEST_RESULTS_SAN_DIR\" 2>/dev/null || :"
+
+               prepend_var LSAN_OPTIONS : dedup_token_length=9999
+               prepend_var LSAN_OPTIONS : log_exe_name=1
+               prepend_var LSAN_OPTIONS : log_path=\"$TEST_RESULTS_SAN_FILE\"
+               export LSAN_OPTIONS
        fi
-elif test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false
+elif test "$GIT_TEST_PASSING_SANITIZE_LEAK" = "check" ||
+     test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false
+then
+       BAIL_OUT_ENV_NEEDS_SANITIZE_LEAK "GIT_TEST_PASSING_SANITIZE_LEAK=true"
+elif test_bool_env GIT_TEST_SANITIZE_LEAK_LOG false
 then
-       BAIL_OUT "GIT_TEST_PASSING_SANITIZE_LEAK=true has no effect except when compiled with SANITIZE=leak"
+       BAIL_OUT_ENV_NEEDS_SANITIZE_LEAK "GIT_TEST_SANITIZE_LEAK_LOG=true"
 fi
 
 # Last-minute variable setup
@@ -1453,15 +1611,15 @@ remove_trash_directory () {
 
 # Test repository
 remove_trash_directory "$TRASH_DIRECTORY" || {
-       GIT_EXIT_OK=t
-       echo >&5 "FATAL: Cannot prepare test area"
-       exit 1
+       BAIL_OUT 'cannot prepare test area'
 }
 
 remove_trash=t
 if test -z "$TEST_NO_CREATE_REPO"
 then
-       git init "$TRASH_DIRECTORY" >&3 2>&4 ||
+       git init \
+           ${TEST_CREATE_REPO_NO_TEMPLATE:+--template=} \
+           "$TRASH_DIRECTORY" >&3 2>&4 ||
        error "cannot run git init"
 else
        mkdir -p "$TRASH_DIRECTORY"
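
With TEST_CREATE_REPO_NO_TEMPLATE=1 the trash repository is now initialized with "--template=",
so its .git directory starts without templated files such as info/exclude; scripts that need
those paths create them explicitly (hence the "mkdir .git/info" additions elsewhere in this
series).  The same effect can be reproduced by hand:

    git init --template= repo      # no templated hooks/, info/, etc. under repo/.git
    mkdir repo/.git/info           # recreate only what is actually needed
    : >repo/.git/info/exclude
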
@@ -1469,7 +1627,7 @@ fi
 
 # Use -P to resolve symlinks in our working directory so that the cwd
 # in subprocesses like git equals our $PWD (for pathname comparisons).
-cd -P "$TRASH_DIRECTORY" || exit 1
+cd -P "$TRASH_DIRECTORY" || BAIL_OUT "cannot cd -P to \"$TRASH_DIRECTORY\""
 
 start_test_output "$0"
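
Taken together, these test-lib.sh changes wire up leak-checking logs: with
GIT_TEST_SANITIZE_LEAK_LOG=true, LSAN output is collected under
"test-results/<script>.leak/trace.*" and leak logs that appear even though the script otherwise
reported success make it exit non-zero, while GIT_TEST_PASSING_SANITIZE_LEAK=check inverts the
result for scripts not annotated with TEST_PASSES_SANITIZE_LEAK=true.  A hedged sketch of
driving this against a SANITIZE=leak build (the script name is only an example):

    make SANITIZE=leak
    cd t
    GIT_TEST_SANITIZE_LEAK_LOG=true \
    GIT_TEST_PASSING_SANITIZE_LEAK=check \
        ./t0002-gitfile.sh -vx
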
 
index 2024c82691fe4a1a1c9b5c8ebd31d6c382fe8bfc..e27048f970ba3a22823a0a7eab870c424d2e3e1d 100644 (file)
  *
  * The possible states of a `tempfile` object are as follows:
  *
- * - Uninitialized. In this state the object's `on_list` field must be
- *   zero but the rest of its contents need not be initialized. As
- *   soon as the object is used in any way, it is irrevocably
- *   registered in `tempfile_list`, and `on_list` is set.
+ * - Inactive/unallocated. The only way to get a tempfile is via a creation
+ *   function like create_tempfile(). Once allocated, the tempfile is on the
+ *   global tempfile_list and considered active.
  *
  * - Active, file open (after `create_tempfile()` or
  *   `reopen_tempfile()`). In this state:
  *
  *   - the temporary file exists
- *   - `active` is set
  *   - `filename` holds the filename of the temporary file
  *   - `fd` holds a file descriptor open for writing to it
  *   - `fp` holds a pointer to an open `FILE` object if and only if
  *   `fd` is -1, and `fp` is `NULL`.
  *
  * - Inactive (after `delete_tempfile()`, `rename_tempfile()`, or a
- *   failed attempt to create a temporary file). In this state:
- *
- *   - `active` is unset
- *   - `filename` is empty (usually, though there are transitory
- *     states in which this condition doesn't hold). Client code should
- *     *not* rely on the filename being empty in this state.
- *   - `fd` is -1 and `fp` is `NULL`
- *   - the object is removed from `tempfile_list` (but could be used again)
+ *   failed attempt to create a temporary file). The struct is removed from
+ *   the global tempfile_list and deallocated.
  *
  * A temporary file is owned by the process that created it. The
  * `tempfile` has an `owner` field that records the owner's PID. This
@@ -59,14 +51,11 @@ static VOLATILE_LIST_HEAD(tempfile_list);
 static void remove_template_directory(struct tempfile *tempfile,
                                      int in_signal_handler)
 {
-       if (tempfile->directorylen > 0 &&
-           tempfile->directorylen < tempfile->filename.len &&
-           tempfile->filename.buf[tempfile->directorylen] == '/') {
-               strbuf_setlen(&tempfile->filename, tempfile->directorylen);
+       if (tempfile->directory) {
                if (in_signal_handler)
-                       rmdir(tempfile->filename.buf);
+                       rmdir(tempfile->directory);
                else
-                       rmdir_or_warn(tempfile->filename.buf);
+                       rmdir_or_warn(tempfile->directory);
        }
 }
 
@@ -89,8 +78,6 @@ static void remove_tempfiles(int in_signal_handler)
                else
                        unlink_or_warn(p->filename.buf);
                remove_template_directory(p, in_signal_handler);
-
-               p->active = 0;
        }
 }
 
@@ -111,11 +98,10 @@ static struct tempfile *new_tempfile(void)
        struct tempfile *tempfile = xmalloc(sizeof(*tempfile));
        tempfile->fd = -1;
        tempfile->fp = NULL;
-       tempfile->active = 0;
        tempfile->owner = 0;
        INIT_LIST_HEAD(&tempfile->list);
        strbuf_init(&tempfile->filename, 0);
-       tempfile->directorylen = 0;
+       tempfile->directory = NULL;
        return tempfile;
 }
 
@@ -123,9 +109,6 @@ static void activate_tempfile(struct tempfile *tempfile)
 {
        static int initialized;
 
-       if (is_tempfile_active(tempfile))
-               BUG("activate_tempfile called for active object");
-
        if (!initialized) {
                sigchain_push_common(remove_tempfiles_on_signal);
                atexit(remove_tempfiles_on_exit);
@@ -134,14 +117,13 @@ static void activate_tempfile(struct tempfile *tempfile)
 
        volatile_list_add(&tempfile->list, &tempfile_list);
        tempfile->owner = getpid();
-       tempfile->active = 1;
 }
 
 static void deactivate_tempfile(struct tempfile *tempfile)
 {
-       tempfile->active = 0;
-       strbuf_release(&tempfile->filename);
        volatile_list_del(&tempfile->list);
+       strbuf_release(&tempfile->filename);
+       free(tempfile->directory);
        free(tempfile);
 }
 
@@ -254,7 +236,7 @@ struct tempfile *mks_tempfile_dt(const char *directory_template,
 
        tempfile = new_tempfile();
        strbuf_swap(&tempfile->filename, &sb);
-       tempfile->directorylen = directorylen;
+       tempfile->directory = xmemdupz(tempfile->filename.buf, directorylen);
        tempfile->fd = fd;
        activate_tempfile(tempfile);
        return tempfile;
index d7804a214abb60ee60496ef34a9fc8be110344ea..d0413af733c81ad895669aab30937435cae0f2af 100644 (file)
 
 struct tempfile {
        volatile struct volatile_list_head list;
-       volatile sig_atomic_t active;
        volatile int fd;
        FILE *volatile fp;
        volatile pid_t owner;
        struct strbuf filename;
-       size_t directorylen;
+       char *directory;
 };
 
 /*
@@ -221,7 +220,7 @@ FILE *fdopen_tempfile(struct tempfile *tempfile, const char *mode);
 
 static inline int is_tempfile_active(struct tempfile *tempfile)
 {
-       return tempfile && tempfile->active;
+       return !!tempfile;
 }
 
 /*
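The two tempfile hunks above drop the `active` flag: a `struct tempfile` is now allocated only while it is active, deactivation frees it, and `is_tempfile_active()` reduces to a NULL check. A minimal caller-side sketch of that lifecycle, using public helpers from tempfile.h (the wrapper function and its ".tmp" naming are hypothetical, not part of this commit, and error handling is abbreviated):

#include "cache.h"      /* strbuf and write_in_full() in this era of the tree */
#include "tempfile.h"

/* Hypothetical helper: write `len` bytes and publish them atomically at `path`. */
static int publish_atomically(const char *path, const void *buf, size_t len)
{
	struct strbuf tmp = STRBUF_INIT;
	struct tempfile *t;

	strbuf_addf(&tmp, "%s.tmp", path);   /* hypothetical temp-file name */
	t = create_tempfile(tmp.buf);        /* NULL now means "no active tempfile" */
	strbuf_release(&tmp);
	if (!is_tempfile_active(t))          /* after this series: just a NULL check */
		return -1;

	if (write_in_full(get_tempfile_fd(t), buf, len) < 0) {
		delete_tempfile(&t);         /* unlinks the file, frees t, sets it to NULL */
		return -1;
	}
	return rename_tempfile(&t, path);    /* moves the file into place and frees t */
}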
index c5c8cfbbaa065bccfbc058efd9eaffc71507d011..37a3163be12110ef9e830eba534d19064408c581 100644 (file)
@@ -479,9 +479,12 @@ static void fn_param_fl(const char *file, int line, const char *param,
 {
        const char *event_name = "def_param";
        struct json_writer jw = JSON_WRITER_INIT;
+       enum config_scope scope = current_config_scope();
+       const char *scope_name = config_scope_name(scope);
 
        jw_object_begin(&jw, 0);
        event_fmt_prepare(event_name, file, line, NULL, &jw);
+       jw_object_string(&jw, "scope", scope_name);
        jw_object_string(&jw, "param", param);
        jw_object_string(&jw, "value", value);
        jw_end(&jw);
index c42fbade7f0381d4ca80c562f51a7864ceb5449e..69f80330778b40e8293b46ed1a49bdb9feedfeff 100644 (file)
@@ -298,8 +298,11 @@ static void fn_param_fl(const char *file, int line, const char *param,
                        const char *value)
 {
        struct strbuf buf_payload = STRBUF_INIT;
+       enum config_scope scope = current_config_scope();
+       const char *scope_name = config_scope_name(scope);
 
-       strbuf_addf(&buf_payload, "def_param %s=%s", param, value);
+       strbuf_addf(&buf_payload, "def_param scope:%s %s=%s", scope_name, param,
+                   value);
        normal_io_write_fl(file, line, &buf_payload);
        strbuf_release(&buf_payload);
 }
index a1eff8bea3101aab10a2b2c4e2a655f2f498a5df..8cb792488c8feee60df3c0e38301d4008b14c50c 100644 (file)
@@ -441,12 +441,17 @@ static void fn_param_fl(const char *file, int line, const char *param,
 {
        const char *event_name = "def_param";
        struct strbuf buf_payload = STRBUF_INIT;
+       struct strbuf scope_payload = STRBUF_INIT;
+       enum config_scope scope = current_config_scope();
+       const char *scope_name = config_scope_name(scope);
 
        strbuf_addf(&buf_payload, "%s:%s", param, value);
+       strbuf_addf(&scope_payload, "%s:%s", "scope", scope_name);
 
-       perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
-                        &buf_payload);
+       perf_io_write_fl(file, line, event_name, NULL, NULL, NULL,
+                        scope_payload.buf, &buf_payload);
        strbuf_release(&buf_payload);
+       strbuf_release(&scope_payload);
 }
 
 static void fn_repo_fl(const char *file, int line,
index d419c20735e993d5812f396428e7727533d56768..0fd5b142a377056d1e45f8dfd1ff6ae7a9c40097 100644 (file)
--- a/trailer.c
+++ b/trailer.c
@@ -478,7 +478,8 @@ static struct {
        { "ifmissing", TRAILER_IF_MISSING }
 };
 
-static int git_trailer_default_config(const char *conf_key, const char *value, void *cb)
+static int git_trailer_default_config(const char *conf_key, const char *value,
+                                     void *cb UNUSED)
 {
        const char *trailer_item, *variable_name;
 
@@ -509,7 +510,8 @@ static int git_trailer_default_config(const char *conf_key, const char *value, v
        return 0;
 }
 
-static int git_trailer_config(const char *conf_key, const char *value, void *cb)
+static int git_trailer_config(const char *conf_key, const char *value,
+                             void *cb UNUSED)
 {
        const char *trailer_item, *variable_name;
        struct arg_item *item;
index b51e991e4439467c3cb2fc15381c259e135f5139..1687ad7e2c66c23b6c488ac7d82117828d23dfc9 100644 (file)
@@ -142,7 +142,7 @@ static void get_refs_from_bundle_inner(struct transport *transport)
 
 static struct ref *get_refs_from_bundle(struct transport *transport,
                                        int for_push,
-                                       struct transport_ls_refs_options *transport_options)
+                                       struct transport_ls_refs_options *transport_options UNUSED)
 {
        struct bundle_transport_data *data = transport->data;
        struct ref *result = NULL;
@@ -386,7 +386,8 @@ static int fetch_refs_via_pack(struct transport *transport,
        args.cloning = transport->cloning;
        args.update_shallow = data->options.update_shallow;
        args.from_promisor = data->options.from_promisor;
-       args.filter_options = data->options.filter_options;
+       list_objects_filter_copy(&args.filter_options,
+                                &data->options.filter_options);
        args.refetch = data->options.refetch;
        args.stateless_rpc = transport->stateless_rpc;
        args.server_options = transport->server_options;
@@ -453,6 +454,7 @@ cleanup:
 
        free_refs(refs_tmp);
        free_refs(refs);
+       list_objects_filter_release(&args.filter_options);
        return ret;
 }
 
@@ -893,6 +895,7 @@ static int disconnect_git(struct transport *transport)
                finish_connect(data->conn);
        }
 
+       list_objects_filter_release(&data->options.filter_options);
        free(data);
        return 0;
 }
index 90b92114be8558b63371531aecfa246829317840..bae812156c4fedb8e296e422d4cb6a8af1a2dada 100644 (file)
@@ -1423,7 +1423,7 @@ static void debug_unpack_callback(int n,
  * from the tree walk at the given traverse_info.
  */
 static int is_sparse_directory_entry(struct cache_entry *ce,
-                                    struct name_entry *name,
+                                    const struct name_entry *name,
                                     struct traverse_info *info)
 {
        if (!ce || !name || !S_ISSPARSEDIR(ce->ce_mode))
@@ -1562,7 +1562,7 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
                        }
                }
 
-               if (!is_sparse_directory_entry(src[0], names, info) &&
+               if (!is_sparse_directory_entry(src[0], p, info) &&
                    !is_new_sparse_dir &&
                    traverse_trees_recursive(n, dirmask, mask & ~dirmask,
                                                    names, info) < 0) {
index 3a851b360663a56bc2ad0d7beed0cc566d581546..abf2c11cfee5204c00266480a8437b37238c1db1 100644 (file)
@@ -455,6 +455,7 @@ static void create_pack_file(struct upload_pack_data *pack_data,
        return;
 
  fail:
+       free(output_state);
        send_client_data(3, abort_msg, sizeof(abort_msg),
                         pack_data->use_sideband);
        die("git upload-pack: %s", abort_msg);
@@ -1169,7 +1170,7 @@ static int mark_our_ref(const char *refname, const char *refname_full,
 }
 
 static int check_ref(const char *refname_full, const struct object_id *oid,
-                    int flag, void *cb_data)
+                    int flag UNUSED, void *cb_data UNUSED)
 {
        const char *refname = strip_namespace(refname_full);
 
@@ -1193,7 +1194,7 @@ static void format_session_id(struct strbuf *buf, struct upload_pack_data *d) {
 }
 
 static int send_ref(const char *refname, const struct object_id *oid,
-                   int flag, void *cb_data)
+                   int flag UNUSED, void *cb_data)
 {
        static const char *capabilities = "multi_ack thin-pack side-band"
                " side-band-64k ofs-delta shallow deepen-since deepen-not"
@@ -1235,7 +1236,8 @@ static int send_ref(const char *refname, const struct object_id *oid,
        return 0;
 }
 
-static int find_symref(const char *refname, const struct object_id *oid,
+static int find_symref(const char *refname,
+                      const struct object_id *oid UNUSED,
                       int flag, void *cb_data)
 {
        const char *symref_target;
@@ -1321,18 +1323,27 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data)
                data->advertise_sid = git_config_bool(var, value);
        }
 
-       if (current_config_scope() != CONFIG_SCOPE_LOCAL &&
-           current_config_scope() != CONFIG_SCOPE_WORKTREE) {
-               if (!strcmp("uploadpack.packobjectshook", var))
-                       return git_config_string(&data->pack_objects_hook, var, value);
-       }
-
        if (parse_object_filter_config(var, value, data) < 0)
                return -1;
 
        return parse_hide_refs_config(var, value, "uploadpack");
 }
 
+static int upload_pack_protected_config(const char *var, const char *value, void *cb_data)
+{
+       struct upload_pack_data *data = cb_data;
+
+       if (!strcmp("uploadpack.packobjectshook", var))
+               return git_config_string(&data->pack_objects_hook, var, value);
+       return 0;
+}
+
+static void get_upload_pack_config(struct upload_pack_data *data)
+{
+       git_config(upload_pack_config, data);
+       git_protected_config(upload_pack_protected_config, data);
+}
+
 void upload_pack(const int advertise_refs, const int stateless_rpc,
                 const int timeout)
 {
@@ -1340,8 +1351,7 @@ void upload_pack(const int advertise_refs, const int stateless_rpc,
        struct upload_pack_data data;
 
        upload_pack_data_init(&data);
-
-       git_config(upload_pack_config, &data);
+       get_upload_pack_config(&data);
 
        data.stateless_rpc = stateless_rpc;
        data.timeout = timeout;
@@ -1400,18 +1410,14 @@ static int parse_want(struct packet_writer *writer, const char *line,
        const char *arg;
        if (skip_prefix(line, "want ", &arg)) {
                struct object_id oid;
-               struct commit *commit;
                struct object *o;
 
                if (get_oid_hex(arg, &oid))
                        die("git upload-pack: protocol error, "
                            "expected to get oid, not '%s'", line);
 
-               commit = lookup_commit_in_graph(the_repository, &oid);
-               if (commit)
-                       o = &commit->object;
-               else
-                       o = parse_object(the_repository, &oid);
+               o = parse_object_with_flags(the_repository, &oid,
+                                           PARSE_OBJECT_SKIP_HASH_CHECK);
 
                if (!o) {
                        packet_writer_error(writer,
@@ -1695,8 +1701,7 @@ int upload_pack_v2(struct repository *r, struct packet_reader *request)
 
        upload_pack_data_init(&data);
        data.use_sideband = LARGE_PACKET_MAX;
-
-       git_config(upload_pack_config, &data);
+       get_upload_pack_config(&data);
 
        while (state != FETCH_DONE) {
                switch (state) {
index c5e292197956487c2a44f84f7839126d3a796cfc..99d0e0eae047410660f0bf3b0d3f487d45c1134d 100644 (file)
--- a/walker.c
+++ b/walker.c
@@ -215,8 +215,10 @@ static int interpret_target(struct walker *walker, char *target, struct object_i
        return -1;
 }
 
-static int mark_complete(const char *path, const struct object_id *oid,
-                        int flag, void *cb_data)
+static int mark_complete(const char *path UNUSED,
+                        const struct object_id *oid,
+                        int flag UNUSED,
+                        void *cb_data UNUSED)
 {
        struct commit *commit = lookup_commit_reference_gently(the_repository,
                                                               oid, 1);
index 1c3c970080b0e00169802b8de4d3f70b709b44ad..299d6489a6b0a148b57fa6c8a11f9245e1dfd0dd 100644 (file)
--- a/wrapper.c
+++ b/wrapper.c
@@ -161,28 +161,6 @@ void xsetenv(const char *name, const char *value, int overwrite)
                die_errno(_("could not setenv '%s'"), name ? name : "(null)");
 }
 
-/*
- * Limit size of IO chunks, because huge chunks only cause pain.  OS X
- * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
- * the absence of bugs, large chunks can result in bad latencies when
- * you decide to kill the process.
- *
- * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX
- * that is smaller than that, clip it to SSIZE_MAX, as a call to
- * read(2) or write(2) larger than that is allowed to fail.  As the last
- * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value"
- * to override this, if the definition of SSIZE_MAX given by the platform
- * is broken.
- */
-#ifndef MAX_IO_SIZE
-# define MAX_IO_SIZE_DEFAULT (8*1024*1024)
-# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT)
-#  define MAX_IO_SIZE SSIZE_MAX
-# else
-#  define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT
-# endif
-#endif
-
 /**
  * xopen() is the same as open(), but it die()s if the open() fails.
  */
@@ -616,10 +594,16 @@ int git_fsync(int fd, enum fsync_action action)
        }
 }
 
+static void log_trace_fsync_if(const char *key, intmax_t value)
+{
+       if (value)
+               trace2_data_intmax("fsync", the_repository, key, value);
+}
+
 void trace_git_fsync_stats(void)
 {
-       trace2_data_intmax("fsync", the_repository, "fsync/writeout-only", count_fsync_writeout_only);
-       trace2_data_intmax("fsync", the_repository, "fsync/hardware-flush", count_fsync_hardware_flush);
+       log_trace_fsync_if("fsync/writeout-only", count_fsync_writeout_only);
+       log_trace_fsync_if("fsync/hardware-flush", count_fsync_hardware_flush);
 }
 
 static int warn_if_unremovable(const char *op, const char *file, int rc)
index 867e3e417e9d4d26036a15a2034018529ce6dec7..5813174896cc9ae5fa9287c8bcc432dc8cf2ef47 100644 (file)
@@ -947,9 +947,11 @@ static void wt_longstatus_print_changed(struct wt_status *s)
        wt_longstatus_print_trailer(s);
 }
 
-static int stash_count_refs(struct object_id *ooid, struct object_id *noid,
-                           const char *email, timestamp_t timestamp, int tz,
-                           const char *message, void *cb_data)
+static int stash_count_refs(struct object_id *ooid UNUSED,
+                           struct object_id *noid UNUSED,
+                           const char *email UNUSED,
+                           timestamp_t timestamp UNUSED, int tz UNUSED,
+                           const char *message UNUSED, void *cb_data)
 {
        int *c = cb_data;
        (*c)++;
@@ -1612,8 +1614,10 @@ struct grab_1st_switch_cbdata {
        struct object_id noid;
 };
 
-static int grab_1st_switch(struct object_id *ooid, struct object_id *noid,
-                          const char *email, timestamp_t timestamp, int tz,
+static int grab_1st_switch(struct object_id *ooid UNUSED,
+                          struct object_id *noid,
+                          const char *email UNUSED,
+                          timestamp_t timestamp UNUSED, int tz UNUSED,
                           const char *message, void *cb_data)
 {
        struct grab_1st_switch_cbdata *cb = cb_data;
index 72e25a9ffa56fbeebefbd2e9904b3957d0a8610d..bb56b23f34c96ceec169d39b829e89cd512d7e39 100644 (file)
@@ -120,6 +120,7 @@ typedef struct s_bdiffparam {
 
 
 #define xdl_malloc(x) xmalloc(x)
+#define xdl_calloc(n, sz) xcalloc(n, sz)
 #define xdl_free(ptr) free(ptr)
 #define xdl_realloc(ptr,x) xrealloc(ptr,x)
 
index 758410c11ac286adc77c6f992e51822def202a40..32652ded2d7c5cad87f120a13c3c6c84d6def61f 100644 (file)
@@ -321,12 +321,12 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
                return -1;
 
        if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) {
-               res = xdl_do_patience_diff(mf1, mf2, xpp, xe);
+               res = xdl_do_patience_diff(xpp, xe);
                goto out;
        }
 
        if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) {
-               res = xdl_do_histogram_diff(mf1, mf2, xpp, xe);
+               res = xdl_do_histogram_diff(xpp, xe);
                goto out;
        }
 
@@ -337,7 +337,7 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
         * One is to store the forward path and one to store the backward path.
         */
        ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
-       if (!(kvd = (long *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) {
+       if (!XDL_ALLOC_ARRAY(kvd, 2 * ndiags + 2)) {
 
                xdl_free_env(xe);
                return -1;
index 8f1c7c8b0445f88514d0cb0ce868380d1711ccd2..126c9d8ff4e4147e0f4e96ddb9debfde17d9b637 100644 (file)
@@ -56,9 +56,7 @@ int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr);
 void xdl_free_script(xdchange_t *xscr);
 int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
                  xdemitconf_t const *xecfg);
-int xdl_do_patience_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
-               xdfenv_t *env);
-int xdl_do_histogram_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
-               xdfenv_t *env);
+int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env);
+int xdl_do_histogram_diff(xpparam_t const *xpp, xdfenv_t *env);
 
 #endif /* #if !defined(XDIFFI_H) */
index 01decffc332629dd9dcfd79c904187b7cc6d0943..16a8fe2f3f3df3fe52fee0ed6d527682a61a0dc8 100644 (file)
@@ -251,7 +251,7 @@ static int find_lcs(xpparam_t const *xpp, xdfenv_t *env,
                    int line1, int count1, int line2, int count2)
 {
        int b_ptr;
-       int sz, ret = -1;
+       int ret = -1;
        struct histindex index;
 
        memset(&index, 0, sizeof(index));
@@ -265,23 +265,16 @@ static int find_lcs(xpparam_t const *xpp, xdfenv_t *env,
        index.rcha.head = NULL;
 
        index.table_bits = xdl_hashbits(count1);
-       sz = index.records_size = 1 << index.table_bits;
-       sz *= sizeof(struct record *);
-       if (!(index.records = (struct record **) xdl_malloc(sz)))
+       index.records_size = 1 << index.table_bits;
+       if (!XDL_CALLOC_ARRAY(index.records, index.records_size))
                goto cleanup;
-       memset(index.records, 0, sz);
 
-       sz = index.line_map_size = count1;
-       sz *= sizeof(struct record *);
-       if (!(index.line_map = (struct record **) xdl_malloc(sz)))
+       index.line_map_size = count1;
+       if (!XDL_CALLOC_ARRAY(index.line_map, index.line_map_size))
                goto cleanup;
-       memset(index.line_map, 0, sz);
 
-       sz = index.line_map_size;
-       sz *= sizeof(unsigned int);
-       if (!(index.next_ptrs = (unsigned int *) xdl_malloc(sz)))
+       if (!XDL_CALLOC_ARRAY(index.next_ptrs, index.line_map_size))
                goto cleanup;
-       memset(index.next_ptrs, 0, sz);
 
        /* lines / 4 + 1 comes from xprepare.c:xdl_prepare_ctx() */
        if (xdl_cha_init(&index.rcha, sizeof(struct record), count1 / 4 + 1) < 0)
@@ -369,8 +362,7 @@ out:
        return result;
 }
 
-int xdl_do_histogram_diff(mmfile_t *file1, mmfile_t *file2,
-       xpparam_t const *xpp, xdfenv_t *env)
+int xdl_do_histogram_diff(xpparam_t const *xpp, xdfenv_t *env)
 {
        return histogram_diff(xpp, env,
                env->xdf1.dstart + 1, env->xdf1.dend - env->xdf1.dstart + 1,
index ae4636c2477cc640eae84578805d9722d5e28d1b..8487bb396faa5c6ac984e8295b737b4fbab3f92c 100644 (file)
@@ -49,5 +49,23 @@ do { \
                ((unsigned long) __p[2]) << 16 | ((unsigned long) __p[3]) << 24; \
 } while (0)
 
+/* Allocate an array of nr elements, returns NULL on failure */
+#define XDL_ALLOC_ARRAY(p, nr)                         \
+       ((p) = SIZE_MAX / sizeof(*(p)) >= (size_t)(nr)  \
+               ? xdl_malloc((nr) * sizeof(*(p)))       \
+               : NULL)
+
+/* Allocate an array of nr zeroed out elements, returns NULL on failure */
+#define XDL_CALLOC_ARRAY(p, nr)        ((p) = xdl_calloc(nr, sizeof(*(p))))
+
+/*
+ * Ensure array p can accommodate at least nr elements, growing the
+ * array and updating alloc (which is the number of allocated
+ * elements) as necessary. Frees p and returns -1 on failure, returns
+ * 0 on success
+ */
+#define XDL_ALLOC_GROW(p, nr, alloc)   \
+       (-!((nr) <= (alloc) ||          \
+           ((p) = xdl_alloc_grow_helper((p), (nr), &(alloc), sizeof(*(p))))))
 
 #endif /* #if !defined(XMACROS_H) */
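The three macros added to this header centralize the overflow-checked allocation patterns that the xhistogram.c, xpatience.c, xprepare.c and xdiffi.c hunks elsewhere in this commit switch to. A small usage sketch (the record type and function are hypothetical; inside xdiff/ the xdl_* wrappers come in via xinclude.h):

#include "xinclude.h"   /* pulls in xmacros.h and the xdl_* allocation wrappers */

struct rec { unsigned long ha; };   /* hypothetical element type */

static int collect(long want)
{
	struct rec *recs = NULL;
	long nr = 0, alloc = 0, i;
	unsigned long *bits;

	/* Fixed-size, zero-initialized array; NULL on overflow or allocation failure. */
	if (!XDL_CALLOC_ARRAY(bits, 64))
		return -1;
	xdl_free(bits);

	/* Growable array: the capacity lives in `alloc` and is updated by the macro. */
	for (i = 0; i < want; i++) {
		if (XDL_ALLOC_GROW(recs, nr + 1, alloc))
			return -1;   /* on failure the macro has already freed recs */
		recs[nr++].ha = (unsigned long)i;
	}
	xdl_free(recs);
	return 0;
}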
index 1a21c6a74b368cb094e20c708a43071c72558d7e..a2d8955537f566458b6e912ff5fb8496fa7fe85a 100644 (file)
@@ -69,7 +69,6 @@ struct hashmap {
        } *entries, *first, *last;
        /* were common records found? */
        unsigned long has_matches;
-       mmfile_t *file1, *file2;
        xdfenv_t *env;
        xpparam_t const *xpp;
 };
@@ -139,23 +138,17 @@ static void insert_record(xpparam_t const *xpp, int line, struct hashmap *map,
  *
  * It is assumed that env has been prepared using xdl_prepare().
  */
-static int fill_hashmap(mmfile_t *file1, mmfile_t *file2,
-               xpparam_t const *xpp, xdfenv_t *env,
+static int fill_hashmap(xpparam_t const *xpp, xdfenv_t *env,
                struct hashmap *result,
                int line1, int count1, int line2, int count2)
 {
-       result->file1 = file1;
-       result->file2 = file2;
        result->xpp = xpp;
        result->env = env;
 
        /* We know exactly how large we want the hash map */
        result->alloc = count1 * 2;
-       result->entries = (struct entry *)
-               xdl_malloc(result->alloc * sizeof(struct entry));
-       if (!result->entries)
+       if (!XDL_CALLOC_ARRAY(result->entries, result->alloc))
                return -1;
-       memset(result->entries, 0, result->alloc * sizeof(struct entry));
 
        /* First, fill with entries from the first file */
        while (count1--)
@@ -200,7 +193,7 @@ static int binary_search(struct entry **sequence, int longest,
  */
 static int find_longest_common_sequence(struct hashmap *map, struct entry **res)
 {
-       struct entry **sequence = xdl_malloc(map->nr * sizeof(struct entry *));
+       struct entry **sequence;
        int longest = 0, i;
        struct entry *entry;
 
@@ -211,7 +204,7 @@ static int find_longest_common_sequence(struct hashmap *map, struct entry **res)
         */
        int anchor_i = -1;
 
-       if (!sequence)
+       if (!XDL_ALLOC_ARRAY(sequence, map->nr))
                return -1;
 
        for (entry = map->first; entry; entry = entry->next) {
@@ -257,8 +250,7 @@ static int match(struct hashmap *map, int line1, int line2)
        return record1->ha == record2->ha;
 }
 
-static int patience_diff(mmfile_t *file1, mmfile_t *file2,
-               xpparam_t const *xpp, xdfenv_t *env,
+static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
                int line1, int count1, int line2, int count2);
 
 static int walk_common_sequence(struct hashmap *map, struct entry *first,
@@ -289,8 +281,7 @@ static int walk_common_sequence(struct hashmap *map, struct entry *first,
 
                /* Recurse */
                if (next1 > line1 || next2 > line2) {
-                       if (patience_diff(map->file1, map->file2,
-                                       map->xpp, map->env,
+                       if (patience_diff(map->xpp, map->env,
                                        line1, next1 - line1,
                                        line2, next2 - line2))
                                return -1;
@@ -329,8 +320,7 @@ static int fall_back_to_classic_diff(struct hashmap *map,
  *
  * This function assumes that env was prepared with xdl_prepare_env().
  */
-static int patience_diff(mmfile_t *file1, mmfile_t *file2,
-               xpparam_t const *xpp, xdfenv_t *env,
+static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
                int line1, int count1, int line2, int count2)
 {
        struct hashmap map;
@@ -349,7 +339,7 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2,
        }
 
        memset(&map, 0, sizeof(map));
-       if (fill_hashmap(file1, file2, xpp, env, &map,
+       if (fill_hashmap(xpp, env, &map,
                        line1, count1, line2, count2))
                return -1;
 
@@ -377,9 +367,7 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2,
        return result;
 }
 
-int xdl_do_patience_diff(mmfile_t *file1, mmfile_t *file2,
-               xpparam_t const *xpp, xdfenv_t *env)
+int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env)
 {
-       return patience_diff(file1, file2, xpp, env,
-                       1, env->xdf1.nrec, 1, env->xdf2.nrec);
+       return patience_diff(xpp, env, 1, env->xdf1.nrec, 1, env->xdf2.nrec);
 }
index 105752758f2f3870448f1ec1cf0070c3a29b36e6..c84549f6c5089ea08c7bc1daad3ef57dd3fceb77 100644 (file)
@@ -78,15 +78,14 @@ static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags) {
 
                return -1;
        }
-       if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) {
+       if (!XDL_CALLOC_ARRAY(cf->rchash, cf->hsize)) {
 
                xdl_cha_free(&cf->ncha);
                return -1;
        }
-       memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *));
 
        cf->alloc = size;
-       if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) {
+       if (!XDL_ALLOC_ARRAY(cf->rcrecs, cf->alloc)) {
 
                xdl_free(cf->rchash);
                xdl_cha_free(&cf->ncha);
@@ -112,7 +111,6 @@ static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t
        long hi;
        char const *line;
        xdlclass_t *rcrec;
-       xdlclass_t **rcrecs;
 
        line = rec->ptr;
        hi = (long) XDL_HASHLONG(rec->ha, cf->hbits);
@@ -128,14 +126,8 @@ static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t
                        return -1;
                }
                rcrec->idx = cf->count++;
-               if (cf->count > cf->alloc) {
-                       cf->alloc *= 2;
-                       if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) {
-
+               if (XDL_ALLOC_GROW(cf->rcrecs, cf->count, cf->alloc))
                                return -1;
-                       }
-                       cf->rcrecs = rcrecs;
-               }
                cf->rcrecs[rcrec->idx] = rcrec;
                rcrec->line = line;
                rcrec->size = rec->size;
@@ -164,7 +156,7 @@ static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_
        unsigned long hav;
        char const *blk, *cur, *top, *prev;
        xrecord_t *crec;
-       xrecord_t **recs, **rrecs;
+       xrecord_t **recs;
        xrecord_t **rhash;
        unsigned long *ha;
        char *rchg;
@@ -178,26 +170,21 @@ static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_
 
        if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0)
                goto abort;
-       if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *))))
+       if (!XDL_ALLOC_ARRAY(recs, narec))
                goto abort;
 
        hbits = xdl_hashbits((unsigned int) narec);
        hsize = 1 << hbits;
-       if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *))))
+       if (!XDL_CALLOC_ARRAY(rhash, hsize))
                goto abort;
-       memset(rhash, 0, hsize * sizeof(xrecord_t *));
 
        nrec = 0;
        if ((cur = blk = xdl_mmfile_first(mf, &bsize))) {
                for (top = blk + bsize; cur < top; ) {
                        prev = cur;
                        hav = xdl_hash_record(&cur, top, xpp->flags);
-                       if (nrec >= narec) {
-                               narec *= 2;
-                               if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *))))
-                                       goto abort;
-                               recs = rrecs;
-                       }
+                       if (XDL_ALLOC_GROW(recs, nrec + 1, narec))
+                               goto abort;
                        if (!(crec = xdl_cha_alloc(&xdf->rcha)))
                                goto abort;
                        crec->ptr = prev;
@@ -209,15 +196,14 @@ static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_
                }
        }
 
-       if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char))))
+       if (!XDL_CALLOC_ARRAY(rchg, nrec + 2))
                goto abort;
-       memset(rchg, 0, (nrec + 2) * sizeof(char));
 
        if ((XDF_DIFF_ALG(xpp->flags) != XDF_PATIENCE_DIFF) &&
            (XDF_DIFF_ALG(xpp->flags) != XDF_HISTOGRAM_DIFF)) {
-               if (!(rindex = xdl_malloc((nrec + 1) * sizeof(*rindex))))
+               if (!XDL_ALLOC_ARRAY(rindex, nrec + 1))
                        goto abort;
-               if (!(ha = xdl_malloc((nrec + 1) * sizeof(*ha))))
+               if (!XDL_ALLOC_ARRAY(ha, nrec + 1))
                        goto abort;
        }
 
@@ -383,11 +369,8 @@ static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xd
        xdlclass_t *rcrec;
        char *dis, *dis1, *dis2;
 
-       if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) {
-
+       if (!XDL_CALLOC_ARRAY(dis, xdf1->nrec + xdf2->nrec + 2))
                return -1;
-       }
-       memset(dis, 0, xdf1->nrec + xdf2->nrec + 2);
        dis1 = dis;
        dis2 = dis1 + xdf1->nrec + 1;
 
index 115b2b1640b4504d1b7eb1bc4dc1428b109f6380..9e36f24875d20711b61d243994f324d00a1b211e 100644 (file)
@@ -432,3 +432,20 @@ int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp,
 
        return 0;
 }
+
+void* xdl_alloc_grow_helper(void *p, long nr, long *alloc, size_t size)
+{
+       void *tmp = NULL;
+       size_t n = ((LONG_MAX - 16) / 2 >= *alloc) ? 2 * *alloc + 16 : LONG_MAX;
+       if (nr > n)
+               n = nr;
+       if (SIZE_MAX / size >= n)
+               tmp = xdl_realloc(p, n * size);
+       if (tmp) {
+               *alloc = n;
+       } else {
+               xdl_free(p);
+               *alloc = 0;
+       }
+       return tmp;
+}
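For reference, xdl_alloc_grow_helper() above grows the capacity to 2*alloc + 16, clamped so the doubling cannot exceed LONG_MAX and raised to nr when even that is too small, so repeated one-element XDL_ALLOC_GROW calls step alloc through 16, 48, 112, 240, 496, ... A throwaway check of that sequence could look like this (illustrative scaffolding only, assuming it is compiled inside git's xdiff/ so the xdl_* wrappers resolve as usual):

#include <stdio.h>
#include "xinclude.h"

int main(void)
{
	int *arr = NULL;
	long nr, alloc = 0;

	for (nr = 1; nr <= 500; nr++) {
		long before = alloc;
		if (XDL_ALLOC_GROW(arr, nr, alloc))
			return 1;            /* arr has already been freed on failure */
		if (alloc != before)
			printf("nr=%ld grew %ld -> %ld\n", nr, before, alloc);
	}
	/* Prints growth at nr = 1, 17, 49, 113, 241, 497, following the sequence above. */
	xdl_free(arr);
	return 0;
}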
index fba7bae03c7855ca90aff3f238321581a91a6676..fd0bba94e8b4d2442ba59d0a4327d2d53e10210a 100644 (file)
@@ -42,6 +42,7 @@ int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2,
 int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp,
                       int line1, int count1, int line2, int count2);
 
-
+/* Do not call this function, use XDL_ALLOC_GROW instead */
+void* xdl_alloc_grow_helper(void* p, long nr, long* alloc, size_t size);
 
 #endif /* #if !defined(XUTILS_H) */