]> git.ipfire.org Git - thirdparty/git.git/commitdiff
Merge branch 'jc/local-extern-shell-rules'
authorJunio C Hamano <gitster@pobox.com>
Tue, 16 Apr 2024 21:50:27 +0000 (14:50 -0700)
committerJunio C Hamano <gitster@pobox.com>
Tue, 16 Apr 2024 21:50:27 +0000 (14:50 -0700)
Document and apply workaround for a buggy version of dash that
mishandles "local var=val" construct.

* jc/local-extern-shell-rules:
  t1016: local VAR="VAL" fix
  t0610: local VAR="VAL" fix
  t: teach lint that RHS of 'local VAR=VAL' needs to be quoted
  t: local VAR="VAL" (quote ${magic-reference})
  t: local VAR="VAL" (quote command substitution)
  t: local VAR="VAL" (quote positional parameters)
  CodingGuidelines: quote assigned value in 'local var=$val'
  CodingGuidelines: describe "export VAR=VAL" rule

84 files changed:
.editorconfig
Documentation/CodingGuidelines
Documentation/MyFirstObjectWalk.txt
Documentation/RelNotes/2.45.0.txt
Documentation/config/clone.txt
Documentation/config/core.txt
Documentation/config/init.txt
Documentation/git-add.txt
Documentation/git-clone.txt
Documentation/git-init.txt
Documentation/git-pack-refs.txt
Documentation/git-update-ref.txt
Documentation/githooks.txt
Documentation/urls.txt
INSTALL
Makefile
add-patch.c
advice.c
apply.c
builtin/add.c
builtin/checkout.c
builtin/commit.c
builtin/credential-cache--daemon.c
builtin/credential-cache.c
builtin/fetch.c
builtin/gc.c
builtin/merge-tree.c
builtin/pack-refs.c
builtin/update-ref.c
compat/mingw.c
compat/mingw.h
config.c
config.mak.uname
contrib/completion/git-prompt.sh
contrib/vscode/init.sh
diff-lib.c
git-compat-util.h
git-curl-compat.h
http.c
imap-send.c
mem-pool.c
midx-write.c [new file with mode: 0644]
midx.c
midx.h
path.c
path.h
read-cache-ll.h
read-cache.c
refs.h
refs/reftable-backend.c
reftable/basics.c
reftable/basics.h
reftable/basics_test.c
reftable/block.c
reftable/error.c
reftable/record.c
reftable/record.h
reftable/refname.c
reftable/reftable-error.h
reftable/stack.c
reftable/stack_test.c
remote-curl.c
revision.h
t/helper/test-ref-store.c
t/oid-info/hash-info
t/t0301-credential-cache.sh
t/t0450-txt-doc-vs-help.sh
t/t0601-reffiles-pack-refs.sh
t/t0610-reftable-basics.sh
t/t1400-update-ref.sh
t/t2020-checkout-detach.sh
t/t2104-update-index-skip-worktree.sh
t/t2200-add-update.sh
t/t3200-branch.sh
t/t3700-add.sh
t/t3701-add-interactive.sh
t/t4126-apply-empty.sh
t/t6500-gc.sh
t/t7004-tag.sh
t/t7300-clean.sh
t/t7400-submodule-basic.sh
t/t7501-commit-basic-functionality.sh
t/test-lib.sh
t/unit-tests/t-prio-queue.c

index f9d819623d832113014dd5d5366e8ee44ac9666a..15d6cbeab109efadb786b7e0d63fcfbe8eb79ee8 100644 (file)
@@ -4,7 +4,7 @@ insert_final_newline = true
 
 # The settings for C (*.c and *.h) files are mirrored in .clang-format.  Keep
 # them in sync.
-[*.{c,h,sh,perl,pl,pm,txt}]
+[{*.{c,h,sh,perl,pl,pm,txt},config.mak.*,Makefile}]
 indent_style = tab
 tab_width = 8
 
index 30bf2904183570bc1899c77e6d6569052122b2fb..1d92b2da03e8ca4f6f562ed55e2a6d9199b592c3 100644 (file)
@@ -657,15 +657,15 @@ Writing Documentation:
   - Prefer succinctness and matter-of-factly describing functionality
     in the abstract.  E.g.
 
-     --short:: Emit output in the short-format.
+     `--short`:: Emit output in the short-format.
 
     and avoid something like these overly verbose alternatives:
 
-     --short:: Use this to emit output in the short-format.
-     --short:: You can use this to get output in the short-format.
-     --short:: A user who prefers shorter output could....
-     --short:: Should a person and/or program want shorter output, he
-               she/they/it can...
+     `--short`:: Use this to emit output in the short-format.
+     `--short`:: You can use this to get output in the short-format.
+     `--short`:: A user who prefers shorter output could....
+     `--short`:: Should a person and/or program want shorter output, he
+                 she/they/it can...
 
     This practice often eliminates the need to involve human actors in
     your description, but it is a good practice regardless of the
@@ -675,12 +675,12 @@ Writing Documentation:
     addressing the hypothetical user, and possibly "we" when
     discussing how the program might react to the user.  E.g.
 
-      You can use this option instead of --xyz, but we might remove
+      You can use this option instead of `--xyz`, but we might remove
       support for it in future versions.
 
     while keeping in mind that you can probably be less verbose, e.g.
 
-      Use this instead of --xyz. This option might be removed in future
+      Use this instead of `--xyz`. This option might be removed in future
       versions.
 
   - If you still need to refer to an example person that is
@@ -698,68 +698,118 @@ Writing Documentation:
  The same general rule as for code applies -- imitate the existing
  conventions.
 
- A few commented examples follow to provide reference when writing or
- modifying command usage strings and synopsis sections in the manual
- pages:
 
- Placeholders are spelled in lowercase and enclosed in angle brackets:
-   <file>
-   --sort=<key>
-   --abbrev[=<n>]
+Markup:
+
+ Literal parts (e.g. use of command-line options, command names,
+ branch names, URLs, pathnames (files and directories), configuration and
+ environment variables) must be typeset as verbatim (i.e. wrapped with
+ backticks):
+   `--pretty=oneline`
+   `git rev-list`
+   `remote.pushDefault`
+   `http://git.example.com`
+   `.git/config`
+   `GIT_DIR`
+   `HEAD`
+   `umask`(2)
+
+ An environment variable must be prefixed with "$" only when referring to its
+ value and not when referring to the variable itself, in this case there is
+ nothing to add except the backticks:
+   `GIT_DIR` is specified
+   `$GIT_DIR/hooks/pre-receive`
+
+ Word phrases enclosed in `backtick characters` are rendered literally
+ and will not be further expanded. The use of `backticks` to achieve the
+ previous rule means that literal examples should not use AsciiDoc
+ escapes.
+   Correct:
+      `--pretty=oneline`
+   Incorrect:
+      `\--pretty=oneline`
+
+ Placeholders are spelled in lowercase and enclosed in
+ angle brackets surrounded by underscores:
+   _<file>_
+   _<commit>_
 
  If a placeholder has multiple words, they are separated by dashes:
-   <new-branch-name>
-   --template=<template-directory>
+   _<new-branch-name>_
+   _<template-directory>_
+
+ A placeholder is not enclosed in backticks, as it is not a literal.
+
+ When needed, use a distinctive identifier for placeholders, usually
+ made of a qualification and a type:
+   _<git-dir>_
+   _<key-id>_
+
+ When literal and placeholders are mixed, each markup is applied for
+ each sub-entity. If they are stuck, a special markup, called
+ unconstrained formatting is required.
+ Unconstrained formating for placeholders is __<like-this>__
+ Unconstrained formatting for literal formatting is ++like this++
+   `--jobs` _<n>_
+   ++--sort=++__<key>__
+   __<directory>__++/.git++
+   ++remote.++__<name>__++.mirror++
+
+ caveat: ++ unconstrained format is not verbatim and may expand
+ content. Use Asciidoc escapes inside them.
 
- When a placeholder is cited in text paragraph, it is enclosed in angle
- brackets to remind the reader the reference in the synopsis section.
- For better visibility, the placeholder is typeset in italics:
-   The _<file>_ to be added.
+Synopsis Syntax
+
+ Syntax grammar is formatted neither as literal nor as placeholder.
+
+ A few commented examples follow to provide reference when writing or
+ modifying command usage strings and synopsis sections in the manual
+ pages:
 
  Possibility of multiple occurrences is indicated by three dots:
-   <file>...
+   _<file>_...
    (One or more of <file>.)
 
  Optional parts are enclosed in square brackets:
-   [<file>...]
+   [_<file>_...]
    (Zero or more of <file>.)
 
-   --exec-path[=<path>]
+   ++--exec-path++[++=++__<path>__]
    (Option with an optional argument.  Note that the "=" is inside the
    brackets.)
 
-   [<patch>...]
+   [_<patch>_...]
    (Zero or more of <patch>.  Note that the dots are inside, not
    outside the brackets.)
 
  Multiple alternatives are indicated with vertical bars:
-   [-q | --quiet]
-   [--utf8 | --no-utf8]
+   [`-q` | `--quiet`]
+   [`--utf8` | `--no-utf8`]
 
  Use spacing around "|" token(s), but not immediately after opening or
  before closing a [] or () pair:
-   Do: [-q | --quiet]
-   Don't: [-q|--quiet]
+   Do: [`-q` | `--quiet`]
+   Don't: [`-q`|`--quiet`]
 
  Don't use spacing around "|" tokens when they're used to separate the
  alternate arguments of an option:
-    Do: --track[=(direct|inherit)]
-    Don't: --track[=(direct | inherit)]
+    Do: ++--track++[++=++(`direct`|`inherit`)]`
+    Don't: ++--track++[++=++(`direct` | `inherit`)]
 
  Parentheses are used for grouping:
-   [(<rev> | <range>)...]
+   [(_<rev>_ | _<range>_)...]
    (Any number of either <rev> or <range>.  Parens are needed to make
    it clear that "..." pertains to both <rev> and <range>.)
 
-   [(-p <parent>)...]
+   [(`-p` _<parent>_)...]
    (Any number of option -p, each with one <parent> argument.)
 
-   git remote set-head <name> (-a | -d | <branch>)
+   `git remote set-head` _<name>_ (`-a` | `-d` | _<branch>_)
    (One and only one of "-a", "-d" or "<branch>" _must_ (no square
    brackets) be provided.)
 
  And a somewhat more contrived example:
-   --diff-filter=[(A|C|D|M|R|T|U|X|B)...[*]]
+   `--diff-filter=[(A|C|D|M|R|T|U|X|B)...[*]]`
    Here "=" is outside the brackets, because "--diff-filter=" is a
    valid usage.  "*" has its own pair of brackets, because it can
    (optionally) be specified only when one or more of the letters is
@@ -770,39 +820,6 @@ Writing Documentation:
    the user would type into a shell and use 'Git' (uppercase first letter)
    when talking about the version control system and its properties.
 
- A few commented examples follow to provide reference when writing or
- modifying paragraphs or option/command explanations that contain options
- or commands:
-
- Literal examples (e.g. use of command-line options, command names,
- branch names, URLs, pathnames (files and directories), configuration and
- environment variables) must be typeset in monospace (i.e. wrapped with
- backticks):
-   `--pretty=oneline`
-   `git rev-list`
-   `remote.pushDefault`
-   `http://git.example.com`
-   `.git/config`
-   `GIT_DIR`
-   `HEAD`
-
- An environment variable must be prefixed with "$" only when referring to its
- value and not when referring to the variable itself, in this case there is
- nothing to add except the backticks:
-   `GIT_DIR` is specified
-   `$GIT_DIR/hooks/pre-receive`
-
- Word phrases enclosed in `backtick characters` are rendered literally
- and will not be further expanded. The use of `backticks` to achieve the
- previous rule means that literal examples should not use AsciiDoc
- escapes.
-   Correct:
-      `--pretty=oneline`
-   Incorrect:
-      `\--pretty=oneline`
-
-A placeholder is not enclosed in backticks, as it is not a literal.
-
  If some place in the documentation needs to typeset a command usage
  example with inline substitutions, it is fine to use +monospaced and
  inline substituted text+ instead of `monospaced literal text`, and with
index c68cdb11b9d5a53ddc11361d0f1c889edeb24536..dec8afe5b10533aba5548699b5414b6d459be371 100644 (file)
@@ -210,13 +210,14 @@ We'll also need to include the `config.h` header:
 
 ...
 
-static int git_walken_config(const char *var, const char *value, void *cb)
+static int git_walken_config(const char *var, const char *value,
+                            const struct config_context *ctx, void *cb)
 {
        /*
         * For now, we don't have any custom configuration, so fall back to
         * the default config.
         */
-       return git_default_config(var, value, cb);
+       return git_default_config(var, value, ctx, cb);
 }
 ----
 
@@ -389,10 +390,11 @@ modifying `rev_info.grep_filter`, which is a `struct grep_opt`.
 First some setup. Add `grep_config()` to `git_walken_config()`:
 
 ----
-static int git_walken_config(const char *var, const char *value, void *cb)
+static int git_walken_config(const char *var, const char *value,
+                            const struct config_context *ctx, void *cb)
 {
-       grep_config(var, value, cb);
-       return git_default_config(var, value, cb);
+       grep_config(var, value, ctx, cb);
+       return git_default_config(var, value, ctx, cb);
 }
 ----
 
@@ -523,7 +525,7 @@ about each one.
 
 We can base our work on an example. `git pack-objects` prepares all kinds of
 objects for packing into a bitmap or packfile. The work we are interested in
-resides in `builtins/pack-objects.c:get_object_list()`; examination of that
+resides in `builtin/pack-objects.c:get_object_list()`; examination of that
 function shows that the all-object walk is being performed by
 `traverse_commit_list()` or `traverse_commit_list_filtered()`. Those two
 functions reside in `list-objects.c`; examining the source shows that, despite
@@ -732,8 +734,8 @@ walk we've just performed:
        } else {
                trace_printf(
                        _("Filtered object walk with filterspec 'tree:1'.\n"));
-               CALLOC_ARRAY(rev->filter, 1);
-               parse_list_objects_filter(rev->filter, "tree:1");
+
+               parse_list_objects_filter(&rev->filter, "tree:1");
        }
        traverse_commit_list(rev, walken_show_commit,
                             walken_show_object, NULL);
@@ -752,10 +754,12 @@ points to the same tree object as its grandparent.)
 === Counting Omitted Objects
 
 We also have the capability to enumerate all objects which were omitted by a
-filter, like with `git log --filter=<spec> --filter-print-omitted`. Asking
-`traverse_commit_list_filtered()` to populate the `omitted` list means that our
-object walk does not perform any better than an unfiltered object walk; all
-reachable objects are walked in order to populate the list.
+filter, like with `git log --filter=<spec> --filter-print-omitted`. To do this,
+change `traverse_commit_list()` to `traverse_commit_list_filtered()`, which is
+able to populate an `omitted` list.  Asking for this list of filtered objects
+may cause performance degradations, however, because in this case, despite
+filtering objects, the possibly much larger set of all reachable objects must
+be processed in order to populate that list.
 
 First, add the `struct oidset` and related items we will use to iterate it:
 
@@ -776,8 +780,9 @@ static void walken_object_walk(
        ...
 ----
 
-Modify the call to `traverse_commit_list_filtered()` to include your `omitted`
-object:
+Replace the call to `traverse_commit_list()` with
+`traverse_commit_list_filtered()` and pass a pointer to the `omitted` oidset
+defined and initialized above:
 
 ----
        ...
@@ -843,7 +848,7 @@ those lines without having to recompile.
 With only that change, run again (but save yourself some scrollback):
 
 ----
-$ GIT_TRACE=1 ./bin-wrappers/git walken | head -n 10
+$ GIT_TRACE=1 ./bin-wrappers/git walken 2>&1 | head -n 10
 ----
 
 Take a look at the top commit with `git show` and the object ID you printed; it
@@ -871,7 +876,7 @@ of the first handful:
 
 ----
 $ make
-$ GIT_TRACE=1 ./bin-wrappers git walken | tail -n 10
+$ GIT_TRACE=1 ./bin-wrappers/git walken 2>&1 | tail -n 10
 ----
 
 The last commit object given should have the same OID as the one we saw at the
index 5bca5a0c4d3d0f4353a4ba028a9d40557e14789d..039cce7708571bd79e76ee03bad962e2836aa899 100644 (file)
@@ -73,6 +73,17 @@ UI, Workflows & Features
  * core.commentChar used to be limited to a single byte, but has been
    updated to allow an arbitrary multi-byte sequence.
 
+ * "git add -p" and other "interactive hunk selection" UI has learned to
+   skip showing the hunk immediately after it has already been shown, and
+   an additional action to explicitly ask to reshow the current hunk.
+
+ * "git pack-refs" learned the "--auto" option, which is a useful
+   addition to be triggered from "git gc --auto".
+
+ * "git add -u <pathspec>" and "git commit [-i] <pathspec>" did not
+   diagnose a pathspec element that did not match any files in certain
+   situations, unlike "git add <pathspec>" did.
+
 
 Performance, Internal Implementation, Development Support etc.
 
@@ -140,6 +151,28 @@ Performance, Internal Implementation, Development Support etc.
    the "t/" directory with "make t<num>-*.sh t<num>-*.sh".
    (merge 8d383806fc pb/test-scripts-are-build-targets later to maint).
 
+ * The "hint:" messages given by the advice mechanism, when given a
+   message with a blank line, left a line with trailing whitespace,
+   which has been cleansed.
+
+ * Documentation rules has been explicitly described how to mark-up
+   literal parts and a few manual pages have been updated as examples.
+
+ * The .editorconfig file has been taught that a Makefile uses HT
+   indentation.
+
+ * t-prio-queue test has been cleaned up by using C99 compound
+   literals; this is meant to also serve as a weather-balloon to smoke
+   out folks with compilers who have trouble compiling code that uses
+   the feature.
+
+ * Windows binary used to decide the use of unix-domain socket at
+   build time, but it learned to make the decision at runtime instead.
+
+ * The "shared repository" test in the t0610 reftable test failed
+   under restrictive umask setting (e.g. 007), which has been
+   corrected.
+
 
 Fixes since v2.44
 -----------------
@@ -302,6 +335,52 @@ Fixes since v2.44
    corrected.
    (merge f999d5188b bl/pretty-shorthand-config-fix later to maint).
 
+ * "git apply" failed to extract the filename the patch applied to,
+   when the change was about an empty file created in or deleted from
+   a directory whose name ends with a SP, which has been corrected.
+   (merge 776ffd1a30 jc/apply-parse-diff-git-header-names-fix later to maint).
+
+ * Update a more recent tutorial doc.
+   (merge 95ab557b4b dg/myfirstobjectwalk-updates later to maint).
+
+ * The test script had an incomplete and ineffective attempt to avoid
+   clobbering the testing user's real crontab (and its equivalents),
+   which has been completed.
+   (merge 73cb87773b es/test-cron-safety later to maint).
+
+ * Use advice_if_enabled() API to rewrite a simple pattern to
+   call advise() after checking advice_enabled().
+   (merge 6412d01527 rj/use-adv-if-enabled later to maint).
+
+ * Another "set -u" fix for the bash prompt (in contrib/) script.
+   (merge d7805bc743 vs/complete-with-set-u-fix later to maint).
+
+ * "git checkout/switch --detach foo", after switching to the detached
+   HEAD state, gave the tracking information for the 'foo' branch,
+   which was pointless.
+
+ * "git apply" has been updated to lift the hardcoded pathname length
+   limit, which in turn allowed a mksnpath() function that is no
+   longer used.
+   (merge 708f7e0590 rs/apply-lift-path-length-limit later to maint).
+
+ * A file descriptor leak in an error codepath, used when "git apply
+   --reject" fails to create the *.rej file, has been corrected.
+   (merge 2b1f456adf rs/apply-reject-fd-leakfix later to maint).
+
+ * A config parser callback function fell through instead of returning
+   after recognising and processing a variable, wasting cycles, which
+   has been corrected.
+   (merge a816ccd642 ds/fetch-config-parse-microfix later to maint).
+
+ * Fix was added to work around a regression in libcURL 8.7.0 (which has
+   already been fixed in their tip of the tree).
+   (merge 92a209bf24 jk/libcurl-8.7-regression-workaround later to maint).
+
+ * The variable that holds the value read from the core.excludefile
+   configuration variable used to leak, which has been corrected.
+   (merge 0e0fefb29f jc/unleak-core-excludesfile later to maint).
+
  * Other code cleanup, docfix, build fix, etc.
    (merge f0e578c69c rs/use-xstrncmpz later to maint).
    (merge 83e6eb7d7a ba/credential-test-clean-fix later to maint).
@@ -326,3 +405,6 @@ Fixes since v2.44
    (merge 86f9ce7dd6 bl/doc-config-fixes later to maint).
    (merge 0d527842b7 az/grep-group-error-message-update later to maint).
    (merge 7c43bdf07b rs/strbuf-expand-bad-format later to maint).
+   (merge 8b68b48d5c ds/typofix-core-config-doc later to maint).
+   (merge 39bb692152 rs/imap-send-use-xsnprintf later to maint).
+   (merge 8d320cec60 jc/t2104-style-fixes later to maint).
index d037b57f729e5e10549235b6278c123754d0aee8..0a10efd174ea4bdfe84c4749fd87e3b0dcdae211 100644 (file)
@@ -1,13 +1,23 @@
-clone.defaultRemoteName::
+`clone.defaultRemoteName`::
        The name of the remote to create when cloning a repository.  Defaults to
-       `origin`, and can be overridden by passing the `--origin` command-line
+       `origin`.
+ifdef::git-clone[]
+       It can be overridden by passing the `--origin` command-line
+       option.
+endif::[]
+ifndef::git-clone[]
+       It can be overridden by passing the `--origin` command-line
        option to linkgit:git-clone[1].
+endif::[]
 
-clone.rejectShallow::
+`clone.rejectShallow`::
        Reject cloning a repository if it is a shallow one; this can be overridden by
-       passing the `--reject-shallow` option on the command line. See linkgit:git-clone[1]
+       passing the `--reject-shallow` option on the command line.
+ifndef::git-clone[]
+       See linkgit:git-clone[1].
+endif::[]
 
-clone.filterSubmodules::
+`clone.filterSubmodules`::
        If a partial clone filter is provided (see `--filter` in
        linkgit:git-rev-list[1]) and `--recurse-submodules` is used, also apply
        the filter to submodules.
index bbe869c4975cdbacec09fc6616bbfba28a668b18..93d65e1dfd24f715e08d8a4d0bb71ca77fa6fde6 100644 (file)
@@ -703,7 +703,7 @@ core.createObject::
        will not overwrite existing objects.
 +
 On some file system/operating system combinations, this is unreliable.
-Set this config setting to 'rename' there; However, This will remove the
+Set this config setting to 'rename' there; however, this will remove the
 check that makes sure that existing object files will not get overwritten.
 
 core.notesRef::
index dd1d8332737fe89a2feca44ce59adad4f64bce5a..af03acdbcbbeee913429d4828b976dd166242938 100644 (file)
@@ -3,8 +3,8 @@ ifndef::git-init[]
 :see-git-init: (See the "TEMPLATE DIRECTORY" section of linkgit:git-init[1].)
 endif::[]
 
-init.templateDir::
+`init.templateDir`::
        Specify the directory from which templates will be copied. {see-git-init}
-init.defaultBranch::
+`init.defaultBranch`::
        Allows overriding the default branch name e.g. when initializing
        a new repository.
index 14a371fff3569eac4fd633ebc945b1d8793b1237..aceaa025e3020adbb6f958f2314eccc99b67d470 100644 (file)
@@ -348,6 +348,7 @@ patch::
        K - leave this hunk undecided, see previous hunk
        s - split the current hunk into smaller hunks
        e - manually edit the current hunk
+       p - print the current hunk
        ? - print help
 +
 After deciding the fate for all hunks, if there is any hunk
index f90977a8519b4c9c057438afa042b4bd25855567..5de18de2ab83fef4fffdb95348adddec02d1fcfa 100644 (file)
@@ -9,15 +9,15 @@ git-clone - Clone a repository into a new directory
 SYNOPSIS
 --------
 [verse]
-'git clone' [--template=<template-directory>]
-         [-l] [-s] [--no-hardlinks] [-q] [-n] [--bare] [--mirror]
-         [-o <name>] [-b <name>] [-u <upload-pack>] [--reference <repository>]
-         [--dissociate] [--separate-git-dir <git-dir>]
-         [--depth <depth>] [--[no-]single-branch] [--no-tags]
-         [--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
-         [--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
-         [--filter=<filter> [--also-filter-submodules]] [--] <repository>
-         [<directory>]
+`git clone` [++--template=++__<template-directory>__]
+         [`-l`] [`-s`] [`--no-hardlinks`] [`-q`] [`-n`] [`--bare`] [`--mirror`]
+         [`-o` _<name>_] [`-b` _<name>_] [`-u` _<upload-pack>_] [`--reference` _<repository>_]
+         [`--dissociate`] [`--separate-git-dir` _<git-dir>_]
+         [`--depth` _<depth>_] [`--`[`no-`]`single-branch`] [`--no-tags`]
+         [++--recurse-submodules++[++=++__<pathspec>__]] [`--`[`no-`]`shallow-submodules`]
+         [`--`[`no-`]`remote-submodules`] [`--jobs` _<n>_] [`--sparse`] [`--`[`no-`]`reject-shallow`]
+         [++--filter=++__<filter-spec>__] [`--also-filter-submodules`]] [`--`] _<repository>_
+         [_<directory>_]
 
 DESCRIPTION
 -----------
@@ -31,7 +31,7 @@ currently active branch.
 After the clone, a plain `git fetch` without arguments will update
 all the remote-tracking branches, and a `git pull` without
 arguments will in addition merge the remote master branch into the
-current master branch, if any (this is untrue when "--single-branch"
+current master branch, if any (this is untrue when `--single-branch`
 is given; see below).
 
 This default configuration is achieved by creating references to
@@ -42,12 +42,12 @@ configuration variables.
 
 OPTIONS
 -------
--l::
---local::
+`-l`::
+`--local`::
        When the repository to clone from is on a local machine,
        this flag bypasses the normal "Git aware" transport
        mechanism and clones the repository by making a copy of
-       HEAD and everything under objects and refs directories.
+       `HEAD` and everything under objects and refs directories.
        The files under `.git/objects/` directory are hardlinked
        to save space when possible.
 +
@@ -67,14 +67,14 @@ links.
 source repository, similar to running `cp -r src dst` while modifying
 `src`.
 
---no-hardlinks::
+`--no-hardlinks`::
        Force the cloning process from a repository on a local
        filesystem to copy the files under the `.git/objects`
        directory instead of using hardlinks. This may be desirable
        if you are trying to make a back-up of your repository.
 
--s::
---shared::
+`-s`::
+`--shared`::
        When the repository to clone is on the local machine,
        instead of using hard links, automatically setup
        `.git/objects/info/alternates` to share the objects
@@ -101,7 +101,7 @@ If you want to break the dependency of a repository cloned with `--shared` on
 its source repository, you can simply run `git repack -a` to copy all
 objects from the source repository into a pack in the cloned repository.
 
---reference[-if-able] <repository>::
+`--reference`[`-if-able`] _<repository>_::
        If the reference _<repository>_ is on the local machine,
        automatically setup `.git/objects/info/alternates` to
        obtain objects from the reference _<repository>_.  Using
@@ -115,7 +115,7 @@ objects from the source repository into a pack in the cloned repository.
 *NOTE*: see the NOTE for the `--shared` option, and also the
 `--dissociate` option.
 
---dissociate::
+`--dissociate`::
        Borrow the objects from reference repositories specified
        with the `--reference` options only to reduce network
        transfer, and stop borrowing from them after a clone is made
@@ -126,43 +126,43 @@ objects from the source repository into a pack in the cloned repository.
        same repository, and this option can be used to stop the
        borrowing.
 
--q::
---quiet::
+`-q`::
+`--quiet`::
        Operate quietly.  Progress is not reported to the standard
        error stream.
 
--v::
---verbose::
+`-v`::
+`--verbose`::
        Run verbosely. Does not affect the reporting of progress status
        to the standard error stream.
 
---progress::
+`--progress`::
        Progress status is reported on the standard error stream
        by default when it is attached to a terminal, unless `--quiet`
        is specified. This flag forces progress status even if the
        standard error stream is not directed to a terminal.
 
---server-option=<option>::
+++--server-option=++__<option>__::
        Transmit the given string to the server when communicating using
        protocol version 2.  The given string must not contain a NUL or LF
        character.  The server's handling of server options, including
        unknown ones, is server-specific.
-       When multiple `--server-option=<option>` are given, they are all
+       When multiple ++--server-option=++__<option>__ are given, they are all
        sent to the other side in the order listed on the command line.
 
--n::
---no-checkout::
+`-n`::
+`--no-checkout`::
        No checkout of HEAD is performed after the clone is complete.
 
---[no-]reject-shallow::
+`--`[`no-`]`reject-shallow`::
        Fail if the source repository is a shallow repository.
        The `clone.rejectShallow` configuration variable can be used to
        specify the default.
 
---bare::
+`--bare`::
        Make a 'bare' Git repository.  That is, instead of
        creating _<directory>_ and placing the administrative
-       files in `<directory>/.git`, make the _<directory>_
+       files in _<directory>_`/.git`, make the _<directory>_
        itself the `$GIT_DIR`. This obviously implies the `--no-checkout`
        because there is nowhere to check out the working tree.
        Also the branch heads at the remote are copied directly
@@ -171,28 +171,28 @@ objects from the source repository into a pack in the cloned repository.
        used, neither remote-tracking branches nor the related
        configuration variables are created.
 
---sparse::
+`--sparse`::
        Employ a sparse-checkout, with only files in the toplevel
        directory initially being present.  The
        linkgit:git-sparse-checkout[1] command can be used to grow the
        working directory as needed.
 
---filter=<filter-spec>::
+++--filter=++__<filter-spec>__::
        Use the partial clone feature and request that the server sends
        a subset of reachable objects according to a given object filter.
        When using `--filter`, the supplied _<filter-spec>_ is used for
        the partial clone filter. For example, `--filter=blob:none` will
        filter out all blobs (file contents) until needed by Git. Also,
-       `--filter=blob:limit=<size>` will filter out all blobs of size
+       ++--filter=blob:limit=++__<size>__ will filter out all blobs of size
        at least _<size>_. For more details on filter specifications, see
        the `--filter` option in linkgit:git-rev-list[1].
 
---also-filter-submodules::
+`--also-filter-submodules`::
        Also apply the partial clone filter to any submodules in the repository.
        Requires `--filter` and `--recurse-submodules`. This can be turned on by
        default by setting the `clone.filterSubmodules` config option.
 
---mirror::
+`--mirror`::
        Set up a mirror of the source repository.  This implies `--bare`.
        Compared to `--bare`, `--mirror` not only maps local branches of the
        source to local branches of the target, it maps all refs (including
@@ -200,14 +200,14 @@ objects from the source repository into a pack in the cloned repository.
        that all these refs are overwritten by a `git remote update` in the
        target repository.
 
--o <name>::
---origin <name>::
+`-o` _<name>_::
+`--origin` _<name>_::
        Instead of using the remote name `origin` to keep track of the upstream
        repository, use _<name>_.  Overrides `clone.defaultRemoteName` from the
        config.
 
--b <name>::
---branch <name>::
+`-b` _<name>_::
+`--branch` _<name>_::
        Instead of pointing the newly created HEAD to the branch pointed
        to by the cloned repository's HEAD, point to _<name>_ branch
        instead. In a non-bare repository, this is the branch that will
@@ -215,18 +215,18 @@ objects from the source repository into a pack in the cloned repository.
        `--branch` can also take tags and detaches the HEAD at that commit
        in the resulting repository.
 
--u <upload-pack>::
---upload-pack <upload-pack>::
+`-u` _<upload-pack>_::
+`--upload-pack` _<upload-pack>_::
        When given, and the repository to clone from is accessed
        via ssh, this specifies a non-default path for the command
        run on the other end.
 
---template=<template-directory>::
+++--template=++__<template-directory>__::
        Specify the directory from which templates will be used;
        (See the "TEMPLATE DIRECTORY" section of linkgit:git-init[1].)
 
--c <key>=<value>::
---config <key>=<value>::
+`-c` __<key>__++=++__<value>__::
+`--config` __<key>__++=++__<value>__::
        Set a configuration variable in the newly-created repository;
        this takes effect immediately after the repository is
        initialized, but before the remote history is fetched or any
@@ -239,25 +239,25 @@ objects from the source repository into a pack in the cloned repository.
 Due to limitations of the current implementation, some configuration
 variables do not take effect until after the initial fetch and checkout.
 Configuration variables known to not take effect are:
-`remote.<name>.mirror` and `remote.<name>.tagOpt`.  Use the
+++remote.++__<name>__++.mirror++ and ++remote.++__<name>__++.tagOpt++.  Use the
 corresponding `--mirror` and `--no-tags` options instead.
 
---depth <depth>::
+`--depth` _<depth>_::
        Create a 'shallow' clone with a history truncated to the
        specified number of commits. Implies `--single-branch` unless
        `--no-single-branch` is given to fetch the histories near the
        tips of all branches. If you want to clone submodules shallowly,
        also pass `--shallow-submodules`.
 
---shallow-since=<date>::
+++--shallow-since=++__<date>__::
        Create a shallow clone with a history after the specified time.
 
---shallow-exclude=<revision>::
+++--shallow-exclude=++__<revision>__::
        Create a shallow clone with a history, excluding commits
        reachable from a specified remote branch or tag.  This option
        can be specified multiple times.
 
---[no-]single-branch::
+`--`[`no-`]`single-branch`::
        Clone only the history leading to the tip of a single branch,
        either specified by the `--branch` option or the primary
        branch remote's `HEAD` points at.
@@ -267,7 +267,7 @@ corresponding `--mirror` and `--no-tags` options instead.
        branch when `--single-branch` clone was made, no remote-tracking
        branch is created.
 
---no-tags::
+`--no-tags`::
        Don't clone any tags, and set
        `remote.<remote>.tagOpt=--no-tags` in the config, ensuring
        that future `git pull` and `git fetch` operations won't follow
@@ -279,7 +279,7 @@ maintain a branch with no references other than a single cloned
 branch. This is useful e.g. to maintain minimal clones of the default
 branch of some repository for search indexing.
 
---recurse-submodules[=<pathspec>]::
+`--recurse-submodules`[`=`{empty}__<pathspec>__]::
        After the clone is created, initialize and clone submodules
        within based on the provided _<pathspec>_.  If no _=<pathspec>_ is
        provided, all submodules are initialized and cloned.
@@ -295,46 +295,46 @@ the clone is finished. This option is ignored if the cloned repository does
 not have a worktree/checkout (i.e. if any of `--no-checkout`/`-n`, `--bare`,
 or `--mirror` is given)
 
---[no-]shallow-submodules::
+`--`[`no-`]`shallow-submodules`::
        All submodules which are cloned will be shallow with a depth of 1.
 
---[no-]remote-submodules::
+`--`[`no-`]`remote-submodules`::
        All submodules which are cloned will use the status of the submodule's
        remote-tracking branch to update the submodule, rather than the
        superproject's recorded SHA-1. Equivalent to passing `--remote` to
        `git submodule update`.
 
---separate-git-dir=<git-dir>::
+`--separate-git-dir=`{empty}__<git-dir>__::
        Instead of placing the cloned repository where it is supposed
        to be, place the cloned repository at the specified directory,
        then make a filesystem-agnostic Git symbolic link to there.
        The result is Git repository can be separated from working
        tree.
 
---ref-format=<ref-format>::
+`--ref-format=`{empty}__<ref-format>__::
 
 Specify the given ref storage format for the repository. The valid values are:
 +
 include::ref-storage-format.txt[]
 
--j <n>::
---jobs <n>::
+`-j` _<n>_::
+`--jobs` _<n>_::
        The number of submodules fetched at the same time.
        Defaults to the `submodule.fetchJobs` option.
 
-<repository>::
+_<repository>_::
        The (possibly remote) _<repository>_ to clone from.  See the
        <<URLS,GIT URLS>> section below for more information on specifying
        repositories.
 
-<directory>::
+_<directory>_::
        The name of a new directory to clone into.  The "humanish"
        part of the source repository is used if no _<directory>_ is
        explicitly given (`repo` for `/path/to/repo.git` and `foo`
        for `host.xz:foo/.git`).  Cloning into an existing directory
        is only allowed if the directory is empty.
 
---bundle-uri=<uri>::
+`--bundle-uri=`{empty}__<uri>__::
        Before fetching from the remote, fetch a bundle from the given
        _<uri>_ and unbundle the data into the local repository. The refs
        in the bundle will be stored under the hidden `refs/bundle/*`
index 2f864e11ed9719a2443ece695534636fcb6e2631..daff93bd164b7c0bba6d977276a5d0a81f81d1fa 100644 (file)
@@ -9,11 +9,11 @@ git-init - Create an empty Git repository or reinitialize an existing one
 SYNOPSIS
 --------
 [verse]
-'git init' [-q | --quiet] [--bare] [--template=<template-directory>]
-         [--separate-git-dir <git-dir>] [--object-format=<format>]
-         [--ref-format=<format>]
-         [-b <branch-name> | --initial-branch=<branch-name>]
-         [--shared[=<permissions>]] [<directory>]
+`git init` [`-q` | `--quiet`] [`--bare`] [++--template=++__<template-directory>__]
+         [`--separate-git-dir` _<git-dir>_] [++--object-format=++__<format>__]
+         [++--ref-format=++__<format>__]
+         [`-b` _<branch-name>_ | ++--initial-branch=++__<branch-name>__]
+         [++--shared++[++=++__<permissions>__]] [_<directory>_]
 
 
 DESCRIPTION
@@ -41,35 +41,35 @@ the repository to another place if `--separate-git-dir` is given).
 OPTIONS
 -------
 
--q::
---quiet::
+`-q`::
+`--quiet`::
 
 Only print error and warning messages; all other output will be suppressed.
 
---bare::
+`--bare`::
 
 Create a bare repository. If `GIT_DIR` environment is not set, it is set to the
 current working directory.
 
---object-format=<format>::
+++--object-format=++__<format>__::
 
 Specify the given object _<format>_ (hash algorithm) for the repository.  The valid
 values are `sha1` and (if enabled) `sha256`.  `sha1` is the default.
 +
 include::object-format-disclaimer.txt[]
 
---ref-format=<format>::
+++--ref-format=++__<format>__::
 
 Specify the given ref storage _<format>_ for the repository. The valid values are:
 +
 include::ref-storage-format.txt[]
 
---template=<template-directory>::
+++--template=++__<template-directory>__::
 
 Specify the directory from which templates will be used.  (See the "TEMPLATE
 DIRECTORY" section below.)
 
---separate-git-dir=<git-dir>::
+++--separate-git-dir=++__<git-dir>__::
 
 Instead of initializing the repository as a directory to either `$GIT_DIR` or
 `./.git/`, create a text file there containing the path to the actual
@@ -78,53 +78,53 @@ repository.
 +
 If this is a reinitialization, the repository will be moved to the specified path.
 
--b <branch-name>::
---initial-branch=<branch-name>::
+`-b` _<branch-name>_::
+++--initial-branch=++__<branch-name>__::
 
 Use _<branch-name>_ for the initial branch in the newly created
 repository.  If not specified, fall back to the default name (currently
 `master`, but this is subject to change in the future; the name can be
 customized via the `init.defaultBranch` configuration variable).
 
---shared[=(false|true|umask|group|all|world|everybody|<perm>)]::
+++--shared++[++=++(`false`|`true`|`umask`|`group`|`all`|`world`|`everybody`|_<perm>_)]::
 
 Specify that the Git repository is to be shared amongst several users.  This
 allows users belonging to the same group to push into that
 repository.  When specified, the config variable `core.sharedRepository` is
 set so that files and directories under `$GIT_DIR` are created with the
 requested permissions.  When not specified, Git will use permissions reported
-by `umask(2)`.
+by `umask`(2).
 +
 The option can have the following values, defaulting to `group` if no value
 is given:
 +
 --
-umask::
-false::
+`umask`::
+`false`::
 
-Use permissions reported by umask(2). The default, when `--shared` is not
+Use permissions reported by `umask`(2). The default, when `--shared` is not
 specified.
 
-group::
-true::
+`group`::
+`true`::
 
-Make the repository group-writable, (and g+sx, since the git group may not be
+Make the repository group-writable, (and `g+sx`, since the git group may not be
 the primary group of all users). This is used to loosen the permissions of an
-otherwise safe umask(2) value. Note that the umask still applies to the other
+otherwise safe `umask`(2) value. Note that the umask still applies to the other
 permission bits (e.g. if umask is `0022`, using `group` will not remove read
 privileges from other (non-group) users). See `0xxx` for how to exactly specify
 the repository permissions.
 
-all::
-world::
-everybody::
+`all`::
+`world`::
+`everybody`::
 
 Same as `group`, but make the repository readable by all users.
 
-<perm>::
+_<perm>_::
 
 _<perm>_ is a 3-digit octal number prefixed with `0` and each file
-will have mode _<perm>_. _<perm>_ will override users'`umask(2)`
+will have mode _<perm>_. _<perm>_ will override users' `umask`(2)
 value (and not only loosen permissions as `group` and `all`
 do). `0640` will create a repository which is group-readable, but
 not group-writable or accessible to others. `0660` will create a repo
index 284956acb3c5e8bb168626c9729a17a799c0afd7..2dcabaf74cefb4111e94e92b8c7e47e8346a1d42 100644 (file)
@@ -8,7 +8,7 @@ git-pack-refs - Pack heads and tags for efficient repository access
 SYNOPSIS
 --------
 [verse]
-'git pack-refs' [--all] [--no-prune] [--include <pattern>] [--exclude <pattern>]
+'git pack-refs' [--all] [--no-prune] [--auto] [--include <pattern>] [--exclude <pattern>]
 
 DESCRIPTION
 -----------
@@ -60,6 +60,19 @@ with many branches of historical interests.
 The command usually removes loose refs under `$GIT_DIR/refs`
 hierarchy after packing them.  This option tells it not to.
 
+--auto::
+
+Pack refs as needed depending on the current state of the ref database. The
+behavior depends on the ref format used by the repository and may change in the
+future.
++
+       - "files": No special handling for `--auto` has been implemented.
++
+       - "reftable": Tables are compacted such that they form a geometric
+         sequence. For two tables N and N+1, where N+1 is newer, this
+         maintains the property that N is at least twice as big as N+1. Only
+         tables that violate this property are compacted.
+
 --include <pattern>::
 
 Pack refs based on a `glob(7)` pattern. Repetitions of this option
index 0561808cca04a6873909ad18a6fd5f6e9af8b8de..374a2ebd2b0bdaa1e0a8cb09e172f27d1fb5fada 100644 (file)
@@ -8,21 +8,21 @@ git-update-ref - Update the object name stored in a ref safely
 SYNOPSIS
 --------
 [verse]
-'git update-ref' [-m <reason>] [--no-deref] (-d <ref> [<oldvalue>] | [--create-reflog] <ref> <newvalue> [<oldvalue>] | --stdin [-z])
+'git update-ref' [-m <reason>] [--no-deref] (-d <ref> [<old-oid>] | [--create-reflog] <ref> <new-oid> [<old-oid>] | --stdin [-z])
 
 DESCRIPTION
 -----------
-Given two arguments, stores the <newvalue> in the <ref>, possibly
+Given two arguments, stores the <new-oid> in the <ref>, possibly
 dereferencing the symbolic refs.  E.g. `git update-ref HEAD
-<newvalue>` updates the current branch head to the new object.
+<new-oid>` updates the current branch head to the new object.
 
-Given three arguments, stores the <newvalue> in the <ref>,
+Given three arguments, stores the <new-oid> in the <ref>,
 possibly dereferencing the symbolic refs, after verifying that
-the current value of the <ref> matches <oldvalue>.
-E.g. `git update-ref refs/heads/master <newvalue> <oldvalue>`
-updates the master branch head to <newvalue> only if its current
-value is <oldvalue>.  You can specify 40 "0" or an empty string
-as <oldvalue> to make sure that the ref you are creating does
+the current value of the <ref> matches <old-oid>.
+E.g. `git update-ref refs/heads/master <new-oid> <old-oid>`
+updates the master branch head to <new-oid> only if its current
+value is <old-oid>.  You can specify 40 "0" or an empty string
+as <old-oid> to make sure that the ref you are creating does
 not exist.
 
 It also allows a "ref" file to be a symbolic pointer to another
@@ -56,15 +56,15 @@ ref symlink to some other tree, if you have copied a whole
 archive by creating a symlink tree).
 
 With `-d` flag, it deletes the named <ref> after verifying it
-still contains <oldvalue>.
+still contains <old-oid>.
 
 With `--stdin`, update-ref reads instructions from standard input and
 performs all modifications together.  Specify commands of the form:
 
-       update SP <ref> SP <newvalue> [SP <oldvalue>] LF
-       create SP <ref> SP <newvalue> LF
-       delete SP <ref> [SP <oldvalue>] LF
-       verify SP <ref> [SP <oldvalue>] LF
+       update SP <ref> SP <new-oid> [SP <old-oid>] LF
+       create SP <ref> SP <new-oid> LF
+       delete SP <ref> [SP <old-oid>] LF
+       verify SP <ref> [SP <old-oid>] LF
        option SP <opt> LF
        start LF
        prepare LF
@@ -82,10 +82,10 @@ specify a missing value, omit the value and its preceding SP entirely.
 Alternatively, use `-z` to specify in NUL-terminated format, without
 quoting:
 
-       update SP <ref> NUL <newvalue> NUL [<oldvalue>] NUL
-       create SP <ref> NUL <newvalue> NUL
-       delete SP <ref> NUL [<oldvalue>] NUL
-       verify SP <ref> NUL [<oldvalue>] NUL
+       update SP <ref> NUL <new-oid> NUL [<old-oid>] NUL
+       create SP <ref> NUL <new-oid> NUL
+       delete SP <ref> NUL [<old-oid>] NUL
+       verify SP <ref> NUL [<old-oid>] NUL
        option SP <opt> NUL
        start NUL
        prepare NUL
@@ -100,22 +100,22 @@ recognizes as an object name.  Commands in any other format or a
 repeated <ref> produce an error.  Command meanings are:
 
 update::
-       Set <ref> to <newvalue> after verifying <oldvalue>, if given.
-       Specify a zero <newvalue> to ensure the ref does not exist
-       after the update and/or a zero <oldvalue> to make sure the
+       Set <ref> to <new-oid> after verifying <old-oid>, if given.
+       Specify a zero <new-oid> to ensure the ref does not exist
+       after the update and/or a zero <old-oid> to make sure the
        ref does not exist before the update.
 
 create::
-       Create <ref> with <newvalue> after verifying it does not
-       exist.  The given <newvalue> may not be zero.
+       Create <ref> with <new-oid> after verifying it does not
+       exist.  The given <new-oid> may not be zero.
 
 delete::
-       Delete <ref> after verifying it exists with <oldvalue>, if
-       given.  If given, <oldvalue> may not be zero.
+       Delete <ref> after verifying it exists with <old-oid>, if
+       given.  If given, <old-oid> may not be zero.
 
 verify::
-       Verify <ref> against <oldvalue> but do not change it.  If
-       <oldvalue> is zero or missing, the ref must not exist.
+       Verify <ref> against <old-oid> but do not change it.  If
+       <old-oid> is zero or missing, the ref must not exist.
 
 option::
        Modify the behavior of the next command naming a <ref>.
@@ -141,7 +141,7 @@ abort::
        Abort the transaction, releasing all locks if the transaction is in
        prepared state.
 
-If all <ref>s can be locked with matching <oldvalue>s
+If all <ref>s can be locked with matching <old-oid>s
 simultaneously, all modifications are performed.  Otherwise, no
 modifications are performed.  Note that while each individual
 <ref> is updated or deleted atomically, a concurrent reader may
@@ -161,7 +161,7 @@ formatted as:
 
 Where "oldsha1" is the 40 character hexadecimal value previously
 stored in <ref>, "newsha1" is the 40 character hexadecimal value of
-<newvalue> and "committer" is the committer's name, email address
+<new-oid> and "committer" is the committer's name, email address
 and date in the standard Git committer ident format.
 
 Optionally with -m:
index 37f91d5b50ca3ac9053bd687ee540ca87de21083..ee9b92c90da99df3dc77e2122ee9feead2d1de8e 100644 (file)
@@ -275,12 +275,12 @@ This hook executes once for the receive operation. It takes no
 arguments, but for each ref to be updated it receives on standard
 input a line of the format:
 
-  <old-value> SP <new-value> SP <ref-name> LF
+  <old-oid> SP <new-oid> SP <ref-name> LF
 
-where `<old-value>` is the old object name stored in the ref,
-`<new-value>` is the new object name to be stored in the ref and
+where `<old-oid>` is the old object name stored in the ref,
+`<new-oid>` is the new object name to be stored in the ref and
 `<ref-name>` is the full name of the ref.
-When creating a new ref, `<old-value>` is the all-zeroes object name.
+When creating a new ref, `<old-oid>` is the all-zeroes object name.
 
 If the hook exits with non-zero status, none of the refs will be
 updated. If the hook exits with zero, updating of individual refs can
@@ -503,13 +503,13 @@ given reference transaction is in:
 For each reference update that was added to the transaction, the hook
 receives on standard input a line of the format:
 
-  <old-value> SP <new-value> SP <ref-name> LF
+  <old-oid> SP <new-oid> SP <ref-name> LF
 
-where `<old-value>` is the old object name passed into the reference
-transaction, `<new-value>` is the new object name to be stored in the
+where `<old-oid>` is the old object name passed into the reference
+transaction, `<new-oid>` is the new object name to be stored in the
 ref and `<ref-name>` is the full name of the ref. When force updating
 the reference regardless of its current value or when the reference is
-to be created anew, `<old-value>` is the all-zeroes object name. To
+to be created anew, `<old-oid>` is the all-zeroes object name. To
 distinguish these cases, you can inspect the current value of
 `<ref-name>` via `git rev-parse`.
 
index 0b9e0c4302d850a7a38044d03bd1146764bc83ca..7cec85aef17f437ce71a73f0e43fa19c046a4aa8 100644 (file)
@@ -15,14 +15,14 @@ should be used with caution on unsecured networks.
 
 The following syntaxes may be used with them:
 
-- ssh://{startsb}user@{endsb}host.xz{startsb}:port{endsb}/path/to/repo.git/
-- git://host.xz{startsb}:port{endsb}/path/to/repo.git/
-- http{startsb}s{endsb}://host.xz{startsb}:port{endsb}/path/to/repo.git/
-- ftp{startsb}s{endsb}://host.xz{startsb}:port{endsb}/path/to/repo.git/
+- ++ssh://++{startsb}__<user>__++@++{endsb}__<host>__{startsb}++:++__<port>__{endsb}++/++__<path-to-git-repo>__
+- ++git://++__<host>__{startsb}:__<port>__{endsb}++/++__<path-to-git-repo>__
+- ++http++{startsb}++s++{endsb}++://++__<host>__{startsb}++:++__<port>__{endsb}++/++__<path-to-git-repo>__
+- ++ftp++{startsb}++s++{endsb}++://++__<host>__{startsb}++:++__<port>__{endsb}++/++__<path-to-git-repo>__
 
 An alternative scp-like syntax may also be used with the ssh protocol:
 
-- {startsb}user@{endsb}host.xz:path/to/repo.git/
+- {startsb}__<user>__++@++{endsb}__<host>__++:/++__<path-to-git-repo>__
 
 This syntax is only recognized if there are no slashes before the
 first colon. This helps differentiate a local path that contains a
@@ -30,17 +30,17 @@ colon. For example the local path `foo:bar` could be specified as an
 absolute path or `./foo:bar` to avoid being misinterpreted as an ssh
 url.
 
-The ssh and git protocols additionally support ~username expansion:
+The ssh and git protocols additionally support ++~++__<username>__ expansion:
 
-- ssh://{startsb}user@{endsb}host.xz{startsb}:port{endsb}/~{startsb}user{endsb}/path/to/repo.git/
-- git://host.xz{startsb}:port{endsb}/~{startsb}user{endsb}/path/to/repo.git/
-- {startsb}user@{endsb}host.xz:/~{startsb}user{endsb}/path/to/repo.git/
+- ++ssh://++{startsb}__<user>__++@++{endsb}__<host>__{startsb}++:++__<port>__{endsb}++/~++__<user>__++/++__<path-to-git-repo>__
+- ++git://++__<host>__{startsb}++:++__<port>__{endsb}++/~++__<user>__++/++__<path-to-git-repo>__
+- {startsb}__<user>__++@++{endsb}__<host>__++:~++__<user>__++/++__<path-to-git-repo>__
 
 For local repositories, also supported by Git natively, the following
 syntaxes may be used:
 
-- /path/to/repo.git/
-- \file:///path/to/repo.git/
+- `/path/to/repo.git/`
+- ++file:///path/to/repo.git/++
 
 ifndef::git-clone[]
 These two syntaxes are mostly equivalent, except when cloning, when
@@ -57,11 +57,11 @@ endif::git-clone[]
 accept a suitable bundle file. See linkgit:git-bundle[1].
 
 When Git doesn't know how to handle a certain transport protocol, it
-attempts to use the `remote-<transport>` remote helper, if one
+attempts to use the `remote-`{empty}__<transport>__ remote helper, if one
 exists. To explicitly request a remote helper, the following syntax
 may be used:
 
-- _<transport>_::_<address>_
+- _<transport>_::__<address>__
 
 where _<address>_ may be a path, a server and path, or an arbitrary
 URL-like string recognized by the specific remote helper being
@@ -72,10 +72,11 @@ you want to use a different format for them (such that the URLs you
 use will be rewritten into URLs that work), you can create a
 configuration section of the form:
 
-------------
-       [url "<actual-url-base>"]
-               insteadOf = <other-url-base>
-------------
+[verse]
+--
+       [url "__<actual-url-base>__"]
+               insteadOf = _<other-url-base>_
+--
 
 For example, with this:
 
@@ -91,10 +92,11 @@ rewritten in any context that takes a URL to be "git://git.host.xz/repo.git".
 If you want to rewrite URLs for push only, you can create a
 configuration section of the form:
 
-------------
-       [url "<actual-url-base>"]
-               pushInsteadOf = <other-url-base>
-------------
+[verse]
+--
+       [url "__<actual-url-base>__"]
+               pushInsteadOf = _<other-url-base>_
+--
 
 For example, with this:
 
diff --git a/INSTALL b/INSTALL
index c6fb240c91eb9044f1baea43ae29c2991447bbc6..2a46d045928a1159caf5fedcbafb8d4f816ec8ec 100644 (file)
--- a/INSTALL
+++ b/INSTALL
@@ -139,7 +139,7 @@ Issues of note:
          not need that functionality, use NO_CURL to build without
          it.
 
-         Git requires version "7.19.5" or later of "libcurl" to build
+         Git requires version "7.21.3" or later of "libcurl" to build
          without NO_CURL. This version requirement may be bumped in
          the future.
 
index c43c1bd1a05c895737307cac38e8f3caf573ffe1..44b281bb63b0aa21afb78dd6806170a81259b5b3 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1075,6 +1075,7 @@ LIB_OBJS += merge-ort-wrappers.o
 LIB_OBJS += merge-recursive.o
 LIB_OBJS += merge.o
 LIB_OBJS += midx.o
+LIB_OBJS += midx-write.o
 LIB_OBJS += name-hash.o
 LIB_OBJS += negotiator/default.o
 LIB_OBJS += negotiator/noop.o
index d599ca53e19d01ff35657c8d03d16c274f42a5da..a06dd189854a7a8e680ce76b8995296f2e773b98 100644 (file)
@@ -1388,13 +1388,14 @@ N_("j - leave this hunk undecided, see next undecided hunk\n"
    "/ - search for a hunk matching the given regex\n"
    "s - split the current hunk into smaller hunks\n"
    "e - manually edit the current hunk\n"
+   "p - print the current hunk\n"
    "? - print help\n");
 
 static int patch_update_file(struct add_p_state *s,
                             struct file_diff *file_diff)
 {
        size_t hunk_index = 0;
-       ssize_t i, undecided_previous, undecided_next;
+       ssize_t i, undecided_previous, undecided_next, rendered_hunk_index = -1;
        struct hunk *hunk;
        char ch;
        struct child_process cp = CHILD_PROCESS_INIT;
@@ -1447,8 +1448,11 @@ static int patch_update_file(struct add_p_state *s,
 
                strbuf_reset(&s->buf);
                if (file_diff->hunk_nr) {
-                       render_hunk(s, hunk, 0, colored, &s->buf);
-                       fputs(s->buf.buf, stdout);
+                       if (rendered_hunk_index != hunk_index) {
+                               render_hunk(s, hunk, 0, colored, &s->buf);
+                               fputs(s->buf.buf, stdout);
+                               rendered_hunk_index = hunk_index;
+                       }
 
                        strbuf_reset(&s->buf);
                        if (undecided_previous >= 0) {
@@ -1480,6 +1484,7 @@ static int patch_update_file(struct add_p_state *s,
                                permitted |= ALLOW_EDIT;
                                strbuf_addstr(&s->buf, ",e");
                        }
+                       strbuf_addstr(&s->buf, ",p");
                }
                if (file_diff->deleted)
                        prompt_mode_type = PROMPT_DELETION;
@@ -1644,13 +1649,15 @@ soft_increment:
                        hunk_index = i;
                } else if (s->answer.buf[0] == 's') {
                        size_t splittable_into = hunk->splittable_into;
-                       if (!(permitted & ALLOW_SPLIT))
+                       if (!(permitted & ALLOW_SPLIT)) {
                                err(s, _("Sorry, cannot split this hunk"));
-                       else if (!split_hunk(s, file_diff,
-                                            hunk - file_diff->hunk))
+                       } else if (!split_hunk(s, file_diff,
+                                            hunk - file_diff->hunk)) {
                                color_fprintf_ln(stdout, s->s.header_color,
                                                 _("Split into %d hunks."),
                                                 (int)splittable_into);
+                               rendered_hunk_index = -1;
+                       }
                } else if (s->answer.buf[0] == 'e') {
                        if (!(permitted & ALLOW_EDIT))
                                err(s, _("Sorry, cannot edit this hunk"));
@@ -1658,6 +1665,8 @@ soft_increment:
                                hunk->use = USE_HUNK;
                                goto soft_increment;
                        }
+               } else if (s->answer.buf[0] == 'p') {
+                       rendered_hunk_index = -1;
                } else {
                        const char *p = _(help_patch_remainder), *eol = p;
 
index d19648b7f88b75261f0666da94607a9a052dc079..75111191ad586d4cbae07e806f8b2bb6f7ebe83b 100644 (file)
--- a/advice.c
+++ b/advice.c
@@ -105,8 +105,9 @@ static void vadvise(const char *advice, int display_instructions,
 
        for (cp = buf.buf; *cp; cp = np) {
                np = strchrnul(cp, '\n');
-               fprintf(stderr, _("%shint: %.*s%s\n"),
+               fprintf(stderr, _("%shint:%s%.*s%s\n"),
                        advise_get_color(ADVICE_COLOR_HINT),
+                       (np == cp) ? "" : " ",
                        (int)(np - cp), cp,
                        advise_get_color(ADVICE_COLOR_RESET));
                if (*np)
diff --git a/apply.c b/apply.c
index 432837a674c3cc559f762aa4b7766bd8f43177e0..34f20326a7f0ad2328b83af969d2c5419c2349eb 100644 (file)
--- a/apply.c
+++ b/apply.c
@@ -1292,8 +1292,15 @@ static char *git_header_name(int p_value,
                                return NULL; /* no postimage name */
                        second = skip_tree_prefix(p_value, name + len + 1,
                                                  line_len - (len + 1));
+                       /*
+                        * If we are at the SP at the end of a directory,
+                        * skip_tree_prefix() may return NULL as that makes
+                        * it appear as if we have an absolute path.
+                        * Keep going to find another SP.
+                        */
                        if (!second)
-                               return NULL;
+                               continue;
+
                        /*
                         * Does len bytes starting at "name" and "second"
                         * (that are separated by one HT or SP we just
@@ -4441,6 +4448,7 @@ static int create_one_file(struct apply_state *state,
                           const char *buf,
                           unsigned long size)
 {
+       char *newpath = NULL;
        int res;
 
        if (state->cached)
@@ -4502,24 +4510,26 @@ static int create_one_file(struct apply_state *state,
                unsigned int nr = getpid();
 
                for (;;) {
-                       char newpath[PATH_MAX];
-                       mksnpath(newpath, sizeof(newpath), "%s~%u", path, nr);
+                       newpath = mkpathdup("%s~%u", path, nr);
                        res = try_create_file(state, newpath, mode, buf, size);
                        if (res < 0)
-                               return -1;
+                               goto out;
                        if (!res) {
                                if (!rename(newpath, path))
-                                       return 0;
+                                       goto out;
                                unlink_or_warn(newpath);
                                break;
                        }
                        if (errno != EEXIST)
                                break;
                        ++nr;
+                       FREE_AND_NULL(newpath);
                }
        }
-       return error_errno(_("unable to write file '%s' mode %o"),
-                          path, mode);
+       res = error_errno(_("unable to write file '%s' mode %o"), path, mode);
+out:
+       free(newpath);
+       return res;
 }
 
 static int add_conflicted_stages_file(struct apply_state *state,
@@ -4655,8 +4665,11 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
                        return error_errno(_("cannot open %s"), namebuf);
        }
        rej = fdopen(fd, "w");
-       if (!rej)
-               return error_errno(_("cannot open %s"), namebuf);
+       if (!rej) {
+               error_errno(_("cannot open %s"), namebuf);
+               close(fd);
+               return -1;
+       }
 
        /* Normal git tools never deal with .rej, so do not pretend
         * this is a git patch by saying --git or giving extended
index 393c10cbcf6315efb525b38db26e218bf6b1959d..ae723bc85e63365903861cfb816d79b425deaf14 100644 (file)
@@ -310,9 +310,9 @@ static void check_embedded_repo(const char *path)
        strbuf_strip_suffix(&name, "/");
 
        warning(_("adding embedded git repository: %s"), name.buf);
-       if (!adviced_on_embedded_repo &&
-           advice_enabled(ADVICE_ADD_EMBEDDED_REPO)) {
-               advise(embedded_advice, name.buf, name.buf);
+       if (!adviced_on_embedded_repo) {
+               advise_if_enabled(ADVICE_ADD_EMBEDDED_REPO,
+                                 embedded_advice, name.buf, name.buf);
                adviced_on_embedded_repo = 1;
        }
 
@@ -328,10 +328,8 @@ static int add_files(struct dir_struct *dir, int flags)
                fprintf(stderr, _(ignore_error));
                for (i = 0; i < dir->ignored_nr; i++)
                        fprintf(stderr, "%s\n", dir->ignored[i]->name);
-               if (advice_enabled(ADVICE_ADD_IGNORED_FILE))
-                       advise(_("Use -f if you really want to add them.\n"
-                               "Turn this message off by running\n"
-                               "\"git config advice.addIgnoredFile false\""));
+               advise_if_enabled(ADVICE_ADD_IGNORED_FILE,
+                                 _("Use -f if you really want to add them."));
                exit_status = 1;
        }
 
@@ -370,6 +368,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
        int add_new_files;
        int require_pathspec;
        char *seen = NULL;
+       char *ps_matched = NULL;
        struct lock_file lock_file = LOCK_INIT;
 
        git_config(add_config, NULL);
@@ -440,10 +439,8 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 
        if (require_pathspec && pathspec.nr == 0) {
                fprintf(stderr, _("Nothing specified, nothing added.\n"));
-               if (advice_enabled(ADVICE_ADD_EMPTY_PATHSPEC))
-                       advise( _("Maybe you wanted to say 'git add .'?\n"
-                               "Turn this message off by running\n"
-                               "\"git config advice.addEmptyPathspec false\""));
+               advise_if_enabled(ADVICE_ADD_EMPTY_PATHSPEC,
+                                 _("Maybe you wanted to say 'git add .'?"));
                return 0;
        }
 
@@ -549,12 +546,17 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 
        begin_odb_transaction();
 
+       ps_matched = xcalloc(pathspec.nr, 1);
        if (add_renormalize)
                exit_status |= renormalize_tracked_files(&pathspec, flags);
        else
                exit_status |= add_files_to_cache(the_repository, prefix,
-                                                 &pathspec, include_sparse,
-                                                 flags);
+                                                 &pathspec, ps_matched,
+                                                 include_sparse, flags);
+
+       if (take_worktree_changes && !add_renormalize && !ignore_add_errors &&
+           report_path_error(ps_matched, &pathspec))
+               exit(128);
 
        if (add_new_files)
                exit_status |= add_files(&dir, flags);
@@ -568,6 +570,7 @@ finish:
                               COMMIT_LOCK | SKIP_IF_UNCHANGED))
                die(_("unable to write new index file"));
 
+       free(ps_matched);
        dir_clear(&dir);
        clear_pathspec(&pathspec);
        return exit_status;
index 2b6166c284e0dda89b829c88313711d078bd2632..71e6036aab1427e69a7038195a01ded1edd0ae59 100644 (file)
@@ -882,7 +882,8 @@ static int merge_working_tree(const struct checkout_opts *opts,
                         * entries in the index.
                         */
 
-                       add_files_to_cache(the_repository, NULL, NULL, 0, 0);
+                       add_files_to_cache(the_repository, NULL, NULL, NULL, 0,
+                                          0);
                        init_merge_options(&o, the_repository);
                        o.verbosity = 0;
                        work = write_in_core_index_as_tree(the_repository);
@@ -1035,7 +1036,8 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
        remove_branch_state(the_repository, !opts->quiet);
        strbuf_release(&msg);
        if (!opts->quiet &&
-           (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+           !opts->force_detach &&
+           (new_branch_info->path || !strcmp(new_branch_info->name, "HEAD")))
                report_tracking(new_branch_info);
 }
 
index 7ba7201cfb9160dc062e50f64b875e59d0405c6a..6e1484446b0cc2c98ccade5f8285ad81747c6416 100644 (file)
@@ -441,16 +441,21 @@ static const char *prepare_index(const char **argv, const char *prefix,
         * (B) on failure, rollback the real index.
         */
        if (all || (also && pathspec.nr)) {
+               char *ps_matched = xcalloc(pathspec.nr, 1);
                repo_hold_locked_index(the_repository, &index_lock,
                                       LOCK_DIE_ON_ERROR);
                add_files_to_cache(the_repository, also ? prefix : NULL,
-                                  &pathspec, 0, 0);
+                                  &pathspec, ps_matched, 0, 0);
+               if (!all && report_path_error(ps_matched, &pathspec))
+                       exit(128);
+
                refresh_cache_or_die(refresh_flags);
                cache_tree_update(&the_index, WRITE_TREE_SILENT);
                if (write_locked_index(&the_index, &index_lock, 0))
                        die(_("unable to write new index file"));
                commit_style = COMMIT_NORMAL;
                ret = get_lock_file_path(&index_lock);
+               free(ps_matched);
                goto out;
        }
 
index 3a6a750a8eb320bb3622184843ede3d2b9884385..17f929dede30d1bfda914beccdc579bfb3a9d756 100644 (file)
@@ -294,6 +294,8 @@ int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
        argc = parse_options(argc, argv, prefix, options, usage, 0);
        socket_path = argv[0];
 
+       if (!have_unix_sockets())
+               die(_("credential-cache--daemon unavailable; no unix socket support"));
        if (!socket_path)
                usage_with_options(usage, options);
 
index bba96d4ffd6f198adb186aaba0c853e34a93dd11..bef120b537533ca7c085d990da941817697f07dc 100644 (file)
@@ -149,6 +149,9 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix)
                usage_with_options(usage, options);
        op = argv[0];
 
+       if (!have_unix_sockets())
+               die(_("credential-cache unavailable; no unix socket support"));
+
        if (!socket_path)
                socket_path = get_socket_path();
        if (!socket_path)
index 46a793411a437969b53c4f14d941df27358d00ed..5857d860dbf64a7d3e32e7b5b6e4eaec6f07a6c3 100644 (file)
@@ -138,6 +138,7 @@ static int git_fetch_config(const char *k, const char *v,
                int r = git_config_bool(k, v) ?
                        RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
                fetch_config->recurse_submodules = r;
+               return 0;
        }
 
        if (!strcmp(k, "submodule.fetchjobs")) {
index 342907f7bdb5b825a8052065645c8c3c7b47670b..d187cec1ea7550aa237cc2d6c86975c865457a21 100644 (file)
@@ -180,13 +180,51 @@ static void gc_config(void)
        git_config(git_default_config, NULL);
 }
 
-struct maintenance_run_opts;
+enum schedule_priority {
+       SCHEDULE_NONE = 0,
+       SCHEDULE_WEEKLY = 1,
+       SCHEDULE_DAILY = 2,
+       SCHEDULE_HOURLY = 3,
+};
+
+static enum schedule_priority parse_schedule(const char *value)
+{
+       if (!value)
+               return SCHEDULE_NONE;
+       if (!strcasecmp(value, "hourly"))
+               return SCHEDULE_HOURLY;
+       if (!strcasecmp(value, "daily"))
+               return SCHEDULE_DAILY;
+       if (!strcasecmp(value, "weekly"))
+               return SCHEDULE_WEEKLY;
+       return SCHEDULE_NONE;
+}
+
+struct maintenance_run_opts {
+       int auto_flag;
+       int quiet;
+       enum schedule_priority schedule;
+};
+
+static int pack_refs_condition(void)
+{
+       /*
+        * The auto-repacking logic for refs is handled by the ref backends and
+        * exposed via `git pack-refs --auto`. We thus always return truish
+        * here and let the backend decide for us.
+        */
+       return 1;
+}
+
 static int maintenance_task_pack_refs(MAYBE_UNUSED struct maintenance_run_opts *opts)
 {
        struct child_process cmd = CHILD_PROCESS_INIT;
 
        cmd.git_cmd = 1;
        strvec_pushl(&cmd.args, "pack-refs", "--all", "--prune", NULL);
+       if (opts->auto_flag)
+               strvec_push(&cmd.args, "--auto");
+
        return run_command(&cmd);
 }
 
@@ -547,7 +585,7 @@ done:
        return ret;
 }
 
-static void gc_before_repack(void)
+static void gc_before_repack(struct maintenance_run_opts *opts)
 {
        /*
         * We may be called twice, as both the pre- and
@@ -558,7 +596,7 @@ static void gc_before_repack(void)
        if (done++)
                return;
 
-       if (pack_refs && maintenance_task_pack_refs(NULL))
+       if (pack_refs && maintenance_task_pack_refs(opts))
                die(FAILED_RUN, "pack-refs");
 
        if (prune_reflogs) {
@@ -574,7 +612,6 @@ static void gc_before_repack(void)
 int cmd_gc(int argc, const char **argv, const char *prefix)
 {
        int aggressive = 0;
-       int auto_gc = 0;
        int quiet = 0;
        int force = 0;
        const char *name;
@@ -583,6 +620,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
        int keep_largest_pack = -1;
        timestamp_t dummy;
        struct child_process rerere_cmd = CHILD_PROCESS_INIT;
+       struct maintenance_run_opts opts = {0};
 
        struct option builtin_gc_options[] = {
                OPT__QUIET(&quiet, N_("suppress progress reporting")),
@@ -593,7 +631,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                OPT_MAGNITUDE(0, "max-cruft-size", &max_cruft_size,
                              N_("with --cruft, limit the size of new cruft packs")),
                OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
-               OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
+               OPT_BOOL_F(0, "auto", &opts.auto_flag, N_("enable auto-gc mode"),
                           PARSE_OPT_NOCOMPLETE),
                OPT_BOOL_F(0, "force", &force,
                           N_("force running gc even if there may be another gc running"),
@@ -638,7 +676,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
        if (quiet)
                strvec_push(&repack, "-q");
 
-       if (auto_gc) {
+       if (opts.auto_flag) {
                /*
                 * Auto-gc should be least intrusive as possible.
                 */
@@ -663,7 +701,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
 
                        if (lock_repo_for_gc(force, &pid))
                                return 0;
-                       gc_before_repack(); /* dies on failure */
+                       gc_before_repack(&opts); /* dies on failure */
                        delete_tempfile(&pidfile);
 
                        /*
@@ -688,7 +726,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
 
        name = lock_repo_for_gc(force, &pid);
        if (name) {
-               if (auto_gc)
+               if (opts.auto_flag)
                        return 0; /* be quiet on --auto */
                die(_("gc is already running on machine '%s' pid %"PRIuMAX" (use --force if not)"),
                    name, (uintmax_t)pid);
@@ -703,7 +741,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                atexit(process_log_file_at_exit);
        }
 
-       gc_before_repack();
+       gc_before_repack(&opts);
 
        if (!repository_format_precious_objects) {
                struct child_process repack_cmd = CHILD_PROCESS_INIT;
@@ -758,7 +796,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                                             !quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
                                             NULL);
 
-       if (auto_gc && too_many_loose_objects())
+       if (opts.auto_flag && too_many_loose_objects())
                warning(_("There are too many unreachable loose objects; "
                        "run 'git prune' to remove them."));
 
@@ -773,26 +811,6 @@ static const char *const builtin_maintenance_run_usage[] = {
        NULL
 };
 
-enum schedule_priority {
-       SCHEDULE_NONE = 0,
-       SCHEDULE_WEEKLY = 1,
-       SCHEDULE_DAILY = 2,
-       SCHEDULE_HOURLY = 3,
-};
-
-static enum schedule_priority parse_schedule(const char *value)
-{
-       if (!value)
-               return SCHEDULE_NONE;
-       if (!strcasecmp(value, "hourly"))
-               return SCHEDULE_HOURLY;
-       if (!strcasecmp(value, "daily"))
-               return SCHEDULE_DAILY;
-       if (!strcasecmp(value, "weekly"))
-               return SCHEDULE_WEEKLY;
-       return SCHEDULE_NONE;
-}
-
 static int maintenance_opt_schedule(const struct option *opt, const char *arg,
                                    int unset)
 {
@@ -809,12 +827,6 @@ static int maintenance_opt_schedule(const struct option *opt, const char *arg,
        return 0;
 }
 
-struct maintenance_run_opts {
-       int auto_flag;
-       int quiet;
-       enum schedule_priority schedule;
-};
-
 /* Remember to update object flag allocation in object.h */
 #define SEEN           (1u<<0)
 
@@ -1296,7 +1308,7 @@ static struct maintenance_task tasks[] = {
        [TASK_PACK_REFS] = {
                "pack-refs",
                maintenance_task_pack_refs,
-               NULL,
+               pack_refs_condition,
        },
 };
 
index 05d0cad55438a90b72c8e001dfe71b05ad8203d3..8bdb439131499a57751ee87569028389dec82179 100644 (file)
@@ -563,7 +563,7 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
                           PARSE_OPT_NONEG),
                OPT_STRING(0, "merge-base",
                           &merge_base,
-                          N_("commit"),
+                          N_("tree-ish"),
                           N_("specify a merge-base for the merge")),
                OPT_STRVEC('X', "strategy-option", &xopts, N_("option=value"),
                        N_("option for selected merge strategy")),
index bcf383cac9dd875354d3c91152f7b8d635f82ff4..db4082566634cbf5bdbb081d0807b0a82be7645e 100644 (file)
@@ -7,24 +7,28 @@
 #include "revision.h"
 
 static char const * const pack_refs_usage[] = {
-       N_("git pack-refs [--all] [--no-prune] [--include <pattern>] [--exclude <pattern>]"),
+       N_("git pack-refs [--all] [--no-prune] [--auto] [--include <pattern>] [--exclude <pattern>]"),
        NULL
 };
 
 int cmd_pack_refs(int argc, const char **argv, const char *prefix)
 {
-       unsigned int flags = PACK_REFS_PRUNE;
-       static struct ref_exclusions excludes = REF_EXCLUSIONS_INIT;
-       static struct string_list included_refs = STRING_LIST_INIT_NODUP;
-       struct pack_refs_opts pack_refs_opts = { .exclusions = &excludes,
-                                                .includes = &included_refs,
-                                                .flags = flags };
-       static struct string_list option_excluded_refs = STRING_LIST_INIT_NODUP;
+       struct ref_exclusions excludes = REF_EXCLUSIONS_INIT;
+       struct string_list included_refs = STRING_LIST_INIT_NODUP;
+       struct pack_refs_opts pack_refs_opts = {
+               .exclusions = &excludes,
+               .includes = &included_refs,
+               .flags = PACK_REFS_PRUNE,
+       };
+       struct string_list option_excluded_refs = STRING_LIST_INIT_NODUP;
        struct string_list_item *item;
+       int pack_all = 0;
+       int ret;
 
        struct option opts[] = {
-               OPT_BIT(0, "all",   &pack_refs_opts.flags, N_("pack everything"), PACK_REFS_ALL),
+               OPT_BOOL(0, "all",   &pack_all, N_("pack everything")),
                OPT_BIT(0, "prune", &pack_refs_opts.flags, N_("prune loose refs (default)"), PACK_REFS_PRUNE),
+               OPT_BIT(0, "auto", &pack_refs_opts.flags, N_("auto-pack refs as needed"), PACK_REFS_AUTO),
                OPT_STRING_LIST(0, "include", pack_refs_opts.includes, N_("pattern"),
                        N_("references to include")),
                OPT_STRING_LIST(0, "exclude", &option_excluded_refs, N_("pattern"),
@@ -38,11 +42,16 @@ int cmd_pack_refs(int argc, const char **argv, const char *prefix)
        for_each_string_list_item(item, &option_excluded_refs)
                add_ref_exclusion(pack_refs_opts.exclusions, item->string);
 
-       if (pack_refs_opts.flags & PACK_REFS_ALL)
+       if (pack_all)
                string_list_append(pack_refs_opts.includes, "*");
 
        if (!pack_refs_opts.includes->nr)
                string_list_append(pack_refs_opts.includes, "refs/tags/*");
 
-       return refs_pack_refs(get_main_ref_store(the_repository), &pack_refs_opts);
+       ret = refs_pack_refs(get_main_ref_store(the_repository), &pack_refs_opts);
+
+       clear_ref_exclusions(&excludes);
+       string_list_clear(&included_refs, 0);
+       string_list_clear(&option_excluded_refs, 0);
+       return ret;
 }
index 61338a01ecfc0ee4f029215149651bb9ac03da1e..e46afbc46d950df6f98dccc1908602470976618c 100644 (file)
@@ -9,8 +9,8 @@
 #include "repository.h"
 
 static const char * const git_update_ref_usage[] = {
-       N_("git update-ref [<options>] -d <refname> [<old-val>]"),
-       N_("git update-ref [<options>]    <refname> <new-val> [<old-val>]"),
+       N_("git update-ref [<options>] -d <refname> [<old-oid>]"),
+       N_("git update-ref [<options>]    <refname> <new-oid> [<old-oid>]"),
        N_("git update-ref [<options>] --stdin [-z]"),
        NULL
 };
@@ -77,14 +77,14 @@ static char *parse_refname(const char **next)
 }
 
 /*
- * The value being parsed is <oldvalue> (as opposed to <newvalue>; the
+ * The value being parsed is <old-oid> (as opposed to <new-oid>; the
  * difference affects which error messages are generated):
  */
 #define PARSE_SHA1_OLD 0x01
 
 /*
  * For backwards compatibility, accept an empty string for update's
- * <newvalue> in binary mode to be equivalent to specifying zeros.
+ * <new-oid> in binary mode to be equivalent to specifying zeros.
  */
 #define PARSE_SHA1_ALLOW_EMPTY 0x02
 
@@ -140,7 +140,7 @@ static int parse_next_oid(const char **next, const char *end,
                                goto invalid;
                } else if (flags & PARSE_SHA1_ALLOW_EMPTY) {
                        /* With -z, treat an empty value as all zeros: */
-                       warning("%s %s: missing <newvalue>, treating as zero",
+                       warning("%s %s: missing <new-oid>, treating as zero",
                                command, refname);
                        oidclr(oid);
                } else {
@@ -158,14 +158,14 @@ static int parse_next_oid(const char **next, const char *end,
 
  invalid:
        die(flags & PARSE_SHA1_OLD ?
-           "%s %s: invalid <oldvalue>: %s" :
-           "%s %s: invalid <newvalue>: %s",
+           "%s %s: invalid <old-oid>: %s" :
+           "%s %s: invalid <new-oid>: %s",
            command, refname, arg.buf);
 
  eof:
        die(flags & PARSE_SHA1_OLD ?
-           "%s %s: unexpected end of input when reading <oldvalue>" :
-           "%s %s: unexpected end of input when reading <newvalue>",
+           "%s %s: unexpected end of input when reading <old-oid>" :
+           "%s %s: unexpected end of input when reading <new-oid>",
            command, refname);
 }
 
@@ -194,7 +194,7 @@ static void parse_cmd_update(struct ref_transaction *transaction,
 
        if (parse_next_oid(&next, end, &new_oid, "update", refname,
                           PARSE_SHA1_ALLOW_EMPTY))
-               die("update %s: missing <newvalue>", refname);
+               die("update %s: missing <new-oid>", refname);
 
        have_old = !parse_next_oid(&next, end, &old_oid, "update", refname,
                                   PARSE_SHA1_OLD);
@@ -225,10 +225,10 @@ static void parse_cmd_create(struct ref_transaction *transaction,
                die("create: missing <ref>");
 
        if (parse_next_oid(&next, end, &new_oid, "create", refname, 0))
-               die("create %s: missing <newvalue>", refname);
+               die("create %s: missing <new-oid>", refname);
 
        if (is_null_oid(&new_oid))
-               die("create %s: zero <newvalue>", refname);
+               die("create %s: zero <new-oid>", refname);
 
        if (*next != line_termination)
                die("create %s: extra input: %s", refname, next);
@@ -260,7 +260,7 @@ static void parse_cmd_delete(struct ref_transaction *transaction,
                have_old = 0;
        } else {
                if (is_null_oid(&old_oid))
-                       die("delete %s: zero <oldvalue>", refname);
+                       die("delete %s: zero <old-oid>", refname);
                have_old = 1;
        }
 
index 320fb99a90e1db6006135e9798f7300f6fa29798..4876344b5b8009794eb9cefcbff342b1cc4f90a2 100644 (file)
@@ -3158,3 +3158,22 @@ int uname(struct utsname *buf)
                  "%u", (v >> 16) & 0x7fff);
        return 0;
 }
+
+int mingw_have_unix_sockets(void)
+{
+       SC_HANDLE scm, srvc;
+       SERVICE_STATUS_PROCESS status;
+       DWORD bytes;
+       int ret = 0;
+       scm = OpenSCManagerA(NULL, NULL, SC_MANAGER_CONNECT);
+       if (scm) {
+               srvc = OpenServiceA(scm, "afunix", SERVICE_QUERY_STATUS);
+               if (srvc) {
+                       if(QueryServiceStatusEx(srvc, SC_STATUS_PROCESS_INFO, (LPBYTE)&status, sizeof(SERVICE_STATUS_PROCESS), &bytes))
+                               ret = status.dwCurrentState == SERVICE_RUNNING;
+                       CloseServiceHandle(srvc);
+               }
+               CloseServiceHandle(scm);
+       }
+       return ret;
+}
index 6aec50e4124e145d6d43f584418288d3fc29f481..27b61284f46be61ec7baefa2e19328d58397d1f9 100644 (file)
@@ -631,3 +631,9 @@ void open_in_gdb(void);
  * Used by Pthread API implementation for Windows
  */
 int err_win_to_posix(DWORD winerr);
+
+#ifndef NO_UNIX_SOCKETS
+int mingw_have_unix_sockets(void);
+#undef have_unix_sockets
+#define have_unix_sockets mingw_have_unix_sockets
+#endif
index eebce8c7e09393fa8756230e483965674b9afbdf..ae3652b08fa6f36af6c085e36b89efdd46d72389 100644 (file)
--- a/config.c
+++ b/config.c
@@ -1584,8 +1584,10 @@ static int git_default_core_config(const char *var, const char *value,
        if (!strcmp(var, "core.askpass"))
                return git_config_string(&askpass_program, var, value);
 
-       if (!strcmp(var, "core.excludesfile"))
+       if (!strcmp(var, "core.excludesfile")) {
+               free((char *)excludes_file);
                return git_config_pathname(&excludes_file, var, value);
+       }
 
        if (!strcmp(var, "core.whitespace")) {
                if (!value)
index d0dcca2ec554cddf61447215357a7e15877b0aca..fcf3e2d785a04d765a495b67e24515755ef81c8e 100644 (file)
@@ -447,7 +447,6 @@ ifeq ($(uname_S),Windows)
        NO_POLL = YesPlease
        NO_SYMLINK_HEAD = YesPlease
        NO_IPV6 = YesPlease
-       NO_UNIX_SOCKETS = YesPlease
        NO_SETENV = YesPlease
        NO_STRCASESTR = YesPlease
        NO_STRLCPY = YesPlease
@@ -661,7 +660,6 @@ ifeq ($(uname_S),MINGW)
        NO_LIBGEN_H = YesPlease
        NO_POLL = YesPlease
        NO_SYMLINK_HEAD = YesPlease
-       NO_UNIX_SOCKETS = YesPlease
        NO_SETENV = YesPlease
        NO_STRCASESTR = YesPlease
        NO_STRLCPY = YesPlease
index 71f179cba3fbda3bc93de461649cf75bb08c6653..5330e769a72a86df6c4d2e47d796bc4e0cb90773 100644 (file)
@@ -141,7 +141,7 @@ __git_ps1_show_upstream ()
 
        # parse configuration values
        local option
-       for option in ${GIT_PS1_SHOWUPSTREAM}; do
+       for option in ${GIT_PS1_SHOWUPSTREAM-}; do
                case "$option" in
                git|svn) upstream_type="$option" ;;
                verbose) verbose=1 ;;
@@ -528,7 +528,7 @@ __git_ps1 ()
        fi
 
        local conflict="" # state indicator for unresolved conflicts
-       if [[ "${GIT_PS1_SHOWCONFLICTSTATE}" == "yes" ]] &&
+       if [[ "${GIT_PS1_SHOWCONFLICTSTATE-}" == "yes" ]] &&
           [[ $(git ls-files --unmerged 2>/dev/null) ]]; then
                conflict="|CONFLICT"
        fi
index 521d303722595719c79d0ed6a11612327c2d0972..f2d61bb0e6a7b70e7c1d9f13ab77681c3ffff639 100755 (executable)
@@ -92,7 +92,6 @@ cat >.vscode/settings.json.new <<\EOF ||
         "isexe",
         "iskeychar",
         "kompare",
-        "mksnpath",
         "mktag",
         "mktree",
         "mmblob",
index 1cd790a4d2bef6a3df8a6eb080622ccaa6749fa3..683f11e50953a659dcdb075ce84eb92a19064077 100644 (file)
@@ -127,7 +127,16 @@ void run_diff_files(struct rev_info *revs, unsigned int option)
                if (diff_can_quit_early(&revs->diffopt))
                        break;
 
-               if (!ce_path_match(istate, ce, &revs->prune_data, NULL))
+               /*
+                * NEEDSWORK:
+                * Here we filter with pathspec but the result is further
+                * filtered out when --relative is in effect.  To end-users,
+                * a pathspec element that matched only to paths outside the
+                * current directory is like not matching anything at all;
+                * the handling of ps_matched[] here may become problematic
+                * if/when we add the "--error-unmatch" option to "git diff".
+                */
+               if (!ce_path_match(istate, ce, &revs->prune_data, revs->ps_matched))
                        continue;
 
                if (revs->diffopt.prefix &&
index 7c2a6538e5afea607f3d9a1c09cc6aea5539d8de..044f87454a291b116a436276ac812ac6fd5983e9 100644 (file)
@@ -218,6 +218,18 @@ struct strbuf;
 #define GIT_WINDOWS_NATIVE
 #endif
 
+#if defined(NO_UNIX_SOCKETS) || !defined(GIT_WINDOWS_NATIVE)
+static inline int _have_unix_sockets(void)
+{
+#if defined(NO_UNIX_SOCKETS)
+       return 0;
+#else
+       return 1;
+#endif
+}
+#define have_unix_sockets _have_unix_sockets
+#endif
+
 #include <unistd.h>
 #include <stdio.h>
 #include <sys/stat.h>
index fd96b3cdffdb6cd11b429bcada40663e80dbebc9..e1d0bdd273501f98a186961f6eb00aa537953052 100644 (file)
 #define GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS
 #endif
 
+/**
+ * Versions before curl 7.66.0 (September 2019) required manually setting the
+ * transfer-encoding for a streaming POST; after that this is handled
+ * automatically.
+ */
+#if LIBCURL_VERSION_NUM < 0x074200
+#define GIT_CURL_NEED_TRANSFER_ENCODING_HEADER
+#endif
+
 /**
  * CURLOPT_PROTOCOLS_STR and CURLOPT_REDIR_PROTOCOLS_STR were added in 7.85.0,
  * released in August 2022.
diff --git a/http.c b/http.c
index e73b136e5897bd8fce2f874b20f316141ba59c31..3d80bd6116e9e44a061610e33359c8e74da75058 100644 (file)
--- a/http.c
+++ b/http.c
@@ -1452,6 +1452,7 @@ struct active_request_slot *get_active_slot(void)
        curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, NULL);
        curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, NULL);
        curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS, NULL);
+       curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE, -1L);
        curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 0);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1);
        curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 1);
index f2e1947e63815c80e45f648f0ce37cd6341f825b..4caa8668e6ccb410ebf7eabd4245c00ac7fff0aa 100644 (file)
@@ -68,9 +68,6 @@ static void imap_warn(const char *, ...);
 
 static char *next_arg(char **);
 
-__attribute__((format (printf, 3, 4)))
-static int nfsnprintf(char *buf, int blen, const char *fmt, ...);
-
 static int nfvasprintf(char **strp, const char *fmt, va_list ap)
 {
        int len;
@@ -500,19 +497,6 @@ static char *next_arg(char **s)
        return ret;
 }
 
-__attribute__((format (printf, 3, 4)))
-static int nfsnprintf(char *buf, int blen, const char *fmt, ...)
-{
-       int ret;
-       va_list va;
-
-       va_start(va, fmt);
-       if (blen <= 0 || (unsigned)(ret = vsnprintf(buf, blen, fmt, va)) >= (unsigned)blen)
-               BUG("buffer too small. Please report a bug.");
-       va_end(va);
-       return ret;
-}
-
 static struct imap_cmd *issue_imap_cmd(struct imap_store *ctx,
                                       struct imap_cmd_cb *cb,
                                       const char *fmt, va_list ap)
@@ -535,11 +519,11 @@ static struct imap_cmd *issue_imap_cmd(struct imap_store *ctx,
                get_cmd_result(ctx, NULL);
 
        if (!cmd->cb.data)
-               bufl = nfsnprintf(buf, sizeof(buf), "%d %s\r\n", cmd->tag, cmd->cmd);
+               bufl = xsnprintf(buf, sizeof(buf), "%d %s\r\n", cmd->tag, cmd->cmd);
        else
-               bufl = nfsnprintf(buf, sizeof(buf), "%d %s{%d%s}\r\n",
-                                 cmd->tag, cmd->cmd, cmd->cb.dlen,
-                                 CAP(LITERALPLUS) ? "+" : "");
+               bufl = xsnprintf(buf, sizeof(buf), "%d %s{%d%s}\r\n",
+                                cmd->tag, cmd->cmd, cmd->cb.dlen,
+                                CAP(LITERALPLUS) ? "+" : "");
 
        if (0 < verbosity) {
                if (imap->num_in_progress)
index 2078c22b097a61c97faddb6689a75e6c10ccb9ee..3065b12b237a9d9e2314e08306222d33c6f137dd 100644 (file)
@@ -115,6 +115,7 @@ static char *mem_pool_strvfmt(struct mem_pool *pool, const char *fmt,
        size_t available = block ? block->end - block->next_free : 0;
        va_list cp;
        int len, len2;
+       size_t size;
        char *ret;
 
        va_copy(cp, ap);
@@ -123,13 +124,14 @@ static char *mem_pool_strvfmt(struct mem_pool *pool, const char *fmt,
        if (len < 0)
                BUG("your vsnprintf is broken (returned %d)", len);
 
-       ret = mem_pool_alloc(pool, len + 1);  /* 1 for NUL */
+       size = st_add(len, 1); /* 1 for NUL */
+       ret = mem_pool_alloc(pool, size);
 
        /* Shortcut; relies on mem_pool_alloc() not touching buffer contents. */
        if (ret == next_free)
                return ret;
 
-       len2 = vsnprintf(ret, len + 1, fmt, ap);
+       len2 = vsnprintf(ret, size, fmt, ap);
        if (len2 != len)
                BUG("your vsnprintf is broken (returns inconsistent lengths)");
        return ret;
diff --git a/midx-write.c b/midx-write.c
new file mode 100644 (file)
index 0000000..65e69d2
--- /dev/null
@@ -0,0 +1,1525 @@
+#include "git-compat-util.h"
+#include "abspath.h"
+#include "config.h"
+#include "hex.h"
+#include "lockfile.h"
+#include "packfile.h"
+#include "object-file.h"
+#include "hash-lookup.h"
+#include "midx.h"
+#include "progress.h"
+#include "trace2.h"
+#include "run-command.h"
+#include "chunk-format.h"
+#include "pack-bitmap.h"
+#include "refs.h"
+#include "revision.h"
+#include "list-objects.h"
+
+#define PACK_EXPIRED UINT_MAX
+#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
+#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
+#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
+
+extern int midx_checksum_valid(struct multi_pack_index *m);
+extern void clear_midx_files_ext(const char *object_dir, const char *ext,
+                                unsigned char *keep_hash);
+extern int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+                               const char *idx_name);
+
+static size_t write_midx_header(struct hashfile *f,
+                               unsigned char num_chunks,
+                               uint32_t num_packs)
+{
+       hashwrite_be32(f, MIDX_SIGNATURE);
+       hashwrite_u8(f, MIDX_VERSION);
+       hashwrite_u8(f, oid_version(the_hash_algo));
+       hashwrite_u8(f, num_chunks);
+       hashwrite_u8(f, 0); /* unused */
+       hashwrite_be32(f, num_packs);
+
+       return MIDX_HEADER_SIZE;
+}
+
+struct pack_info {
+       uint32_t orig_pack_int_id;
+       char *pack_name;
+       struct packed_git *p;
+
+       uint32_t bitmap_pos;
+       uint32_t bitmap_nr;
+
+       unsigned expired : 1;
+};
+
+static void fill_pack_info(struct pack_info *info,
+                          struct packed_git *p, const char *pack_name,
+                          uint32_t orig_pack_int_id)
+{
+       memset(info, 0, sizeof(struct pack_info));
+
+       info->orig_pack_int_id = orig_pack_int_id;
+       info->pack_name = xstrdup(pack_name);
+       info->p = p;
+       info->bitmap_pos = BITMAP_POS_UNKNOWN;
+}
+
+static int pack_info_compare(const void *_a, const void *_b)
+{
+       struct pack_info *a = (struct pack_info *)_a;
+       struct pack_info *b = (struct pack_info *)_b;
+       return strcmp(a->pack_name, b->pack_name);
+}
+
+static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
+{
+       const char *pack_name = _va;
+       const struct pack_info *compar = _vb;
+
+       return cmp_idx_or_pack_name(pack_name, compar->pack_name);
+}
+
+struct write_midx_context {
+       struct pack_info *info;
+       size_t nr;
+       size_t alloc;
+       struct multi_pack_index *m;
+       struct progress *progress;
+       unsigned pack_paths_checked;
+
+       struct pack_midx_entry *entries;
+       size_t entries_nr;
+
+       uint32_t *pack_perm;
+       uint32_t *pack_order;
+       unsigned large_offsets_needed:1;
+       uint32_t num_large_offsets;
+
+       int preferred_pack_idx;
+
+       struct string_list *to_include;
+};
+
+static void add_pack_to_midx(const char *full_path, size_t full_path_len,
+                            const char *file_name, void *data)
+{
+       struct write_midx_context *ctx = data;
+       struct packed_git *p;
+
+       if (ends_with(file_name, ".idx")) {
+               display_progress(ctx->progress, ++ctx->pack_paths_checked);
+               /*
+                * Note that at most one of ctx->m and ctx->to_include is set,
+                * so we are testing midx_contains_pack() and
+                * string_list_has_string() independently (guarded by the
+                * appropriate NULL checks).
+                *
+                * We could support passing to_include while reusing an existing
+                * MIDX, but don't currently since the reuse process drags
+                * forward all packs from an existing MIDX (without checking
+                * whether or not they appear in the to_include list).
+                *
+                * If we added support for that, these next two conditionals
+                * should be performed independently (likely checking
+                * to_include before the existing MIDX).
+                */
+               if (ctx->m && midx_contains_pack(ctx->m, file_name))
+                       return;
+               else if (ctx->to_include &&
+                        !string_list_has_string(ctx->to_include, file_name))
+                       return;
+
+               ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
+
+               p = add_packed_git(full_path, full_path_len, 0);
+               if (!p) {
+                       warning(_("failed to add packfile '%s'"),
+                               full_path);
+                       return;
+               }
+
+               if (open_pack_index(p)) {
+                       warning(_("failed to open pack-index '%s'"),
+                               full_path);
+                       close_pack(p);
+                       free(p);
+                       return;
+               }
+
+               fill_pack_info(&ctx->info[ctx->nr], p, file_name, ctx->nr);
+               ctx->nr++;
+       }
+}
+
+struct pack_midx_entry {
+       struct object_id oid;
+       uint32_t pack_int_id;
+       time_t pack_mtime;
+       uint64_t offset;
+       unsigned preferred : 1;
+};
+
+static int midx_oid_compare(const void *_a, const void *_b)
+{
+       const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
+       const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
+       int cmp = oidcmp(&a->oid, &b->oid);
+
+       if (cmp)
+               return cmp;
+
+       /* Sort objects in a preferred pack first when multiple copies exist. */
+       if (a->preferred > b->preferred)
+               return -1;
+       if (a->preferred < b->preferred)
+               return 1;
+
+       if (a->pack_mtime > b->pack_mtime)
+               return -1;
+       else if (a->pack_mtime < b->pack_mtime)
+               return 1;
+
+       return a->pack_int_id - b->pack_int_id;
+}
+
+static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
+                                     struct pack_midx_entry *e,
+                                     uint32_t pos)
+{
+       if (pos >= m->num_objects)
+               return 1;
+
+       nth_midxed_object_oid(&e->oid, m, pos);
+       e->pack_int_id = nth_midxed_pack_int_id(m, pos);
+       e->offset = nth_midxed_offset(m, pos);
+
+       /* consider objects in midx to be from "old" packs */
+       e->pack_mtime = 0;
+       return 0;
+}
+
+static void fill_pack_entry(uint32_t pack_int_id,
+                           struct packed_git *p,
+                           uint32_t cur_object,
+                           struct pack_midx_entry *entry,
+                           int preferred)
+{
+       if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
+               die(_("failed to locate object %d in packfile"), cur_object);
+
+       entry->pack_int_id = pack_int_id;
+       entry->pack_mtime = p->mtime;
+
+       entry->offset = nth_packed_object_offset(p, cur_object);
+       entry->preferred = !!preferred;
+}
+
+struct midx_fanout {
+       struct pack_midx_entry *entries;
+       size_t nr, alloc;
+};
+
+static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
+{
+       if (nr < fanout->nr)
+               BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
+                   (uintmax_t)nr, (uintmax_t)fanout->nr);
+       ALLOC_GROW(fanout->entries, nr, fanout->alloc);
+}
+
+static void midx_fanout_sort(struct midx_fanout *fanout)
+{
+       QSORT(fanout->entries, fanout->nr, midx_oid_compare);
+}
+
+static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
+                                       struct multi_pack_index *m,
+                                       uint32_t cur_fanout,
+                                       int preferred_pack)
+{
+       uint32_t start = 0, end;
+       uint32_t cur_object;
+
+       if (cur_fanout)
+               start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
+       end = ntohl(m->chunk_oid_fanout[cur_fanout]);
+
+       for (cur_object = start; cur_object < end; cur_object++) {
+               if ((preferred_pack > -1) &&
+                   (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
+                       /*
+                        * Objects from preferred packs are added
+                        * separately.
+                        */
+                       continue;
+               }
+
+               midx_fanout_grow(fanout, fanout->nr + 1);
+               nth_midxed_pack_midx_entry(m,
+                                          &fanout->entries[fanout->nr],
+                                          cur_object);
+               fanout->entries[fanout->nr].preferred = 0;
+               fanout->nr++;
+       }
+}
+
+static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
+                                       struct pack_info *info,
+                                       uint32_t cur_pack,
+                                       int preferred,
+                                       uint32_t cur_fanout)
+{
+       struct packed_git *pack = info[cur_pack].p;
+       uint32_t start = 0, end;
+       uint32_t cur_object;
+
+       if (cur_fanout)
+               start = get_pack_fanout(pack, cur_fanout - 1);
+       end = get_pack_fanout(pack, cur_fanout);
+
+       for (cur_object = start; cur_object < end; cur_object++) {
+               midx_fanout_grow(fanout, fanout->nr + 1);
+               fill_pack_entry(cur_pack,
+                               info[cur_pack].p,
+                               cur_object,
+                               &fanout->entries[fanout->nr],
+                               preferred);
+               fanout->nr++;
+       }
+}
+
+/*
+ * It is possible to artificially get into a state where there are many
+ * duplicate copies of objects. That can create high memory pressure if
+ * we are to create a list of all objects before de-duplication. To reduce
+ * this memory pressure without a significant performance drop, automatically
+ * group objects by the first byte of their object id. Use the IDX fanout
+ * tables to group the data, copy to a local array, then sort.
+ *
+ * Copy only the de-duplicated entries (selected by most-recent modified time
+ * of a packfile containing the object).
+ */
+static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
+                                                 struct pack_info *info,
+                                                 uint32_t nr_packs,
+                                                 size_t *nr_objects,
+                                                 int preferred_pack)
+{
+       uint32_t cur_fanout, cur_pack, cur_object;
+       size_t alloc_objects, total_objects = 0;
+       struct midx_fanout fanout = { 0 };
+       struct pack_midx_entry *deduplicated_entries = NULL;
+       uint32_t start_pack = m ? m->num_packs : 0;
+
+       for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
+               total_objects = st_add(total_objects,
+                                      info[cur_pack].p->num_objects);
+
+       /*
+        * As we de-duplicate by fanout value, we expect the fanout
+        * slices to be evenly distributed, with some noise. Hence,
+        * allocate slightly more than one 256th.
+        */
+       alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;
+
+       ALLOC_ARRAY(fanout.entries, fanout.alloc);
+       ALLOC_ARRAY(deduplicated_entries, alloc_objects);
+       *nr_objects = 0;
+
+       for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
+               fanout.nr = 0;
+
+               if (m)
+                       midx_fanout_add_midx_fanout(&fanout, m, cur_fanout,
+                                                   preferred_pack);
+
+               for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
+                       int preferred = cur_pack == preferred_pack;
+                       midx_fanout_add_pack_fanout(&fanout,
+                                                   info, cur_pack,
+                                                   preferred, cur_fanout);
+               }
+
+               if (-1 < preferred_pack && preferred_pack < start_pack)
+                       midx_fanout_add_pack_fanout(&fanout, info,
+                                                   preferred_pack, 1,
+                                                   cur_fanout);
+
+               midx_fanout_sort(&fanout);
+
+               /*
+                * The batch is now sorted by OID and then mtime (descending).
+                * Take only the first duplicate.
+                */
+               for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
+                       if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
+                                               &fanout.entries[cur_object].oid))
+                               continue;
+
+                       ALLOC_GROW(deduplicated_entries, st_add(*nr_objects, 1),
+                                  alloc_objects);
+                       memcpy(&deduplicated_entries[*nr_objects],
+                              &fanout.entries[cur_object],
+                              sizeof(struct pack_midx_entry));
+                       (*nr_objects)++;
+               }
+       }
+
+       free(fanout.entries);
+       return deduplicated_entries;
+}
+
+static int write_midx_pack_names(struct hashfile *f, void *data)
+{
+       struct write_midx_context *ctx = data;
+       uint32_t i;
+       unsigned char padding[MIDX_CHUNK_ALIGNMENT];
+       size_t written = 0;
+
+       for (i = 0; i < ctx->nr; i++) {
+               size_t writelen;
+
+               if (ctx->info[i].expired)
+                       continue;
+
+               if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
+                       BUG("incorrect pack-file order: %s before %s",
+                           ctx->info[i - 1].pack_name,
+                           ctx->info[i].pack_name);
+
+               writelen = strlen(ctx->info[i].pack_name) + 1;
+               hashwrite(f, ctx->info[i].pack_name, writelen);
+               written += writelen;
+       }
+
+       /* add padding to be aligned */
+       i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
+       if (i < MIDX_CHUNK_ALIGNMENT) {
+               memset(padding, 0, sizeof(padding));
+               hashwrite(f, padding, i);
+       }
+
+       return 0;
+}
+
+static int write_midx_bitmapped_packs(struct hashfile *f, void *data)
+{
+       struct write_midx_context *ctx = data;
+       size_t i;
+
+       for (i = 0; i < ctx->nr; i++) {
+               struct pack_info *pack = &ctx->info[i];
+               if (pack->expired)
+                       continue;
+
+               if (pack->bitmap_pos == BITMAP_POS_UNKNOWN && pack->bitmap_nr)
+                       BUG("pack '%s' has no bitmap position, but has %d bitmapped object(s)",
+                           pack->pack_name, pack->bitmap_nr);
+
+               hashwrite_be32(f, pack->bitmap_pos);
+               hashwrite_be32(f, pack->bitmap_nr);
+       }
+       return 0;
+}
+
+static int write_midx_oid_fanout(struct hashfile *f,
+                                void *data)
+{
+       struct write_midx_context *ctx = data;
+       struct pack_midx_entry *list = ctx->entries;
+       struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
+       uint32_t count = 0;
+       uint32_t i;
+
+       /*
+       * Write the first-level table (the list is sorted,
+       * but we use a 256-entry lookup to be able to avoid
+       * having to do eight extra binary search iterations).
+       */
+       for (i = 0; i < 256; i++) {
+               struct pack_midx_entry *next = list;
+
+               while (next < last && next->oid.hash[0] == i) {
+                       count++;
+                       next++;
+               }
+
+               hashwrite_be32(f, count);
+               list = next;
+       }
+
+       return 0;
+}
+
+static int write_midx_oid_lookup(struct hashfile *f,
+                                void *data)
+{
+       struct write_midx_context *ctx = data;
+       unsigned char hash_len = the_hash_algo->rawsz;
+       struct pack_midx_entry *list = ctx->entries;
+       uint32_t i;
+
+       for (i = 0; i < ctx->entries_nr; i++) {
+               struct pack_midx_entry *obj = list++;
+
+               if (i < ctx->entries_nr - 1) {
+                       struct pack_midx_entry *next = list;
+                       if (oidcmp(&obj->oid, &next->oid) >= 0)
+                               BUG("OIDs not in order: %s >= %s",
+                                   oid_to_hex(&obj->oid),
+                                   oid_to_hex(&next->oid));
+               }
+
+               hashwrite(f, obj->oid.hash, (int)hash_len);
+       }
+
+       return 0;
+}
+
+static int write_midx_object_offsets(struct hashfile *f,
+                                    void *data)
+{
+       struct write_midx_context *ctx = data;
+       struct pack_midx_entry *list = ctx->entries;
+       uint32_t i, nr_large_offset = 0;
+
+       for (i = 0; i < ctx->entries_nr; i++) {
+               struct pack_midx_entry *obj = list++;
+
+               if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
+                       BUG("object %s is in an expired pack with int-id %d",
+                           oid_to_hex(&obj->oid),
+                           obj->pack_int_id);
+
+               hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
+
+               if (ctx->large_offsets_needed && obj->offset >> 31)
+                       hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
+               else if (!ctx->large_offsets_needed && obj->offset >> 32)
+                       BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
+                           oid_to_hex(&obj->oid),
+                           obj->offset);
+               else
+                       hashwrite_be32(f, (uint32_t)obj->offset);
+       }
+
+       return 0;
+}
+
+static int write_midx_large_offsets(struct hashfile *f,
+                                   void *data)
+{
+       struct write_midx_context *ctx = data;
+       struct pack_midx_entry *list = ctx->entries;
+       struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
+       uint32_t nr_large_offset = ctx->num_large_offsets;
+
+       while (nr_large_offset) {
+               struct pack_midx_entry *obj;
+               uint64_t offset;
+
+               if (list >= end)
+                       BUG("too many large-offset objects");
+
+               obj = list++;
+               offset = obj->offset;
+
+               if (!(offset >> 31))
+                       continue;
+
+               hashwrite_be64(f, offset);
+
+               nr_large_offset--;
+       }
+
+       return 0;
+}
+
+static int write_midx_revindex(struct hashfile *f,
+                              void *data)
+{
+       struct write_midx_context *ctx = data;
+       uint32_t i;
+
+       for (i = 0; i < ctx->entries_nr; i++)
+               hashwrite_be32(f, ctx->pack_order[i]);
+
+       return 0;
+}
+
+struct midx_pack_order_data {
+       uint32_t nr;
+       uint32_t pack;
+       off_t offset;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+       const struct midx_pack_order_data *a = va, *b = vb;
+       if (a->pack < b->pack)
+               return -1;
+       else if (a->pack > b->pack)
+               return 1;
+       else if (a->offset < b->offset)
+               return -1;
+       else if (a->offset > b->offset)
+               return 1;
+       else
+               return 0;
+}
+
+static uint32_t *midx_pack_order(struct write_midx_context *ctx)
+{
+       struct midx_pack_order_data *data;
+       uint32_t *pack_order;
+       uint32_t i;
+
+       trace2_region_enter("midx", "midx_pack_order", the_repository);
+
+       ALLOC_ARRAY(data, ctx->entries_nr);
+       for (i = 0; i < ctx->entries_nr; i++) {
+               struct pack_midx_entry *e = &ctx->entries[i];
+               data[i].nr = i;
+               data[i].pack = ctx->pack_perm[e->pack_int_id];
+               if (!e->preferred)
+                       data[i].pack |= (1U << 31);
+               data[i].offset = e->offset;
+       }
+
+       QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
+
+       ALLOC_ARRAY(pack_order, ctx->entries_nr);
+       for (i = 0; i < ctx->entries_nr; i++) {
+               struct pack_midx_entry *e = &ctx->entries[data[i].nr];
+               struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
+               if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
+                       pack->bitmap_pos = i;
+               pack->bitmap_nr++;
+               pack_order[i] = data[i].nr;
+       }
+       for (i = 0; i < ctx->nr; i++) {
+               struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
+               if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
+                       pack->bitmap_pos = 0;
+       }
+       free(data);
+
+       trace2_region_leave("midx", "midx_pack_order", the_repository);
+
+       return pack_order;
+}
+
+static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
+                                    struct write_midx_context *ctx)
+{
+       struct strbuf buf = STRBUF_INIT;
+       const char *tmp_file;
+
+       trace2_region_enter("midx", "write_midx_reverse_index", the_repository);
+
+       strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
+
+       tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
+                                       midx_hash, WRITE_REV);
+
+       if (finalize_object_file(tmp_file, buf.buf))
+               die(_("cannot store reverse index file"));
+
+       strbuf_release(&buf);
+
+       trace2_region_leave("midx", "write_midx_reverse_index", the_repository);
+}
+
+static void prepare_midx_packing_data(struct packing_data *pdata,
+                                     struct write_midx_context *ctx)
+{
+       uint32_t i;
+
+       trace2_region_enter("midx", "prepare_midx_packing_data", the_repository);
+
+       memset(pdata, 0, sizeof(struct packing_data));
+       prepare_packing_data(the_repository, pdata);
+
+       for (i = 0; i < ctx->entries_nr; i++) {
+               struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]];
+               struct object_entry *to = packlist_alloc(pdata, &from->oid);
+
+               oe_set_in_pack(pdata, to,
+                              ctx->info[ctx->pack_perm[from->pack_int_id]].p);
+       }
+
+       trace2_region_leave("midx", "prepare_midx_packing_data", the_repository);
+}
+
+static int add_ref_to_pending(const char *refname,
+                             const struct object_id *oid,
+                             int flag, void *cb_data)
+{
+       struct rev_info *revs = (struct rev_info*)cb_data;
+       struct object_id peeled;
+       struct object *object;
+
+       if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
+               warning("symbolic ref is dangling: %s", refname);
+               return 0;
+       }
+
+       if (!peel_iterated_oid(oid, &peeled))
+               oid = &peeled;
+
+       object = parse_object_or_die(oid, refname);
+       if (object->type != OBJ_COMMIT)
+               return 0;
+
+       add_pending_object(revs, object, "");
+       if (bitmap_is_preferred_refname(revs->repo, refname))
+               object->flags |= NEEDS_BITMAP;
+       return 0;
+}
+
+struct bitmap_commit_cb {
+       struct commit **commits;
+       size_t commits_nr, commits_alloc;
+
+       struct write_midx_context *ctx;
+};
+
+static const struct object_id *bitmap_oid_access(size_t index,
+                                                const void *_entries)
+{
+       const struct pack_midx_entry *entries = _entries;
+       return &entries[index].oid;
+}
+
+static void bitmap_show_commit(struct commit *commit, void *_data)
+{
+       struct bitmap_commit_cb *data = _data;
+       int pos = oid_pos(&commit->object.oid, data->ctx->entries,
+                         data->ctx->entries_nr,
+                         bitmap_oid_access);
+       if (pos < 0)
+               return;
+
+       ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc);
+       data->commits[data->commits_nr++] = commit;
+}
+
+static int read_refs_snapshot(const char *refs_snapshot,
+                             struct rev_info *revs)
+{
+       struct strbuf buf = STRBUF_INIT;
+       struct object_id oid;
+       FILE *f = xfopen(refs_snapshot, "r");
+
+       while (strbuf_getline(&buf, f) != EOF) {
+               struct object *object;
+               int preferred = 0;
+               char *hex = buf.buf;
+               const char *end = NULL;
+
+               if (buf.len && *buf.buf == '+') {
+                       preferred = 1;
+                       hex = &buf.buf[1];
+               }
+
+               if (parse_oid_hex(hex, &oid, &end) < 0)
+                       die(_("could not parse line: %s"), buf.buf);
+               if (*end)
+                       die(_("malformed line: %s"), buf.buf);
+
+               object = parse_object_or_die(&oid, NULL);
+               if (preferred)
+                       object->flags |= NEEDS_BITMAP;
+
+               add_pending_object(revs, object, "");
+       }
+
+       fclose(f);
+       strbuf_release(&buf);
+       return 0;
+}
+static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
+                                                   const char *refs_snapshot,
+                                                   struct write_midx_context *ctx)
+{
+       struct rev_info revs;
+       struct bitmap_commit_cb cb = {0};
+
+       trace2_region_enter("midx", "find_commits_for_midx_bitmap",
+                           the_repository);
+
+       cb.ctx = ctx;
+
+       repo_init_revisions(the_repository, &revs, NULL);
+       if (refs_snapshot) {
+               read_refs_snapshot(refs_snapshot, &revs);
+       } else {
+               setup_revisions(0, NULL, &revs, NULL);
+               for_each_ref(add_ref_to_pending, &revs);
+       }
+
+       /*
+        * Skipping promisor objects here is intentional, since it only excludes
+        * them from the list of reachable commits that we want to select from
+        * when computing the selection of MIDX'd commits to receive bitmaps.
+        *
+        * Reachability bitmaps do require that their objects be closed under
+        * reachability, but fetching any objects missing from promisors at this
+        * point is too late. But, if one of those objects can be reached from
+        * another object that is included in the bitmap, then we will
+        * complain later that we don't have reachability closure (and fail
+        * appropriately).
+        */
+       fetch_if_missing = 0;
+       revs.exclude_promisor_objects = 1;
+
+       if (prepare_revision_walk(&revs))
+               die(_("revision walk setup failed"));
+
+       traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
+       if (indexed_commits_nr_p)
+               *indexed_commits_nr_p = cb.commits_nr;
+
+       release_revisions(&revs);
+
+       trace2_region_leave("midx", "find_commits_for_midx_bitmap",
+                           the_repository);
+
+       return cb.commits;
+}
+
+static int write_midx_bitmap(const char *midx_name,
+                            const unsigned char *midx_hash,
+                            struct packing_data *pdata,
+                            struct commit **commits,
+                            uint32_t commits_nr,
+                            uint32_t *pack_order,
+                            unsigned flags)
+{
+       int ret, i;
+       uint16_t options = 0;
+       struct pack_idx_entry **index;
+       char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name,
+                                       hash_to_hex(midx_hash));
+
+       trace2_region_enter("midx", "write_midx_bitmap", the_repository);
+
+       if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
+               options |= BITMAP_OPT_HASH_CACHE;
+
+       if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE)
+               options |= BITMAP_OPT_LOOKUP_TABLE;
+
+       /*
+        * Build the MIDX-order index based on pdata.objects (which is already
+        * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
+        * this order).
+        */
+       ALLOC_ARRAY(index, pdata->nr_objects);
+       for (i = 0; i < pdata->nr_objects; i++)
+               index[i] = &pdata->objects[i].idx;
+
+       bitmap_writer_show_progress(flags & MIDX_PROGRESS);
+       bitmap_writer_build_type_index(pdata, index, pdata->nr_objects);
+
+       /*
+        * bitmap_writer_finish expects objects in lex order, but pack_order
+        * gives us exactly that. use it directly instead of re-sorting the
+        * array.
+        *
+        * This changes the order of objects in 'index' between
+        * bitmap_writer_build_type_index and bitmap_writer_finish.
+        *
+        * The same re-ordering takes place in the single-pack bitmap code via
+        * write_idx_file(), which is called by finish_tmp_packfile(), which
+        * happens between bitmap_writer_build_type_index() and
+        * bitmap_writer_finish().
+        */
+       for (i = 0; i < pdata->nr_objects; i++)
+               index[pack_order[i]] = &pdata->objects[i].idx;
+
+       bitmap_writer_select_commits(commits, commits_nr, -1);
+       ret = bitmap_writer_build(pdata);
+       if (ret < 0)
+               goto cleanup;
+
+       bitmap_writer_set_checksum(midx_hash);
+       bitmap_writer_finish(index, pdata->nr_objects, bitmap_name, options);
+
+cleanup:
+       free(index);
+       free(bitmap_name);
+
+       trace2_region_leave("midx", "write_midx_bitmap", the_repository);
+
+       return ret;
+}
+
+static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
+                                                       const char *object_dir)
+{
+       struct multi_pack_index *result = NULL;
+       struct multi_pack_index *cur;
+       char *obj_dir_real = real_pathdup(object_dir, 1);
+       struct strbuf cur_path_real = STRBUF_INIT;
+
+       /* Ensure the given object_dir is local, or a known alternate. */
+       find_odb(r, obj_dir_real);
+
+       for (cur = get_multi_pack_index(r); cur; cur = cur->next) {
+               strbuf_realpath(&cur_path_real, cur->object_dir, 1);
+               if (!strcmp(obj_dir_real, cur_path_real.buf)) {
+                       result = cur;
+                       goto cleanup;
+               }
+       }
+
+cleanup:
+       free(obj_dir_real);
+       strbuf_release(&cur_path_real);
+       return result;
+}
+
+static int write_midx_internal(const char *object_dir,
+                              struct string_list *packs_to_include,
+                              struct string_list *packs_to_drop,
+                              const char *preferred_pack_name,
+                              const char *refs_snapshot,
+                              unsigned flags)
+{
+       struct strbuf midx_name = STRBUF_INIT;
+       unsigned char midx_hash[GIT_MAX_RAWSZ];
+       uint32_t i;
+       struct hashfile *f = NULL;
+       struct lock_file lk;
+       struct write_midx_context ctx = { 0 };
+       int bitmapped_packs_concat_len = 0;
+       int pack_name_concat_len = 0;
+       int dropped_packs = 0;
+       int result = 0;
+       struct chunkfile *cf;
+
+       trace2_region_enter("midx", "write_midx_internal", the_repository);
+
+       get_midx_filename(&midx_name, object_dir);
+       if (safe_create_leading_directories(midx_name.buf))
+               die_errno(_("unable to create leading directories of %s"),
+                         midx_name.buf);
+
+       if (!packs_to_include) {
+               /*
+                * Only reference an existing MIDX when not filtering which
+                * packs to include, since all packs and objects are copied
+                * blindly from an existing MIDX if one is present.
+                */
+               ctx.m = lookup_multi_pack_index(the_repository, object_dir);
+       }
+
+       if (ctx.m && !midx_checksum_valid(ctx.m)) {
+               warning(_("ignoring existing multi-pack-index; checksum mismatch"));
+               ctx.m = NULL;
+       }
+
+       ctx.nr = 0;
+       ctx.alloc = ctx.m ? ctx.m->num_packs : 16;
+       ctx.info = NULL;
+       ALLOC_ARRAY(ctx.info, ctx.alloc);
+
+       if (ctx.m) {
+               for (i = 0; i < ctx.m->num_packs; i++) {
+                       ALLOC_GROW(ctx.info, ctx.nr + 1, ctx.alloc);
+
+                       if (flags & MIDX_WRITE_REV_INDEX) {
+                               /*
+                                * If generating a reverse index, need to have
+                                * packed_git's loaded to compare their
+                                * mtimes and object count.
+                                */
+                               if (prepare_midx_pack(the_repository, ctx.m, i)) {
+                                       error(_("could not load pack"));
+                                       result = 1;
+                                       goto cleanup;
+                               }
+
+                               if (open_pack_index(ctx.m->packs[i]))
+                                       die(_("could not open index for %s"),
+                                           ctx.m->packs[i]->pack_name);
+                       }
+
+                       fill_pack_info(&ctx.info[ctx.nr++], ctx.m->packs[i],
+                                      ctx.m->pack_names[i], i);
+               }
+       }
+
+       ctx.pack_paths_checked = 0;
+       if (flags & MIDX_PROGRESS)
+               ctx.progress = start_delayed_progress(_("Adding packfiles to multi-pack-index"), 0);
+       else
+               ctx.progress = NULL;
+
+       ctx.to_include = packs_to_include;
+
+       for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
+       stop_progress(&ctx.progress);
+
+       if ((ctx.m && ctx.nr == ctx.m->num_packs) &&
+           !(packs_to_include || packs_to_drop)) {
+               struct bitmap_index *bitmap_git;
+               int bitmap_exists;
+               int want_bitmap = flags & MIDX_WRITE_BITMAP;
+
+               bitmap_git = prepare_midx_bitmap_git(ctx.m);
+               bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
+               free_bitmap_index(bitmap_git);
+
+               if (bitmap_exists || !want_bitmap) {
+                       /*
+                        * The correct MIDX already exists, and so does a
+                        * corresponding bitmap (or one wasn't requested).
+                        */
+                       if (!want_bitmap)
+                               clear_midx_files_ext(object_dir, ".bitmap",
+                                                    NULL);
+                       goto cleanup;
+               }
+       }
+
+       if (preferred_pack_name) {
+               ctx.preferred_pack_idx = -1;
+
+               for (i = 0; i < ctx.nr; i++) {
+                       if (!cmp_idx_or_pack_name(preferred_pack_name,
+                                                 ctx.info[i].pack_name)) {
+                               ctx.preferred_pack_idx = i;
+                               break;
+                       }
+               }
+
+               if (ctx.preferred_pack_idx == -1)
+                       warning(_("unknown preferred pack: '%s'"),
+                               preferred_pack_name);
+       } else if (ctx.nr &&
+                  (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
+               struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
+               ctx.preferred_pack_idx = 0;
+
+               if (packs_to_drop && packs_to_drop->nr)
+                       BUG("cannot write a MIDX bitmap during expiration");
+
+               /*
+                * set a preferred pack when writing a bitmap to ensure that
+                * the pack from which the first object is selected in pseudo
+                * pack-order has all of its objects selected from that pack
+                * (and not another pack containing a duplicate)
+                */
+               for (i = 1; i < ctx.nr; i++) {
+                       struct packed_git *p = ctx.info[i].p;
+
+                       if (!oldest->num_objects || p->mtime < oldest->mtime) {
+                               oldest = p;
+                               ctx.preferred_pack_idx = i;
+                       }
+               }
+
+               if (!oldest->num_objects) {
+                       /*
+                        * If all packs are empty; unset the preferred index.
+                        * This is acceptable since there will be no duplicate
+                        * objects to resolve, so the preferred value doesn't
+                        * matter.
+                        */
+                       ctx.preferred_pack_idx = -1;
+               }
+       } else {
+               /*
+                * otherwise don't mark any pack as preferred to avoid
+                * interfering with expiration logic below
+                */
+               ctx.preferred_pack_idx = -1;
+       }
+
+       if (ctx.preferred_pack_idx > -1) {
+               struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
+               if (!preferred->num_objects) {
+                       error(_("cannot select preferred pack %s with no objects"),
+                             preferred->pack_name);
+                       result = 1;
+                       goto cleanup;
+               }
+       }
+
+       ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
+                                        ctx.preferred_pack_idx);
+
+       ctx.large_offsets_needed = 0;
+       for (i = 0; i < ctx.entries_nr; i++) {
+               if (ctx.entries[i].offset > 0x7fffffff)
+                       ctx.num_large_offsets++;
+               if (ctx.entries[i].offset > 0xffffffff)
+                       ctx.large_offsets_needed = 1;
+       }
+
+       QSORT(ctx.info, ctx.nr, pack_info_compare);
+
+       if (packs_to_drop && packs_to_drop->nr) {
+               int drop_index = 0;
+               int missing_drops = 0;
+
+               for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
+                       int cmp = strcmp(ctx.info[i].pack_name,
+                                        packs_to_drop->items[drop_index].string);
+
+                       if (!cmp) {
+                               drop_index++;
+                               ctx.info[i].expired = 1;
+                       } else if (cmp > 0) {
+                               error(_("did not see pack-file %s to drop"),
+                                     packs_to_drop->items[drop_index].string);
+                               drop_index++;
+                               missing_drops++;
+                               i--;
+                       } else {
+                               ctx.info[i].expired = 0;
+                       }
+               }
+
+               if (missing_drops) {
+                       result = 1;
+                       goto cleanup;
+               }
+       }
+
+       /*
+        * pack_perm stores a permutation between pack-int-ids from the
+        * previous multi-pack-index to the new one we are writing:
+        *
+        * pack_perm[old_id] = new_id
+        */
+       ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
+       for (i = 0; i < ctx.nr; i++) {
+               if (ctx.info[i].expired) {
+                       dropped_packs++;
+                       ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
+               } else {
+                       ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
+               }
+       }
+
+       for (i = 0; i < ctx.nr; i++) {
+               if (ctx.info[i].expired)
+                       continue;
+               pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
+               bitmapped_packs_concat_len += 2 * sizeof(uint32_t);
+       }
+
+       /* Check that the preferred pack wasn't expired (if given). */
+       if (preferred_pack_name) {
+               struct pack_info *preferred = bsearch(preferred_pack_name,
+                                                     ctx.info, ctx.nr,
+                                                     sizeof(*ctx.info),
+                                                     idx_or_pack_name_cmp);
+               if (preferred) {
+                       uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
+                       if (perm == PACK_EXPIRED)
+                               warning(_("preferred pack '%s' is expired"),
+                                       preferred_pack_name);
+               }
+       }
+
+       if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
+               pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
+                                       (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
+
+       hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
+       f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
+
+       if (ctx.nr - dropped_packs == 0) {
+               error(_("no pack files to index."));
+               result = 1;
+               goto cleanup;
+       }
+
+       if (!ctx.entries_nr) {
+               if (flags & MIDX_WRITE_BITMAP)
+                       warning(_("refusing to write multi-pack .bitmap without any objects"));
+               flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
+       }
+
+       cf = init_chunkfile(f);
+
+       add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
+                 write_midx_pack_names);
+       add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
+                 write_midx_oid_fanout);
+       add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
+                 st_mult(ctx.entries_nr, the_hash_algo->rawsz),
+                 write_midx_oid_lookup);
+       add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
+                 st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
+                 write_midx_object_offsets);
+
+       if (ctx.large_offsets_needed)
+               add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
+                       st_mult(ctx.num_large_offsets,
+                               MIDX_CHUNK_LARGE_OFFSET_WIDTH),
+                       write_midx_large_offsets);
+
+       if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
+               ctx.pack_order = midx_pack_order(&ctx);
+               add_chunk(cf, MIDX_CHUNKID_REVINDEX,
+                         st_mult(ctx.entries_nr, sizeof(uint32_t)),
+                         write_midx_revindex);
+               add_chunk(cf, MIDX_CHUNKID_BITMAPPEDPACKS,
+                         bitmapped_packs_concat_len,
+                         write_midx_bitmapped_packs);
+       }
+
+       write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
+       write_chunkfile(cf, &ctx);
+
+       finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
+                         CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+       free_chunkfile(cf);
+
+       if (flags & MIDX_WRITE_REV_INDEX &&
+           git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
+               write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);
+
+       if (flags & MIDX_WRITE_BITMAP) {
+               struct packing_data pdata;
+               struct commit **commits;
+               uint32_t commits_nr;
+
+               if (!ctx.entries_nr)
+                       BUG("cannot write a bitmap without any objects");
+
+               prepare_midx_packing_data(&pdata, &ctx);
+
+               commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);
+
+               /*
+                * The previous steps translated the information from
+                * 'entries' into information suitable for constructing
+                * bitmaps. We no longer need that array, so clear it to
+                * reduce memory pressure.
+                */
+               FREE_AND_NULL(ctx.entries);
+               ctx.entries_nr = 0;
+
+               if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata,
+                                     commits, commits_nr, ctx.pack_order,
+                                     flags) < 0) {
+                       error(_("could not write multi-pack bitmap"));
+                       result = 1;
+                       clear_packing_data(&pdata);
+                       free(commits);
+                       goto cleanup;
+               }
+
+               clear_packing_data(&pdata);
+               free(commits);
+       }
+       /*
+        * NOTE: Do not use ctx.entries beyond this point, since it might
+        * have been freed in the previous if block.
+        */
+
+       if (ctx.m)
+               close_object_store(the_repository->objects);
+
+       if (commit_lock_file(&lk) < 0)
+               die_errno(_("could not write multi-pack-index"));
+
+       clear_midx_files_ext(object_dir, ".bitmap", midx_hash);
+       clear_midx_files_ext(object_dir, ".rev", midx_hash);
+
+cleanup:
+       for (i = 0; i < ctx.nr; i++) {
+               if (ctx.info[i].p) {
+                       close_pack(ctx.info[i].p);
+                       free(ctx.info[i].p);
+               }
+               free(ctx.info[i].pack_name);
+       }
+
+       free(ctx.info);
+       free(ctx.entries);
+       free(ctx.pack_perm);
+       free(ctx.pack_order);
+       strbuf_release(&midx_name);
+
+       trace2_region_leave("midx", "write_midx_internal", the_repository);
+
+       return result;
+}
+
+int write_midx_file(const char *object_dir,
+                   const char *preferred_pack_name,
+                   const char *refs_snapshot,
+                   unsigned flags)
+{
+       return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
+                                  refs_snapshot, flags);
+}
+
+int write_midx_file_only(const char *object_dir,
+                        struct string_list *packs_to_include,
+                        const char *preferred_pack_name,
+                        const char *refs_snapshot,
+                        unsigned flags)
+{
+       return write_midx_internal(object_dir, packs_to_include, NULL,
+                                  preferred_pack_name, refs_snapshot, flags);
+}
+
+int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
+{
+       uint32_t i, *count, result = 0;
+       struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
+       struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+       struct progress *progress = NULL;
+
+       if (!m)
+               return 0;
+
+       CALLOC_ARRAY(count, m->num_packs);
+
+       if (flags & MIDX_PROGRESS)
+               progress = start_delayed_progress(_("Counting referenced objects"),
+                                         m->num_objects);
+       for (i = 0; i < m->num_objects; i++) {
+               int pack_int_id = nth_midxed_pack_int_id(m, i);
+               count[pack_int_id]++;
+               display_progress(progress, i + 1);
+       }
+       stop_progress(&progress);
+
+       if (flags & MIDX_PROGRESS)
+               progress = start_delayed_progress(_("Finding and deleting unreferenced packfiles"),
+                                         m->num_packs);
+       for (i = 0; i < m->num_packs; i++) {
+               char *pack_name;
+               display_progress(progress, i + 1);
+
+               if (count[i])
+                       continue;
+
+               if (prepare_midx_pack(r, m, i))
+                       continue;
+
+               if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
+                       continue;
+
+               pack_name = xstrdup(m->packs[i]->pack_name);
+               close_pack(m->packs[i]);
+
+               string_list_insert(&packs_to_drop, m->pack_names[i]);
+               unlink_pack_path(pack_name, 0);
+               free(pack_name);
+       }
+       stop_progress(&progress);
+
+       free(count);
+
+       if (packs_to_drop.nr)
+               result = write_midx_internal(object_dir, NULL, &packs_to_drop, NULL, NULL, flags);
+
+       string_list_clear(&packs_to_drop, 0);
+
+       return result;
+}
+
+struct repack_info {
+       timestamp_t mtime;
+       uint32_t referenced_objects;
+       uint32_t pack_int_id;
+};
+
+static int compare_by_mtime(const void *a_, const void *b_)
+{
+       const struct repack_info *a, *b;
+
+       a = (const struct repack_info *)a_;
+       b = (const struct repack_info *)b_;
+
+       if (a->mtime < b->mtime)
+               return -1;
+       if (a->mtime > b->mtime)
+               return 1;
+       return 0;
+}
+
+static int want_included_pack(struct repository *r,
+                             struct multi_pack_index *m,
+                             int pack_kept_objects,
+                             uint32_t pack_int_id)
+{
+       struct packed_git *p;
+       if (prepare_midx_pack(r, m, pack_int_id))
+               return 0;
+       p = m->packs[pack_int_id];
+       if (!pack_kept_objects && p->pack_keep)
+               return 0;
+       if (p->is_cruft)
+               return 0;
+       if (open_pack_index(p) || !p->num_objects)
+               return 0;
+       return 1;
+}
+
+static void fill_included_packs_all(struct repository *r,
+                                   struct multi_pack_index *m,
+                                   unsigned char *include_pack)
+{
+       uint32_t i;
+       int pack_kept_objects = 0;
+
+       repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
+
+       for (i = 0; i < m->num_packs; i++) {
+               if (!want_included_pack(r, m, pack_kept_objects, i))
+                       continue;
+
+               include_pack[i] = 1;
+       }
+}
+
+static void fill_included_packs_batch(struct repository *r,
+                                     struct multi_pack_index *m,
+                                     unsigned char *include_pack,
+                                     size_t batch_size)
+{
+       uint32_t i;
+       size_t total_size;
+       struct repack_info *pack_info;
+       int pack_kept_objects = 0;
+
+       CALLOC_ARRAY(pack_info, m->num_packs);
+
+       repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
+
+       for (i = 0; i < m->num_packs; i++) {
+               pack_info[i].pack_int_id = i;
+
+               if (prepare_midx_pack(r, m, i))
+                       continue;
+
+               pack_info[i].mtime = m->packs[i]->mtime;
+       }
+
+       for (i = 0; i < m->num_objects; i++) {
+               uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
+               pack_info[pack_int_id].referenced_objects++;
+       }
+
+       QSORT(pack_info, m->num_packs, compare_by_mtime);
+
+       total_size = 0;
+       for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
+               int pack_int_id = pack_info[i].pack_int_id;
+               struct packed_git *p = m->packs[pack_int_id];
+               size_t expected_size;
+
+               if (!want_included_pack(r, m, pack_kept_objects, pack_int_id))
+                       continue;
+
+               expected_size = st_mult(p->pack_size,
+                                       pack_info[i].referenced_objects);
+               expected_size /= p->num_objects;
+
+               if (expected_size >= batch_size)
+                       continue;
+
+               total_size += expected_size;
+               include_pack[pack_int_id] = 1;
+       }
+
+       free(pack_info);
+}
+
+int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
+{
+       int result = 0;
+       uint32_t i, packs_to_repack = 0;
+       unsigned char *include_pack;
+       struct child_process cmd = CHILD_PROCESS_INIT;
+       FILE *cmd_in;
+       struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+
+       /*
+        * When updating the default for these configuration
+        * variables in builtin/repack.c, these must be adjusted
+        * to match.
+        */
+       int delta_base_offset = 1;
+       int use_delta_islands = 0;
+
+       if (!m)
+               return 0;
+
+       CALLOC_ARRAY(include_pack, m->num_packs);
+
+       if (batch_size)
+               fill_included_packs_batch(r, m, include_pack, batch_size);
+       else
+               fill_included_packs_all(r, m, include_pack);
+
+       for (i = 0; i < m->num_packs; i++) {
+               if (include_pack[i])
+                       packs_to_repack++;
+       }
+       if (packs_to_repack <= 1)
+               goto cleanup;
+
+       repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
+       repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);
+
+       strvec_pushl(&cmd.args, "pack-objects", "--stdin-packs", "--non-empty",
+                    NULL);
+
+       strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);
+
+       if (delta_base_offset)
+               strvec_push(&cmd.args, "--delta-base-offset");
+       if (use_delta_islands)
+               strvec_push(&cmd.args, "--delta-islands");
+
+       if (flags & MIDX_PROGRESS)
+               strvec_push(&cmd.args, "--progress");
+       else
+               strvec_push(&cmd.args, "-q");
+
+       cmd.git_cmd = 1;
+       cmd.in = cmd.out = -1;
+
+       if (start_command(&cmd)) {
+               error(_("could not start pack-objects"));
+               result = 1;
+               goto cleanup;
+       }
+
+       cmd_in = xfdopen(cmd.in, "w");
+       for (i = 0; i < m->num_packs; i++) {
+               struct packed_git *p = m->packs[i];
+               if (!p)
+                       continue;
+
+               if (include_pack[i])
+                       fprintf(cmd_in, "%s\n", pack_basename(p));
+               else
+                       fprintf(cmd_in, "^%s\n", pack_basename(p));
+       }
+       fclose(cmd_in);
+
+       if (finish_command(&cmd)) {
+               error(_("could not finish pack-objects"));
+               result = 1;
+               goto cleanup;
+       }
+
+       result = write_midx_internal(object_dir, NULL, NULL, NULL, NULL, flags);
+
+cleanup:
+       free(include_pack);
+       return result;
+}
diff --git a/midx.c b/midx.c
index 41521e019c66d25ba538f71262d8dd73ff5cfa1d..ae3b49166c7018a08c2b9bd7c7e867b9a3939578 100644 (file)
--- a/midx.c
+++ b/midx.c
@@ -1,52 +1,22 @@
 #include "git-compat-util.h"
-#include "abspath.h"
 #include "config.h"
-#include "csum-file.h"
 #include "dir.h"
-#include "gettext.h"
 #include "hex.h"
-#include "lockfile.h"
 #include "packfile.h"
 #include "object-file.h"
-#include "object-store-ll.h"
 #include "hash-lookup.h"
 #include "midx.h"
 #include "progress.h"
 #include "trace2.h"
-#include "run-command.h"
-#include "repository.h"
 #include "chunk-format.h"
-#include "pack.h"
 #include "pack-bitmap.h"
-#include "refs.h"
-#include "revision.h"
-#include "list-objects.h"
 #include "pack-revindex.h"
 
-#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
-#define MIDX_VERSION 1
-#define MIDX_BYTE_FILE_VERSION 4
-#define MIDX_BYTE_HASH_VERSION 5
-#define MIDX_BYTE_NUM_CHUNKS 6
-#define MIDX_BYTE_NUM_PACKS 8
-#define MIDX_HEADER_SIZE 12
-#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
-
-#define MIDX_CHUNK_ALIGNMENT 4
-#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
-#define MIDX_CHUNKID_BITMAPPEDPACKS 0x42544d50 /* "BTMP" */
-#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
-#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
-#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
-#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
-#define MIDX_CHUNKID_REVINDEX 0x52494458 /* "RIDX" */
-#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
-#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
-#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
-#define MIDX_CHUNK_BITMAPPED_PACKS_WIDTH (2 * sizeof(uint32_t))
-#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
-
-#define PACK_EXPIRED UINT_MAX
+int midx_checksum_valid(struct multi_pack_index *m);
+void clear_midx_files_ext(const char *object_dir, const char *ext,
+                         unsigned char *keep_hash);
+int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+                        const char *idx_name);
 
 const unsigned char *get_midx_checksum(struct multi_pack_index *m)
 {
@@ -115,6 +85,8 @@ static int midx_read_object_offsets(const unsigned char *chunk_start,
        return 0;
 }
 
+#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
+
 struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local)
 {
        struct multi_pack_index *m = NULL;
@@ -294,6 +266,8 @@ int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t
        return 0;
 }
 
+#define MIDX_CHUNK_BITMAPPED_PACKS_WIDTH (2 * sizeof(uint32_t))
+
 int nth_bitmapped_pack(struct repository *r, struct multi_pack_index *m,
                       struct bitmapped_pack *bp, uint32_t pack_int_id)
 {
@@ -400,8 +374,8 @@ int fill_midx_entry(struct repository *r,
 }
 
 /* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
-static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
-                               const char *idx_name)
+int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+                        const char *idx_name)
 {
        /* Skip past any initial matching prefix. */
        while (*idx_name && *idx_name == *idx_or_pack_name) {
@@ -508,1731 +482,232 @@ int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, i
        return 0;
 }
 
-static size_t write_midx_header(struct hashfile *f,
-                               unsigned char num_chunks,
-                               uint32_t num_packs)
-{
-       hashwrite_be32(f, MIDX_SIGNATURE);
-       hashwrite_u8(f, MIDX_VERSION);
-       hashwrite_u8(f, oid_version(the_hash_algo));
-       hashwrite_u8(f, num_chunks);
-       hashwrite_u8(f, 0); /* unused */
-       hashwrite_be32(f, num_packs);
-
-       return MIDX_HEADER_SIZE;
-}
-
-#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
-
-struct pack_info {
-       uint32_t orig_pack_int_id;
-       char *pack_name;
-       struct packed_git *p;
-
-       uint32_t bitmap_pos;
-       uint32_t bitmap_nr;
-
-       unsigned expired : 1;
-};
-
-static void fill_pack_info(struct pack_info *info,
-                          struct packed_git *p, const char *pack_name,
-                          uint32_t orig_pack_int_id)
-{
-       memset(info, 0, sizeof(struct pack_info));
-
-       info->orig_pack_int_id = orig_pack_int_id;
-       info->pack_name = xstrdup(pack_name);
-       info->p = p;
-       info->bitmap_pos = BITMAP_POS_UNKNOWN;
-}
-
-static int pack_info_compare(const void *_a, const void *_b)
-{
-       struct pack_info *a = (struct pack_info *)_a;
-       struct pack_info *b = (struct pack_info *)_b;
-       return strcmp(a->pack_name, b->pack_name);
-}
-
-static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
+int midx_checksum_valid(struct multi_pack_index *m)
 {
-       const char *pack_name = _va;
-       const struct pack_info *compar = _vb;
-
-       return cmp_idx_or_pack_name(pack_name, compar->pack_name);
+       return hashfile_checksum_valid(m->data, m->data_len);
 }
 
-struct write_midx_context {
-       struct pack_info *info;
-       size_t nr;
-       size_t alloc;
-       struct multi_pack_index *m;
-       struct progress *progress;
-       unsigned pack_paths_checked;
-
-       struct pack_midx_entry *entries;
-       size_t entries_nr;
-
-       uint32_t *pack_perm;
-       uint32_t *pack_order;
-       unsigned large_offsets_needed:1;
-       uint32_t num_large_offsets;
-
-       int preferred_pack_idx;
-
-       struct string_list *to_include;
+struct clear_midx_data {
+       char *keep;
+       const char *ext;
 };
 
-static void add_pack_to_midx(const char *full_path, size_t full_path_len,
-                            const char *file_name, void *data)
+static void clear_midx_file_ext(const char *full_path, size_t full_path_len UNUSED,
+                               const char *file_name, void *_data)
 {
-       struct write_midx_context *ctx = data;
-       struct packed_git *p;
-
-       if (ends_with(file_name, ".idx")) {
-               display_progress(ctx->progress, ++ctx->pack_paths_checked);
-               /*
-                * Note that at most one of ctx->m and ctx->to_include are set,
-                * so we are testing midx_contains_pack() and
-                * string_list_has_string() independently (guarded by the
-                * appropriate NULL checks).
-                *
-                * We could support passing to_include while reusing an existing
-                * MIDX, but don't currently since the reuse process drags
-                * forward all packs from an existing MIDX (without checking
-                * whether or not they appear in the to_include list).
-                *
-                * If we added support for that, these next two conditional
-                * should be performed independently (likely checking
-                * to_include before the existing MIDX).
-                */
-               if (ctx->m && midx_contains_pack(ctx->m, file_name))
-                       return;
-               else if (ctx->to_include &&
-                        !string_list_has_string(ctx->to_include, file_name))
-                       return;
-
-               ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
-
-               p = add_packed_git(full_path, full_path_len, 0);
-               if (!p) {
-                       warning(_("failed to add packfile '%s'"),
-                               full_path);
-                       return;
-               }
+       struct clear_midx_data *data = _data;
 
-               if (open_pack_index(p)) {
-                       warning(_("failed to open pack-index '%s'"),
-                               full_path);
-                       close_pack(p);
-                       free(p);
-                       return;
-               }
+       if (!(starts_with(file_name, "multi-pack-index-") &&
+             ends_with(file_name, data->ext)))
+               return;
+       if (data->keep && !strcmp(data->keep, file_name))
+               return;
 
-               fill_pack_info(&ctx->info[ctx->nr], p, file_name, ctx->nr);
-               ctx->nr++;
-       }
+       if (unlink(full_path))
+               die_errno(_("failed to remove %s"), full_path);
 }
 
-struct pack_midx_entry {
-       struct object_id oid;
-       uint32_t pack_int_id;
-       time_t pack_mtime;
-       uint64_t offset;
-       unsigned preferred : 1;
-};
-
-static int midx_oid_compare(const void *_a, const void *_b)
+void clear_midx_files_ext(const char *object_dir, const char *ext,
+                         unsigned char *keep_hash)
 {
-       const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
-       const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
-       int cmp = oidcmp(&a->oid, &b->oid);
-
-       if (cmp)
-               return cmp;
+       struct clear_midx_data data;
+       memset(&data, 0, sizeof(struct clear_midx_data));
 
-       /* Sort objects in a preferred pack first when multiple copies exist. */
-       if (a->preferred > b->preferred)
-               return -1;
-       if (a->preferred < b->preferred)
-               return 1;
+       if (keep_hash)
+               data.keep = xstrfmt("multi-pack-index-%s%s",
+                                   hash_to_hex(keep_hash), ext);
+       data.ext = ext;
 
-       if (a->pack_mtime > b->pack_mtime)
-               return -1;
-       else if (a->pack_mtime < b->pack_mtime)
-               return 1;
+       for_each_file_in_pack_dir(object_dir,
+                                 clear_midx_file_ext,
+                                 &data);
 
-       return a->pack_int_id - b->pack_int_id;
+       free(data.keep);
 }
 
-static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
-                                     struct pack_midx_entry *e,
-                                     uint32_t pos)
+void clear_midx_file(struct repository *r)
 {
-       if (pos >= m->num_objects)
-               return 1;
+       struct strbuf midx = STRBUF_INIT;
 
-       nth_midxed_object_oid(&e->oid, m, pos);
-       e->pack_int_id = nth_midxed_pack_int_id(m, pos);
-       e->offset = nth_midxed_offset(m, pos);
+       get_midx_filename(&midx, r->objects->odb->path);
 
-       /* consider objects in midx to be from "old" packs */
-       e->pack_mtime = 0;
-       return 0;
-}
+       if (r->objects && r->objects->multi_pack_index) {
+               close_midx(r->objects->multi_pack_index);
+               r->objects->multi_pack_index = NULL;
+       }
 
-static void fill_pack_entry(uint32_t pack_int_id,
-                           struct packed_git *p,
-                           uint32_t cur_object,
-                           struct pack_midx_entry *entry,
-                           int preferred)
-{
-       if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
-               die(_("failed to locate object %d in packfile"), cur_object);
+       if (remove_path(midx.buf))
+               die(_("failed to clear multi-pack-index at %s"), midx.buf);
 
-       entry->pack_int_id = pack_int_id;
-       entry->pack_mtime = p->mtime;
+       clear_midx_files_ext(r->objects->odb->path, ".bitmap", NULL);
+       clear_midx_files_ext(r->objects->odb->path, ".rev", NULL);
 
-       entry->offset = nth_packed_object_offset(p, cur_object);
-       entry->preferred = !!preferred;
+       strbuf_release(&midx);
 }
 
-struct midx_fanout {
-       struct pack_midx_entry *entries;
-       size_t nr, alloc;
-};
+static int verify_midx_error;
 
-static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
+__attribute__((format (printf, 1, 2)))
+static void midx_report(const char *fmt, ...)
 {
-       if (nr < fanout->nr)
-               BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
-                   (uintmax_t)nr, (uintmax_t)fanout->nr);
-       ALLOC_GROW(fanout->entries, nr, fanout->alloc);
+       va_list ap;
+       verify_midx_error = 1;
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       fprintf(stderr, "\n");
+       va_end(ap);
 }
 
-static void midx_fanout_sort(struct midx_fanout *fanout)
+struct pair_pos_vs_id
 {
-       QSORT(fanout->entries, fanout->nr, midx_oid_compare);
-}
+       uint32_t pos;
+       uint32_t pack_int_id;
+};
 
-static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
-                                       struct multi_pack_index *m,
-                                       uint32_t cur_fanout,
-                                       int preferred_pack)
+static int compare_pair_pos_vs_id(const void *_a, const void *_b)
 {
-       uint32_t start = 0, end;
-       uint32_t cur_object;
-
-       if (cur_fanout)
-               start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
-       end = ntohl(m->chunk_oid_fanout[cur_fanout]);
-
-       for (cur_object = start; cur_object < end; cur_object++) {
-               if ((preferred_pack > -1) &&
-                   (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
-                       /*
-                        * Objects from preferred packs are added
-                        * separately.
-                        */
-                       continue;
-               }
-
-               midx_fanout_grow(fanout, fanout->nr + 1);
-               nth_midxed_pack_midx_entry(m,
-                                          &fanout->entries[fanout->nr],
-                                          cur_object);
-               fanout->entries[fanout->nr].preferred = 0;
-               fanout->nr++;
-       }
-}
+       struct pair_pos_vs_id *a = (struct pair_pos_vs_id *)_a;
+       struct pair_pos_vs_id *b = (struct pair_pos_vs_id *)_b;
 
-static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
-                                       struct pack_info *info,
-                                       uint32_t cur_pack,
-                                       int preferred,
-                                       uint32_t cur_fanout)
-{
-       struct packed_git *pack = info[cur_pack].p;
-       uint32_t start = 0, end;
-       uint32_t cur_object;
-
-       if (cur_fanout)
-               start = get_pack_fanout(pack, cur_fanout - 1);
-       end = get_pack_fanout(pack, cur_fanout);
-
-       for (cur_object = start; cur_object < end; cur_object++) {
-               midx_fanout_grow(fanout, fanout->nr + 1);
-               fill_pack_entry(cur_pack,
-                               info[cur_pack].p,
-                               cur_object,
-                               &fanout->entries[fanout->nr],
-                               preferred);
-               fanout->nr++;
-       }
+       return b->pack_int_id - a->pack_int_id;
 }
 
 /*
- * It is possible to artificially get into a state where there are many
- * duplicate copies of objects. That can create high memory pressure if
- * we are to create a list of all objects before de-duplication. To reduce
- * this memory pressure without a significant performance drop, automatically
- * group objects by the first byte of their object id. Use the IDX fanout
- * tables to group the data, copy to a local array, then sort.
- *
- * Copy only the de-duplicated entries (selected by most-recent modified time
- * of a packfile containing the object).
+ * Limit calls to display_progress() for performance reasons.
+ * The interval here was arbitrarily chosen.
  */
-static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
-                                                 struct pack_info *info,
-                                                 uint32_t nr_packs,
-                                                 size_t *nr_objects,
-                                                 int preferred_pack)
-{
-       uint32_t cur_fanout, cur_pack, cur_object;
-       size_t alloc_objects, total_objects = 0;
-       struct midx_fanout fanout = { 0 };
-       struct pack_midx_entry *deduplicated_entries = NULL;
-       uint32_t start_pack = m ? m->num_packs : 0;
-
-       for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
-               total_objects = st_add(total_objects,
-                                      info[cur_pack].p->num_objects);
-
-       /*
-        * As we de-duplicate by fanout value, we expect the fanout
-        * slices to be evenly distributed, with some noise. Hence,
-        * allocate slightly more than one 256th.
-        */
-       alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;
-
-       ALLOC_ARRAY(fanout.entries, fanout.alloc);
-       ALLOC_ARRAY(deduplicated_entries, alloc_objects);
-       *nr_objects = 0;
-
-       for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
-               fanout.nr = 0;
-
-               if (m)
-                       midx_fanout_add_midx_fanout(&fanout, m, cur_fanout,
-                                                   preferred_pack);
+#define SPARSE_PROGRESS_INTERVAL (1 << 12)
+#define midx_display_sparse_progress(progress, n) \
+       do { \
+               uint64_t _n = (n); \
+               if ((_n & (SPARSE_PROGRESS_INTERVAL - 1)) == 0) \
+                       display_progress(progress, _n); \
+       } while (0)
 
-               for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
-                       int preferred = cur_pack == preferred_pack;
-                       midx_fanout_add_pack_fanout(&fanout,
-                                                   info, cur_pack,
-                                                   preferred, cur_fanout);
-               }
+int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags)
+{
+       struct pair_pos_vs_id *pairs = NULL;
+       uint32_t i;
+       struct progress *progress = NULL;
+       struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
+       verify_midx_error = 0;
 
-               if (-1 < preferred_pack && preferred_pack < start_pack)
-                       midx_fanout_add_pack_fanout(&fanout, info,
-                                                   preferred_pack, 1,
-                                                   cur_fanout);
+       if (!m) {
+               int result = 0;
+               struct stat sb;
+               struct strbuf filename = STRBUF_INIT;
 
-               midx_fanout_sort(&fanout);
+               get_midx_filename(&filename, object_dir);
 
-               /*
-                * The batch is now sorted by OID and then mtime (descending).
-                * Take only the first duplicate.
-                */
-               for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
-                       if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
-                                               &fanout.entries[cur_object].oid))
-                               continue;
-
-                       ALLOC_GROW(deduplicated_entries, st_add(*nr_objects, 1),
-                                  alloc_objects);
-                       memcpy(&deduplicated_entries[*nr_objects],
-                              &fanout.entries[cur_object],
-                              sizeof(struct pack_midx_entry));
-                       (*nr_objects)++;
+               if (!stat(filename.buf, &sb)) {
+                       error(_("multi-pack-index file exists, but failed to parse"));
+                       result = 1;
                }
+               strbuf_release(&filename);
+               return result;
        }
 
-       free(fanout.entries);
-       return deduplicated_entries;
-}
-
-static int write_midx_pack_names(struct hashfile *f, void *data)
-{
-       struct write_midx_context *ctx = data;
-       uint32_t i;
-       unsigned char padding[MIDX_CHUNK_ALIGNMENT];
-       size_t written = 0;
-
-       for (i = 0; i < ctx->nr; i++) {
-               size_t writelen;
-
-               if (ctx->info[i].expired)
-                       continue;
+       if (!midx_checksum_valid(m))
+               midx_report(_("incorrect checksum"));
 
-               if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
-                       BUG("incorrect pack-file order: %s before %s",
-                           ctx->info[i - 1].pack_name,
-                           ctx->info[i].pack_name);
+       if (flags & MIDX_PROGRESS)
+               progress = start_delayed_progress(_("Looking for referenced packfiles"),
+                                         m->num_packs);
+       for (i = 0; i < m->num_packs; i++) {
+               if (prepare_midx_pack(r, m, i))
+                       midx_report("failed to load pack in position %d", i);
 
-               writelen = strlen(ctx->info[i].pack_name) + 1;
-               hashwrite(f, ctx->info[i].pack_name, writelen);
-               written += writelen;
+               display_progress(progress, i + 1);
        }
+       stop_progress(&progress);
 
-       /* add padding to be aligned */
-       i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
-       if (i < MIDX_CHUNK_ALIGNMENT) {
-               memset(padding, 0, sizeof(padding));
-               hashwrite(f, padding, i);
+       if (m->num_objects == 0) {
+               midx_report(_("the midx contains no oid"));
+               /*
+                * Remaining tests assume that we have objects, so we can
+                * return here.
+                */
+               goto cleanup;
        }
 
-       return 0;
-}
-
-static int write_midx_bitmapped_packs(struct hashfile *f, void *data)
-{
-       struct write_midx_context *ctx = data;
-       size_t i;
+       if (flags & MIDX_PROGRESS)
+               progress = start_sparse_progress(_("Verifying OID order in multi-pack-index"),
+                                                m->num_objects - 1);
+       for (i = 0; i < m->num_objects - 1; i++) {
+               struct object_id oid1, oid2;
 
-       for (i = 0; i < ctx->nr; i++) {
-               struct pack_info *pack = &ctx->info[i];
-               if (pack->expired)
-                       continue;
+               nth_midxed_object_oid(&oid1, m, i);
+               nth_midxed_object_oid(&oid2, m, i + 1);
 
-               if (pack->bitmap_pos == BITMAP_POS_UNKNOWN && pack->bitmap_nr)
-                       BUG("pack '%s' has no bitmap position, but has %d bitmapped object(s)",
-                           pack->pack_name, pack->bitmap_nr);
+               if (oidcmp(&oid1, &oid2) >= 0)
+                       midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
+                                   i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);
 
-               hashwrite_be32(f, pack->bitmap_pos);
-               hashwrite_be32(f, pack->bitmap_nr);
+               midx_display_sparse_progress(progress, i + 1);
        }
-       return 0;
-}
-
-static int write_midx_oid_fanout(struct hashfile *f,
-                                void *data)
-{
-       struct write_midx_context *ctx = data;
-       struct pack_midx_entry *list = ctx->entries;
-       struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
-       uint32_t count = 0;
-       uint32_t i;
+       stop_progress(&progress);
 
        /*
-       * Write the first-level table (the list is sorted,
-       * but we use a 256-entry lookup to be able to avoid
-       * having to do eight extra binary search iterations).
-       */
-       for (i = 0; i < 256; i++) {
-               struct pack_midx_entry *next = list;
+        * Create an array mapping each object to its packfile id.  Sort it
+        * to group the objects by packfile.  Use this permutation to visit
+        * each of the objects and only require 1 packfile to be open at a
+        * time.
+        */
+       ALLOC_ARRAY(pairs, m->num_objects);
+       for (i = 0; i < m->num_objects; i++) {
+               pairs[i].pos = i;
+               pairs[i].pack_int_id = nth_midxed_pack_int_id(m, i);
+       }
 
-               while (next < last && next->oid.hash[0] == i) {
-                       count++;
-                       next++;
-               }
+       if (flags & MIDX_PROGRESS)
+               progress = start_sparse_progress(_("Sorting objects by packfile"),
+                                                m->num_objects);
+       display_progress(progress, 0); /* TODO: Measure QSORT() progress */
+       QSORT(pairs, m->num_objects, compare_pair_pos_vs_id);
+       stop_progress(&progress);
 
-               hashwrite_be32(f, count);
-               list = next;
-       }
+       if (flags & MIDX_PROGRESS)
+               progress = start_sparse_progress(_("Verifying object offsets"), m->num_objects);
+       for (i = 0; i < m->num_objects; i++) {
+               struct object_id oid;
+               struct pack_entry e;
+               off_t m_offset, p_offset;
 
-       return 0;
-}
+               if (i > 0 && pairs[i-1].pack_int_id != pairs[i].pack_int_id &&
+                   m->packs[pairs[i-1].pack_int_id])
+               {
+                       close_pack_fd(m->packs[pairs[i-1].pack_int_id]);
+                       close_pack_index(m->packs[pairs[i-1].pack_int_id]);
+               }
 
-static int write_midx_oid_lookup(struct hashfile *f,
-                                void *data)
-{
-       struct write_midx_context *ctx = data;
-       unsigned char hash_len = the_hash_algo->rawsz;
-       struct pack_midx_entry *list = ctx->entries;
-       uint32_t i;
+               nth_midxed_object_oid(&oid, m, pairs[i].pos);
 
-       for (i = 0; i < ctx->entries_nr; i++) {
-               struct pack_midx_entry *obj = list++;
+               if (!fill_midx_entry(r, &oid, &e, m)) {
+                       midx_report(_("failed to load pack entry for oid[%d] = %s"),
+                                   pairs[i].pos, oid_to_hex(&oid));
+                       continue;
+               }
 
-               if (i < ctx->entries_nr - 1) {
-                       struct pack_midx_entry *next = list;
-                       if (oidcmp(&obj->oid, &next->oid) >= 0)
-                               BUG("OIDs not in order: %s >= %s",
-                                   oid_to_hex(&obj->oid),
-                                   oid_to_hex(&next->oid));
+               if (open_pack_index(e.p)) {
+                       midx_report(_("failed to load pack-index for packfile %s"),
+                                   e.p->pack_name);
+                       break;
                }
 
-               hashwrite(f, obj->oid.hash, (int)hash_len);
-       }
+               m_offset = e.offset;
+               p_offset = find_pack_entry_one(oid.hash, e.p);
 
-       return 0;
-}
+               if (m_offset != p_offset)
+                       midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
+                                   pairs[i].pos, oid_to_hex(&oid), m_offset, p_offset);
 
-static int write_midx_object_offsets(struct hashfile *f,
-                                    void *data)
-{
-       struct write_midx_context *ctx = data;
-       struct pack_midx_entry *list = ctx->entries;
-       uint32_t i, nr_large_offset = 0;
-
-       for (i = 0; i < ctx->entries_nr; i++) {
-               struct pack_midx_entry *obj = list++;
-
-               if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
-                       BUG("object %s is in an expired pack with int-id %d",
-                           oid_to_hex(&obj->oid),
-                           obj->pack_int_id);
-
-               hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
-
-               if (ctx->large_offsets_needed && obj->offset >> 31)
-                       hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
-               else if (!ctx->large_offsets_needed && obj->offset >> 32)
-                       BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
-                           oid_to_hex(&obj->oid),
-                           obj->offset);
-               else
-                       hashwrite_be32(f, (uint32_t)obj->offset);
+               midx_display_sparse_progress(progress, i + 1);
        }
+       stop_progress(&progress);
 
-       return 0;
-}
-
-static int write_midx_large_offsets(struct hashfile *f,
-                                   void *data)
-{
-       struct write_midx_context *ctx = data;
-       struct pack_midx_entry *list = ctx->entries;
-       struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
-       uint32_t nr_large_offset = ctx->num_large_offsets;
-
-       while (nr_large_offset) {
-               struct pack_midx_entry *obj;
-               uint64_t offset;
-
-               if (list >= end)
-                       BUG("too many large-offset objects");
-
-               obj = list++;
-               offset = obj->offset;
-
-               if (!(offset >> 31))
-                       continue;
-
-               hashwrite_be64(f, offset);
-
-               nr_large_offset--;
-       }
-
-       return 0;
-}
-
-static int write_midx_revindex(struct hashfile *f,
-                              void *data)
-{
-       struct write_midx_context *ctx = data;
-       uint32_t i;
-
-       for (i = 0; i < ctx->entries_nr; i++)
-               hashwrite_be32(f, ctx->pack_order[i]);
-
-       return 0;
-}
-
-struct midx_pack_order_data {
-       uint32_t nr;
-       uint32_t pack;
-       off_t offset;
-};
-
-static int midx_pack_order_cmp(const void *va, const void *vb)
-{
-       const struct midx_pack_order_data *a = va, *b = vb;
-       if (a->pack < b->pack)
-               return -1;
-       else if (a->pack > b->pack)
-               return 1;
-       else if (a->offset < b->offset)
-               return -1;
-       else if (a->offset > b->offset)
-               return 1;
-       else
-               return 0;
-}
-
-static uint32_t *midx_pack_order(struct write_midx_context *ctx)
-{
-       struct midx_pack_order_data *data;
-       uint32_t *pack_order;
-       uint32_t i;
-
-       trace2_region_enter("midx", "midx_pack_order", the_repository);
-
-       ALLOC_ARRAY(data, ctx->entries_nr);
-       for (i = 0; i < ctx->entries_nr; i++) {
-               struct pack_midx_entry *e = &ctx->entries[i];
-               data[i].nr = i;
-               data[i].pack = ctx->pack_perm[e->pack_int_id];
-               if (!e->preferred)
-                       data[i].pack |= (1U << 31);
-               data[i].offset = e->offset;
-       }
-
-       QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
-
-       ALLOC_ARRAY(pack_order, ctx->entries_nr);
-       for (i = 0; i < ctx->entries_nr; i++) {
-               struct pack_midx_entry *e = &ctx->entries[data[i].nr];
-               struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
-               if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
-                       pack->bitmap_pos = i;
-               pack->bitmap_nr++;
-               pack_order[i] = data[i].nr;
-       }
-       for (i = 0; i < ctx->nr; i++) {
-               struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
-               if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
-                       pack->bitmap_pos = 0;
-       }
-       free(data);
-
-       trace2_region_leave("midx", "midx_pack_order", the_repository);
-
-       return pack_order;
-}
-
-static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
-                                    struct write_midx_context *ctx)
-{
-       struct strbuf buf = STRBUF_INIT;
-       const char *tmp_file;
-
-       trace2_region_enter("midx", "write_midx_reverse_index", the_repository);
-
-       strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
-
-       tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
-                                       midx_hash, WRITE_REV);
-
-       if (finalize_object_file(tmp_file, buf.buf))
-               die(_("cannot store reverse index file"));
-
-       strbuf_release(&buf);
-
-       trace2_region_leave("midx", "write_midx_reverse_index", the_repository);
-}
-
-static void clear_midx_files_ext(const char *object_dir, const char *ext,
-                                unsigned char *keep_hash);
-
-static int midx_checksum_valid(struct multi_pack_index *m)
-{
-       return hashfile_checksum_valid(m->data, m->data_len);
-}
-
-static void prepare_midx_packing_data(struct packing_data *pdata,
-                                     struct write_midx_context *ctx)
-{
-       uint32_t i;
-
-       trace2_region_enter("midx", "prepare_midx_packing_data", the_repository);
-
-       memset(pdata, 0, sizeof(struct packing_data));
-       prepare_packing_data(the_repository, pdata);
-
-       for (i = 0; i < ctx->entries_nr; i++) {
-               struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]];
-               struct object_entry *to = packlist_alloc(pdata, &from->oid);
-
-               oe_set_in_pack(pdata, to,
-                              ctx->info[ctx->pack_perm[from->pack_int_id]].p);
-       }
-
-       trace2_region_leave("midx", "prepare_midx_packing_data", the_repository);
-}
-
-static int add_ref_to_pending(const char *refname,
-                             const struct object_id *oid,
-                             int flag, void *cb_data)
-{
-       struct rev_info *revs = (struct rev_info*)cb_data;
-       struct object_id peeled;
-       struct object *object;
-
-       if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
-               warning("symbolic ref is dangling: %s", refname);
-               return 0;
-       }
-
-       if (!peel_iterated_oid(oid, &peeled))
-               oid = &peeled;
-
-       object = parse_object_or_die(oid, refname);
-       if (object->type != OBJ_COMMIT)
-               return 0;
-
-       add_pending_object(revs, object, "");
-       if (bitmap_is_preferred_refname(revs->repo, refname))
-               object->flags |= NEEDS_BITMAP;
-       return 0;
-}
-
-struct bitmap_commit_cb {
-       struct commit **commits;
-       size_t commits_nr, commits_alloc;
-
-       struct write_midx_context *ctx;
-};
-
-static const struct object_id *bitmap_oid_access(size_t index,
-                                                const void *_entries)
-{
-       const struct pack_midx_entry *entries = _entries;
-       return &entries[index].oid;
-}
-
-static void bitmap_show_commit(struct commit *commit, void *_data)
-{
-       struct bitmap_commit_cb *data = _data;
-       int pos = oid_pos(&commit->object.oid, data->ctx->entries,
-                         data->ctx->entries_nr,
-                         bitmap_oid_access);
-       if (pos < 0)
-               return;
-
-       ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc);
-       data->commits[data->commits_nr++] = commit;
-}
-
-static int read_refs_snapshot(const char *refs_snapshot,
-                             struct rev_info *revs)
-{
-       struct strbuf buf = STRBUF_INIT;
-       struct object_id oid;
-       FILE *f = xfopen(refs_snapshot, "r");
-
-       while (strbuf_getline(&buf, f) != EOF) {
-               struct object *object;
-               int preferred = 0;
-               char *hex = buf.buf;
-               const char *end = NULL;
-
-               if (buf.len && *buf.buf == '+') {
-                       preferred = 1;
-                       hex = &buf.buf[1];
-               }
-
-               if (parse_oid_hex(hex, &oid, &end) < 0)
-                       die(_("could not parse line: %s"), buf.buf);
-               if (*end)
-                       die(_("malformed line: %s"), buf.buf);
-
-               object = parse_object_or_die(&oid, NULL);
-               if (preferred)
-                       object->flags |= NEEDS_BITMAP;
-
-               add_pending_object(revs, object, "");
-       }
-
-       fclose(f);
-       strbuf_release(&buf);
-       return 0;
-}
-
-static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
-                                                   const char *refs_snapshot,
-                                                   struct write_midx_context *ctx)
-{
-       struct rev_info revs;
-       struct bitmap_commit_cb cb = {0};
-
-       trace2_region_enter("midx", "find_commits_for_midx_bitmap",
-                           the_repository);
-
-       cb.ctx = ctx;
-
-       repo_init_revisions(the_repository, &revs, NULL);
-       if (refs_snapshot) {
-               read_refs_snapshot(refs_snapshot, &revs);
-       } else {
-               setup_revisions(0, NULL, &revs, NULL);
-               for_each_ref(add_ref_to_pending, &revs);
-       }
-
-       /*
-        * Skipping promisor objects here is intentional, since it only excludes
-        * them from the list of reachable commits that we want to select from
-        * when computing the selection of MIDX'd commits to receive bitmaps.
-        *
-        * Reachability bitmaps do require that their objects be closed under
-        * reachability, but fetching any objects missing from promisors at this
-        * point is too late. But, if one of those objects can be reached from
-        * an another object that is included in the bitmap, then we will
-        * complain later that we don't have reachability closure (and fail
-        * appropriately).
-        */
-       fetch_if_missing = 0;
-       revs.exclude_promisor_objects = 1;
-
-       if (prepare_revision_walk(&revs))
-               die(_("revision walk setup failed"));
-
-       traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
-       if (indexed_commits_nr_p)
-               *indexed_commits_nr_p = cb.commits_nr;
-
-       release_revisions(&revs);
-
-       trace2_region_leave("midx", "find_commits_for_midx_bitmap",
-                           the_repository);
-
-       return cb.commits;
-}
-
-static int write_midx_bitmap(const char *midx_name,
-                            const unsigned char *midx_hash,
-                            struct packing_data *pdata,
-                            struct commit **commits,
-                            uint32_t commits_nr,
-                            uint32_t *pack_order,
-                            unsigned flags)
-{
-       int ret, i;
-       uint16_t options = 0;
-       struct pack_idx_entry **index;
-       char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name,
-                                       hash_to_hex(midx_hash));
-
-       trace2_region_enter("midx", "write_midx_bitmap", the_repository);
-
-       if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
-               options |= BITMAP_OPT_HASH_CACHE;
-
-       if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE)
-               options |= BITMAP_OPT_LOOKUP_TABLE;
-
-       /*
-        * Build the MIDX-order index based on pdata.objects (which is already
-        * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
-        * this order).
-        */
-       ALLOC_ARRAY(index, pdata->nr_objects);
-       for (i = 0; i < pdata->nr_objects; i++)
-               index[i] = &pdata->objects[i].idx;
-
-       bitmap_writer_show_progress(flags & MIDX_PROGRESS);
-       bitmap_writer_build_type_index(pdata, index, pdata->nr_objects);
-
-       /*
-        * bitmap_writer_finish expects objects in lex order, but pack_order
-        * gives us exactly that. use it directly instead of re-sorting the
-        * array.
-        *
-        * This changes the order of objects in 'index' between
-        * bitmap_writer_build_type_index and bitmap_writer_finish.
-        *
-        * The same re-ordering takes place in the single-pack bitmap code via
-        * write_idx_file(), which is called by finish_tmp_packfile(), which
-        * happens between bitmap_writer_build_type_index() and
-        * bitmap_writer_finish().
-        */
-       for (i = 0; i < pdata->nr_objects; i++)
-               index[pack_order[i]] = &pdata->objects[i].idx;
-
-       bitmap_writer_select_commits(commits, commits_nr, -1);
-       ret = bitmap_writer_build(pdata);
-       if (ret < 0)
-               goto cleanup;
-
-       bitmap_writer_set_checksum(midx_hash);
-       bitmap_writer_finish(index, pdata->nr_objects, bitmap_name, options);
-
-cleanup:
-       free(index);
-       free(bitmap_name);
-
-       trace2_region_leave("midx", "write_midx_bitmap", the_repository);
-
-       return ret;
-}
-
-static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
-                                                       const char *object_dir)
-{
-       struct multi_pack_index *result = NULL;
-       struct multi_pack_index *cur;
-       char *obj_dir_real = real_pathdup(object_dir, 1);
-       struct strbuf cur_path_real = STRBUF_INIT;
-
-       /* Ensure the given object_dir is local, or a known alternate. */
-       find_odb(r, obj_dir_real);
-
-       for (cur = get_multi_pack_index(r); cur; cur = cur->next) {
-               strbuf_realpath(&cur_path_real, cur->object_dir, 1);
-               if (!strcmp(obj_dir_real, cur_path_real.buf)) {
-                       result = cur;
-                       goto cleanup;
-               }
-       }
-
-cleanup:
-       free(obj_dir_real);
-       strbuf_release(&cur_path_real);
-       return result;
-}
-
-static int write_midx_internal(const char *object_dir,
-                              struct string_list *packs_to_include,
-                              struct string_list *packs_to_drop,
-                              const char *preferred_pack_name,
-                              const char *refs_snapshot,
-                              unsigned flags)
-{
-       struct strbuf midx_name = STRBUF_INIT;
-       unsigned char midx_hash[GIT_MAX_RAWSZ];
-       uint32_t i;
-       struct hashfile *f = NULL;
-       struct lock_file lk;
-       struct write_midx_context ctx = { 0 };
-       int bitmapped_packs_concat_len = 0;
-       int pack_name_concat_len = 0;
-       int dropped_packs = 0;
-       int result = 0;
-       struct chunkfile *cf;
-
-       trace2_region_enter("midx", "write_midx_internal", the_repository);
-
-       get_midx_filename(&midx_name, object_dir);
-       if (safe_create_leading_directories(midx_name.buf))
-               die_errno(_("unable to create leading directories of %s"),
-                         midx_name.buf);
-
-       if (!packs_to_include) {
-               /*
-                * Only reference an existing MIDX when not filtering which
-                * packs to include, since all packs and objects are copied
-                * blindly from an existing MIDX if one is present.
-                */
-               ctx.m = lookup_multi_pack_index(the_repository, object_dir);
-       }
-
-       if (ctx.m && !midx_checksum_valid(ctx.m)) {
-               warning(_("ignoring existing multi-pack-index; checksum mismatch"));
-               ctx.m = NULL;
-       }
-
-       ctx.nr = 0;
-       ctx.alloc = ctx.m ? ctx.m->num_packs : 16;
-       ctx.info = NULL;
-       ALLOC_ARRAY(ctx.info, ctx.alloc);
-
-       if (ctx.m) {
-               for (i = 0; i < ctx.m->num_packs; i++) {
-                       ALLOC_GROW(ctx.info, ctx.nr + 1, ctx.alloc);
-
-                       if (flags & MIDX_WRITE_REV_INDEX) {
-                               /*
-                                * If generating a reverse index, need to have
-                                * packed_git's loaded to compare their
-                                * mtimes and object count.
-                                */
-                               if (prepare_midx_pack(the_repository, ctx.m, i)) {
-                                       error(_("could not load pack"));
-                                       result = 1;
-                                       goto cleanup;
-                               }
-
-                               if (open_pack_index(ctx.m->packs[i]))
-                                       die(_("could not open index for %s"),
-                                           ctx.m->packs[i]->pack_name);
-                       }
-
-                       fill_pack_info(&ctx.info[ctx.nr++], ctx.m->packs[i],
-                                      ctx.m->pack_names[i], i);
-               }
-       }
-
-       ctx.pack_paths_checked = 0;
-       if (flags & MIDX_PROGRESS)
-               ctx.progress = start_delayed_progress(_("Adding packfiles to multi-pack-index"), 0);
-       else
-               ctx.progress = NULL;
-
-       ctx.to_include = packs_to_include;
-
-       for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
-       stop_progress(&ctx.progress);
-
-       if ((ctx.m && ctx.nr == ctx.m->num_packs) &&
-           !(packs_to_include || packs_to_drop)) {
-               struct bitmap_index *bitmap_git;
-               int bitmap_exists;
-               int want_bitmap = flags & MIDX_WRITE_BITMAP;
-
-               bitmap_git = prepare_midx_bitmap_git(ctx.m);
-               bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
-               free_bitmap_index(bitmap_git);
-
-               if (bitmap_exists || !want_bitmap) {
-                       /*
-                        * The correct MIDX already exists, and so does a
-                        * corresponding bitmap (or one wasn't requested).
-                        */
-                       if (!want_bitmap)
-                               clear_midx_files_ext(object_dir, ".bitmap",
-                                                    NULL);
-                       goto cleanup;
-               }
-       }
-
-       if (preferred_pack_name) {
-               ctx.preferred_pack_idx = -1;
-
-               for (i = 0; i < ctx.nr; i++) {
-                       if (!cmp_idx_or_pack_name(preferred_pack_name,
-                                                 ctx.info[i].pack_name)) {
-                               ctx.preferred_pack_idx = i;
-                               break;
-                       }
-               }
-
-               if (ctx.preferred_pack_idx == -1)
-                       warning(_("unknown preferred pack: '%s'"),
-                               preferred_pack_name);
-       } else if (ctx.nr &&
-                  (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
-               struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
-               ctx.preferred_pack_idx = 0;
-
-               if (packs_to_drop && packs_to_drop->nr)
-                       BUG("cannot write a MIDX bitmap during expiration");
-
-               /*
-                * set a preferred pack when writing a bitmap to ensure that
-                * the pack from which the first object is selected in pseudo
-                * pack-order has all of its objects selected from that pack
-                * (and not another pack containing a duplicate)
-                */
-               for (i = 1; i < ctx.nr; i++) {
-                       struct packed_git *p = ctx.info[i].p;
-
-                       if (!oldest->num_objects || p->mtime < oldest->mtime) {
-                               oldest = p;
-                               ctx.preferred_pack_idx = i;
-                       }
-               }
-
-               if (!oldest->num_objects) {
-                       /*
-                        * If all packs are empty; unset the preferred index.
-                        * This is acceptable since there will be no duplicate
-                        * objects to resolve, so the preferred value doesn't
-                        * matter.
-                        */
-                       ctx.preferred_pack_idx = -1;
-               }
-       } else {
-               /*
-                * otherwise don't mark any pack as preferred to avoid
-                * interfering with expiration logic below
-                */
-               ctx.preferred_pack_idx = -1;
-       }
-
-       if (ctx.preferred_pack_idx > -1) {
-               struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
-               if (!preferred->num_objects) {
-                       error(_("cannot select preferred pack %s with no objects"),
-                             preferred->pack_name);
-                       result = 1;
-                       goto cleanup;
-               }
-       }
-
-       ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
-                                        ctx.preferred_pack_idx);
-
-       ctx.large_offsets_needed = 0;
-       for (i = 0; i < ctx.entries_nr; i++) {
-               if (ctx.entries[i].offset > 0x7fffffff)
-                       ctx.num_large_offsets++;
-               if (ctx.entries[i].offset > 0xffffffff)
-                       ctx.large_offsets_needed = 1;
-       }
-
-       QSORT(ctx.info, ctx.nr, pack_info_compare);
-
-       if (packs_to_drop && packs_to_drop->nr) {
-               int drop_index = 0;
-               int missing_drops = 0;
-
-               for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
-                       int cmp = strcmp(ctx.info[i].pack_name,
-                                        packs_to_drop->items[drop_index].string);
-
-                       if (!cmp) {
-                               drop_index++;
-                               ctx.info[i].expired = 1;
-                       } else if (cmp > 0) {
-                               error(_("did not see pack-file %s to drop"),
-                                     packs_to_drop->items[drop_index].string);
-                               drop_index++;
-                               missing_drops++;
-                               i--;
-                       } else {
-                               ctx.info[i].expired = 0;
-                       }
-               }
-
-               if (missing_drops) {
-                       result = 1;
-                       goto cleanup;
-               }
-       }
-
-       /*
-        * pack_perm stores a permutation between pack-int-ids from the
-        * previous multi-pack-index to the new one we are writing:
-        *
-        * pack_perm[old_id] = new_id
-        */
-       ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
-       for (i = 0; i < ctx.nr; i++) {
-               if (ctx.info[i].expired) {
-                       dropped_packs++;
-                       ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
-               } else {
-                       ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
-               }
-       }
-
-       for (i = 0; i < ctx.nr; i++) {
-               if (ctx.info[i].expired)
-                       continue;
-               pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
-               bitmapped_packs_concat_len += 2 * sizeof(uint32_t);
-       }
-
-       /* Check that the preferred pack wasn't expired (if given). */
-       if (preferred_pack_name) {
-               struct pack_info *preferred = bsearch(preferred_pack_name,
-                                                     ctx.info, ctx.nr,
-                                                     sizeof(*ctx.info),
-                                                     idx_or_pack_name_cmp);
-               if (preferred) {
-                       uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
-                       if (perm == PACK_EXPIRED)
-                               warning(_("preferred pack '%s' is expired"),
-                                       preferred_pack_name);
-               }
-       }
-
-       if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
-               pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
-                                       (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
-
-       hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
-       f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
-
-       if (ctx.nr - dropped_packs == 0) {
-               error(_("no pack files to index."));
-               result = 1;
-               goto cleanup;
-       }
-
-       if (!ctx.entries_nr) {
-               if (flags & MIDX_WRITE_BITMAP)
-                       warning(_("refusing to write multi-pack .bitmap without any objects"));
-               flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
-       }
-
-       cf = init_chunkfile(f);
-
-       add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
-                 write_midx_pack_names);
-       add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
-                 write_midx_oid_fanout);
-       add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
-                 st_mult(ctx.entries_nr, the_hash_algo->rawsz),
-                 write_midx_oid_lookup);
-       add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
-                 st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
-                 write_midx_object_offsets);
-
-       if (ctx.large_offsets_needed)
-               add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
-                       st_mult(ctx.num_large_offsets,
-                               MIDX_CHUNK_LARGE_OFFSET_WIDTH),
-                       write_midx_large_offsets);
-
-       if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
-               ctx.pack_order = midx_pack_order(&ctx);
-               add_chunk(cf, MIDX_CHUNKID_REVINDEX,
-                         st_mult(ctx.entries_nr, sizeof(uint32_t)),
-                         write_midx_revindex);
-               add_chunk(cf, MIDX_CHUNKID_BITMAPPEDPACKS,
-                         bitmapped_packs_concat_len,
-                         write_midx_bitmapped_packs);
-       }
-
-       write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
-       write_chunkfile(cf, &ctx);
-
-       finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
-                         CSUM_FSYNC | CSUM_HASH_IN_STREAM);
-       free_chunkfile(cf);
-
-       if (flags & MIDX_WRITE_REV_INDEX &&
-           git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
-               write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);
-
-       if (flags & MIDX_WRITE_BITMAP) {
-               struct packing_data pdata;
-               struct commit **commits;
-               uint32_t commits_nr;
-
-               if (!ctx.entries_nr)
-                       BUG("cannot write a bitmap without any objects");
-
-               prepare_midx_packing_data(&pdata, &ctx);
-
-               commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);
-
-               /*
-                * The previous steps translated the information from
-                * 'entries' into information suitable for constructing
-                * bitmaps. We no longer need that array, so clear it to
-                * reduce memory pressure.
-                */
-               FREE_AND_NULL(ctx.entries);
-               ctx.entries_nr = 0;
-
-               if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata,
-                                     commits, commits_nr, ctx.pack_order,
-                                     flags) < 0) {
-                       error(_("could not write multi-pack bitmap"));
-                       result = 1;
-                       clear_packing_data(&pdata);
-                       free(commits);
-                       goto cleanup;
-               }
-
-               clear_packing_data(&pdata);
-               free(commits);
-       }
-       /*
-        * NOTE: Do not use ctx.entries beyond this point, since it might
-        * have been freed in the previous if block.
-        */
-
-       if (ctx.m)
-               close_object_store(the_repository->objects);
-
-       if (commit_lock_file(&lk) < 0)
-               die_errno(_("could not write multi-pack-index"));
-
-       clear_midx_files_ext(object_dir, ".bitmap", midx_hash);
-       clear_midx_files_ext(object_dir, ".rev", midx_hash);
-
-cleanup:
-       for (i = 0; i < ctx.nr; i++) {
-               if (ctx.info[i].p) {
-                       close_pack(ctx.info[i].p);
-                       free(ctx.info[i].p);
-               }
-               free(ctx.info[i].pack_name);
-       }
-
-       free(ctx.info);
-       free(ctx.entries);
-       free(ctx.pack_perm);
-       free(ctx.pack_order);
-       strbuf_release(&midx_name);
-
-       trace2_region_leave("midx", "write_midx_internal", the_repository);
-
-       return result;
-}
-
-int write_midx_file(const char *object_dir,
-                   const char *preferred_pack_name,
-                   const char *refs_snapshot,
-                   unsigned flags)
-{
-       return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
-                                  refs_snapshot, flags);
-}
-
-int write_midx_file_only(const char *object_dir,
-                        struct string_list *packs_to_include,
-                        const char *preferred_pack_name,
-                        const char *refs_snapshot,
-                        unsigned flags)
-{
-       return write_midx_internal(object_dir, packs_to_include, NULL,
-                                  preferred_pack_name, refs_snapshot, flags);
-}
-
-struct clear_midx_data {
-       char *keep;
-       const char *ext;
-};
-
-static void clear_midx_file_ext(const char *full_path, size_t full_path_len UNUSED,
-                               const char *file_name, void *_data)
-{
-       struct clear_midx_data *data = _data;
-
-       if (!(starts_with(file_name, "multi-pack-index-") &&
-             ends_with(file_name, data->ext)))
-               return;
-       if (data->keep && !strcmp(data->keep, file_name))
-               return;
-
-       if (unlink(full_path))
-               die_errno(_("failed to remove %s"), full_path);
-}
-
-static void clear_midx_files_ext(const char *object_dir, const char *ext,
-                                unsigned char *keep_hash)
-{
-       struct clear_midx_data data;
-       memset(&data, 0, sizeof(struct clear_midx_data));
-
-       if (keep_hash)
-               data.keep = xstrfmt("multi-pack-index-%s%s",
-                                   hash_to_hex(keep_hash), ext);
-       data.ext = ext;
-
-       for_each_file_in_pack_dir(object_dir,
-                                 clear_midx_file_ext,
-                                 &data);
-
-       free(data.keep);
-}
-
-void clear_midx_file(struct repository *r)
-{
-       struct strbuf midx = STRBUF_INIT;
-
-       get_midx_filename(&midx, r->objects->odb->path);
-
-       if (r->objects && r->objects->multi_pack_index) {
-               close_midx(r->objects->multi_pack_index);
-               r->objects->multi_pack_index = NULL;
-       }
-
-       if (remove_path(midx.buf))
-               die(_("failed to clear multi-pack-index at %s"), midx.buf);
-
-       clear_midx_files_ext(r->objects->odb->path, ".bitmap", NULL);
-       clear_midx_files_ext(r->objects->odb->path, ".rev", NULL);
-
-       strbuf_release(&midx);
-}
-
-static int verify_midx_error;
-
-__attribute__((format (printf, 1, 2)))
-static void midx_report(const char *fmt, ...)
-{
-       va_list ap;
-       verify_midx_error = 1;
-       va_start(ap, fmt);
-       vfprintf(stderr, fmt, ap);
-       fprintf(stderr, "\n");
-       va_end(ap);
-}
-
-struct pair_pos_vs_id
-{
-       uint32_t pos;
-       uint32_t pack_int_id;
-};
-
-static int compare_pair_pos_vs_id(const void *_a, const void *_b)
-{
-       struct pair_pos_vs_id *a = (struct pair_pos_vs_id *)_a;
-       struct pair_pos_vs_id *b = (struct pair_pos_vs_id *)_b;
-
-       return b->pack_int_id - a->pack_int_id;
-}
-
-/*
- * Limit calls to display_progress() for performance reasons.
- * The interval here was arbitrarily chosen.
- */
-#define SPARSE_PROGRESS_INTERVAL (1 << 12)
-#define midx_display_sparse_progress(progress, n) \
-       do { \
-               uint64_t _n = (n); \
-               if ((_n & (SPARSE_PROGRESS_INTERVAL - 1)) == 0) \
-                       display_progress(progress, _n); \
-       } while (0)
-
-int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags)
-{
-       struct pair_pos_vs_id *pairs = NULL;
-       uint32_t i;
-       struct progress *progress = NULL;
-       struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
-       verify_midx_error = 0;
-
-       if (!m) {
-               int result = 0;
-               struct stat sb;
-               struct strbuf filename = STRBUF_INIT;
-
-               get_midx_filename(&filename, object_dir);
-
-               if (!stat(filename.buf, &sb)) {
-                       error(_("multi-pack-index file exists, but failed to parse"));
-                       result = 1;
-               }
-               strbuf_release(&filename);
-               return result;
-       }
-
-       if (!midx_checksum_valid(m))
-               midx_report(_("incorrect checksum"));
-
-       if (flags & MIDX_PROGRESS)
-               progress = start_delayed_progress(_("Looking for referenced packfiles"),
-                                         m->num_packs);
-       for (i = 0; i < m->num_packs; i++) {
-               if (prepare_midx_pack(r, m, i))
-                       midx_report("failed to load pack in position %d", i);
-
-               display_progress(progress, i + 1);
-       }
-       stop_progress(&progress);
-
-       if (m->num_objects == 0) {
-               midx_report(_("the midx contains no oid"));
-               /*
-                * Remaining tests assume that we have objects, so we can
-                * return here.
-                */
-               goto cleanup;
-       }
-
-       if (flags & MIDX_PROGRESS)
-               progress = start_sparse_progress(_("Verifying OID order in multi-pack-index"),
-                                                m->num_objects - 1);
-       for (i = 0; i < m->num_objects - 1; i++) {
-               struct object_id oid1, oid2;
-
-               nth_midxed_object_oid(&oid1, m, i);
-               nth_midxed_object_oid(&oid2, m, i + 1);
-
-               if (oidcmp(&oid1, &oid2) >= 0)
-                       midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
-                                   i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);
-
-               midx_display_sparse_progress(progress, i + 1);
-       }
-       stop_progress(&progress);
-
-       /*
-        * Create an array mapping each object to its packfile id.  Sort it
-        * to group the objects by packfile.  Use this permutation to visit
-        * each of the objects and only require 1 packfile to be open at a
-        * time.
-        */
-       ALLOC_ARRAY(pairs, m->num_objects);
-       for (i = 0; i < m->num_objects; i++) {
-               pairs[i].pos = i;
-               pairs[i].pack_int_id = nth_midxed_pack_int_id(m, i);
-       }
-
-       if (flags & MIDX_PROGRESS)
-               progress = start_sparse_progress(_("Sorting objects by packfile"),
-                                                m->num_objects);
-       display_progress(progress, 0); /* TODO: Measure QSORT() progress */
-       QSORT(pairs, m->num_objects, compare_pair_pos_vs_id);
-       stop_progress(&progress);
-
-       if (flags & MIDX_PROGRESS)
-               progress = start_sparse_progress(_("Verifying object offsets"), m->num_objects);
-       for (i = 0; i < m->num_objects; i++) {
-               struct object_id oid;
-               struct pack_entry e;
-               off_t m_offset, p_offset;
-
-               if (i > 0 && pairs[i-1].pack_int_id != pairs[i].pack_int_id &&
-                   m->packs[pairs[i-1].pack_int_id])
-               {
-                       close_pack_fd(m->packs[pairs[i-1].pack_int_id]);
-                       close_pack_index(m->packs[pairs[i-1].pack_int_id]);
-               }
-
-               nth_midxed_object_oid(&oid, m, pairs[i].pos);
-
-               if (!fill_midx_entry(r, &oid, &e, m)) {
-                       midx_report(_("failed to load pack entry for oid[%d] = %s"),
-                                   pairs[i].pos, oid_to_hex(&oid));
-                       continue;
-               }
-
-               if (open_pack_index(e.p)) {
-                       midx_report(_("failed to load pack-index for packfile %s"),
-                                   e.p->pack_name);
-                       break;
-               }
-
-               m_offset = e.offset;
-               p_offset = find_pack_entry_one(oid.hash, e.p);
-
-               if (m_offset != p_offset)
-                       midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
-                                   pairs[i].pos, oid_to_hex(&oid), m_offset, p_offset);
-
-               midx_display_sparse_progress(progress, i + 1);
-       }
-       stop_progress(&progress);
-
-cleanup:
-       free(pairs);
-       close_midx(m);
+cleanup:
+       free(pairs);
+       close_midx(m);
 
        return verify_midx_error;
 }
-
-int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
-{
-       uint32_t i, *count, result = 0;
-       struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
-       struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
-       struct progress *progress = NULL;
-
-       if (!m)
-               return 0;
-
-       CALLOC_ARRAY(count, m->num_packs);
-
-       if (flags & MIDX_PROGRESS)
-               progress = start_delayed_progress(_("Counting referenced objects"),
-                                         m->num_objects);
-       for (i = 0; i < m->num_objects; i++) {
-               int pack_int_id = nth_midxed_pack_int_id(m, i);
-               count[pack_int_id]++;
-               display_progress(progress, i + 1);
-       }
-       stop_progress(&progress);
-
-       if (flags & MIDX_PROGRESS)
-               progress = start_delayed_progress(_("Finding and deleting unreferenced packfiles"),
-                                         m->num_packs);
-       for (i = 0; i < m->num_packs; i++) {
-               char *pack_name;
-               display_progress(progress, i + 1);
-
-               if (count[i])
-                       continue;
-
-               if (prepare_midx_pack(r, m, i))
-                       continue;
-
-               if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
-                       continue;
-
-               pack_name = xstrdup(m->packs[i]->pack_name);
-               close_pack(m->packs[i]);
-
-               string_list_insert(&packs_to_drop, m->pack_names[i]);
-               unlink_pack_path(pack_name, 0);
-               free(pack_name);
-       }
-       stop_progress(&progress);
-
-       free(count);
-
-       if (packs_to_drop.nr)
-               result = write_midx_internal(object_dir, NULL, &packs_to_drop, NULL, NULL, flags);
-
-       string_list_clear(&packs_to_drop, 0);
-
-       return result;
-}
-
-struct repack_info {
-       timestamp_t mtime;
-       uint32_t referenced_objects;
-       uint32_t pack_int_id;
-};
-
-static int compare_by_mtime(const void *a_, const void *b_)
-{
-       const struct repack_info *a, *b;
-
-       a = (const struct repack_info *)a_;
-       b = (const struct repack_info *)b_;
-
-       if (a->mtime < b->mtime)
-               return -1;
-       if (a->mtime > b->mtime)
-               return 1;
-       return 0;
-}
-
-static int fill_included_packs_all(struct repository *r,
-                                  struct multi_pack_index *m,
-                                  unsigned char *include_pack)
-{
-       uint32_t i, count = 0;
-       int pack_kept_objects = 0;
-
-       repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
-
-       for (i = 0; i < m->num_packs; i++) {
-               if (prepare_midx_pack(r, m, i))
-                       continue;
-               if (!pack_kept_objects && m->packs[i]->pack_keep)
-                       continue;
-               if (m->packs[i]->is_cruft)
-                       continue;
-
-               include_pack[i] = 1;
-               count++;
-       }
-
-       return count < 2;
-}
-
-static int fill_included_packs_batch(struct repository *r,
-                                    struct multi_pack_index *m,
-                                    unsigned char *include_pack,
-                                    size_t batch_size)
-{
-       uint32_t i, packs_to_repack;
-       size_t total_size;
-       struct repack_info *pack_info;
-       int pack_kept_objects = 0;
-
-       CALLOC_ARRAY(pack_info, m->num_packs);
-
-       repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
-
-       for (i = 0; i < m->num_packs; i++) {
-               pack_info[i].pack_int_id = i;
-
-               if (prepare_midx_pack(r, m, i))
-                       continue;
-
-               pack_info[i].mtime = m->packs[i]->mtime;
-       }
-
-       for (i = 0; i < m->num_objects; i++) {
-               uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
-               pack_info[pack_int_id].referenced_objects++;
-       }
-
-       QSORT(pack_info, m->num_packs, compare_by_mtime);
-
-       total_size = 0;
-       packs_to_repack = 0;
-       for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
-               int pack_int_id = pack_info[i].pack_int_id;
-               struct packed_git *p = m->packs[pack_int_id];
-               size_t expected_size;
-
-               if (!p)
-                       continue;
-               if (!pack_kept_objects && p->pack_keep)
-                       continue;
-               if (p->is_cruft)
-                       continue;
-               if (open_pack_index(p) || !p->num_objects)
-                       continue;
-
-               expected_size = st_mult(p->pack_size,
-                                       pack_info[i].referenced_objects);
-               expected_size /= p->num_objects;
-
-               if (expected_size >= batch_size)
-                       continue;
-
-               packs_to_repack++;
-               total_size += expected_size;
-               include_pack[pack_int_id] = 1;
-       }
-
-       free(pack_info);
-
-       if (packs_to_repack < 2)
-               return 1;
-
-       return 0;
-}
-
-int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
-{
-       int result = 0;
-       uint32_t i;
-       unsigned char *include_pack;
-       struct child_process cmd = CHILD_PROCESS_INIT;
-       FILE *cmd_in;
-       struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
-
-       /*
-        * When updating the default for these configuration
-        * variables in builtin/repack.c, these must be adjusted
-        * to match.
-        */
-       int delta_base_offset = 1;
-       int use_delta_islands = 0;
-
-       if (!m)
-               return 0;
-
-       CALLOC_ARRAY(include_pack, m->num_packs);
-
-       if (batch_size) {
-               if (fill_included_packs_batch(r, m, include_pack, batch_size))
-                       goto cleanup;
-       } else if (fill_included_packs_all(r, m, include_pack))
-               goto cleanup;
-
-       repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
-       repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);
-
-       strvec_push(&cmd.args, "pack-objects");
-
-       strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);
-
-       if (delta_base_offset)
-               strvec_push(&cmd.args, "--delta-base-offset");
-       if (use_delta_islands)
-               strvec_push(&cmd.args, "--delta-islands");
-
-       if (flags & MIDX_PROGRESS)
-               strvec_push(&cmd.args, "--progress");
-       else
-               strvec_push(&cmd.args, "-q");
-
-       cmd.git_cmd = 1;
-       cmd.in = cmd.out = -1;
-
-       if (start_command(&cmd)) {
-               error(_("could not start pack-objects"));
-               result = 1;
-               goto cleanup;
-       }
-
-       cmd_in = xfdopen(cmd.in, "w");
-
-       for (i = 0; i < m->num_objects; i++) {
-               struct object_id oid;
-               uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
-
-               if (!include_pack[pack_int_id])
-                       continue;
-
-               nth_midxed_object_oid(&oid, m, i);
-               fprintf(cmd_in, "%s\n", oid_to_hex(&oid));
-       }
-       fclose(cmd_in);
-
-       if (finish_command(&cmd)) {
-               error(_("could not finish pack-objects"));
-               result = 1;
-               goto cleanup;
-       }
-
-       result = write_midx_internal(object_dir, NULL, NULL, NULL, NULL, flags);
-
-cleanup:
-       free(include_pack);
-       return result;
-}
diff --git a/midx.h b/midx.h
index b374a7afafb867a9ef9f0fd9913ec2552914859c..dc477dff4413747f84258791c070303047256926 100644 (file)
--- a/midx.h
+++ b/midx.h
@@ -8,6 +8,25 @@ struct pack_entry;
 struct repository;
 struct bitmapped_pack;
 
+#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
+#define MIDX_VERSION 1
+#define MIDX_BYTE_FILE_VERSION 4
+#define MIDX_BYTE_HASH_VERSION 5
+#define MIDX_BYTE_NUM_CHUNKS 6
+#define MIDX_BYTE_NUM_PACKS 8
+#define MIDX_HEADER_SIZE 12
+
+#define MIDX_CHUNK_ALIGNMENT 4
+#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
+#define MIDX_CHUNKID_BITMAPPEDPACKS 0x42544d50 /* "BTMP" */
+#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
+#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
+#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
+#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
+#define MIDX_CHUNKID_REVINDEX 0x52494458 /* "RIDX" */
+#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
+#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
+
 #define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX"
 #define GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP \
        "GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP"
diff --git a/path.c b/path.c
index 8bb223c92c91c2d963ab4fefa9efb0ac7c3b026c..67229edb9c2d4c23d97b4418442992ef490cfc55 100644 (file)
--- a/path.c
+++ b/path.c
@@ -28,8 +28,6 @@ static int get_st_mode_bits(const char *path, int *mode)
        return 0;
 }
 
-static char bad_path[] = "/bad-path/";
-
 static struct strbuf *get_pathname(void)
 {
        static struct strbuf pathname_array[4] = {
@@ -59,21 +57,6 @@ static void strbuf_cleanup_path(struct strbuf *sb)
                strbuf_remove(sb, 0, path - sb->buf);
 }
 
-char *mksnpath(char *buf, size_t n, const char *fmt, ...)
-{
-       va_list args;
-       unsigned len;
-
-       va_start(args, fmt);
-       len = vsnprintf(buf, n, fmt, args);
-       va_end(args);
-       if (len >= n) {
-               strlcpy(buf, bad_path, n);
-               return buf;
-       }
-       return (char *)cleanup_path(buf);
-}
-
 static int dir_prefix(const char *buf, const char *dir)
 {
        int len = strlen(dir);
diff --git a/path.h b/path.h
index e053effef20cbad3115095d54989cb9d4d91cfff..ea96487b292bdb149061dc2b35c07e7529ed01ef 100644 (file)
--- a/path.h
+++ b/path.h
@@ -23,12 +23,6 @@ const char *mkpath(const char *fmt, ...)
 char *mkpathdup(const char *fmt, ...)
        __attribute__((format (printf, 1, 2)));
 
-/*
- * Construct a path and place the result in the provided buffer `buf`.
- */
-char *mksnpath(char *buf, size_t n, const char *fmt, ...)
-       __attribute__((format (printf, 3, 4)));
-
 /*
  * The `git_common_path` family of functions will construct a path into a
  * repository's common git directory, which is shared by all worktrees.
index 2a50a784f0ec6ae4da94fed41345e625c5bbce1c..09414afd0472ed79a4a7f082278328c1446f5e8a 100644 (file)
@@ -480,8 +480,8 @@ extern int verify_ce_order;
 int cmp_cache_name_compare(const void *a_, const void *b_);
 
 int add_files_to_cache(struct repository *repo, const char *prefix,
-                      const struct pathspec *pathspec, int include_sparse,
-                      int flags);
+                      const struct pathspec *pathspec, char *ps_matched,
+                      int include_sparse, int flags);
 
 void overlay_tree_on_index(struct index_state *istate,
                           const char *tree_name, const char *prefix);
index f546cf7875cbfefddbec2449e8303786e485a8dd..e1723ad796f198823f4bf5ac3712881d80866b63 100644 (file)
@@ -3958,8 +3958,8 @@ static void update_callback(struct diff_queue_struct *q,
 }
 
 int add_files_to_cache(struct repository *repo, const char *prefix,
-                      const struct pathspec *pathspec, int include_sparse,
-                      int flags)
+                      const struct pathspec *pathspec, char *ps_matched,
+                      int include_sparse, int flags)
 {
        struct update_callback_data data;
        struct rev_info rev;
@@ -3971,8 +3971,10 @@ int add_files_to_cache(struct repository *repo, const char *prefix,
 
        repo_init_revisions(repo, &rev, prefix);
        setup_revisions(0, NULL, &rev, NULL);
-       if (pathspec)
+       if (pathspec) {
                copy_pathspec(&rev.prune_data, pathspec);
+               rev.ps_matched = ps_matched;
+       }
        rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
        rev.diffopt.format_callback = update_callback;
        rev.diffopt.format_callback_data = &data;
diff --git a/refs.h b/refs.h
index 298caf6c6184cc3a23acf78d1a0e3dc8c7d8614c..d278775e086bfa7990999c226ad1db2f488e890d 100644 (file)
--- a/refs.h
+++ b/refs.h
@@ -66,12 +66,6 @@ const char *ref_storage_format_to_name(unsigned int ref_storage_format);
 #define RESOLVE_REF_NO_RECURSE 0x02
 #define RESOLVE_REF_ALLOW_BAD_NAME 0x04
 
-struct pack_refs_opts {
-       unsigned int flags;
-       struct ref_exclusions *exclusions;
-       struct string_list *includes;
-};
-
 const char *refs_resolve_ref_unsafe(struct ref_store *refs,
                                    const char *refname,
                                    int resolve_flags,
@@ -428,10 +422,18 @@ void warn_dangling_symrefs(FILE *fp, const char *msg_fmt,
 /*
  * Flags for controlling behaviour of pack_refs()
  * PACK_REFS_PRUNE: Prune loose refs after packing
- * PACK_REFS_ALL:   Pack _all_ refs, not just tags and already packed refs
+ * PACK_REFS_AUTO: Pack refs on a best effort basis. The heuristics and end
+ *                 result are decided by the ref backend. Backends may ignore
+ *                 this flag and fall back to a normal repack.
  */
-#define PACK_REFS_PRUNE 0x0001
-#define PACK_REFS_ALL   0x0002
+#define PACK_REFS_PRUNE (1 << 0)
+#define PACK_REFS_AUTO  (1 << 1)
+
+struct pack_refs_opts {
+       unsigned int flags;
+       struct ref_exclusions *exclusions;
+       struct string_list *includes;
+};
 
 /*
  * Write a packed-refs file for the current repository.
index e206d5a073ced9c24d9020f03a674ca207abdd06..0bed6d2ab4844a1eb6b548c77577f3fc13105a81 100644 (file)
@@ -1203,9 +1203,16 @@ static int reftable_be_pack_refs(struct ref_store *ref_store,
        if (!stack)
                stack = refs->main_stack;
 
-       ret = reftable_stack_compact_all(stack, NULL);
-       if (ret)
+       if (opts->flags & PACK_REFS_AUTO)
+               ret = reftable_stack_auto_compact(stack);
+       else
+               ret = reftable_stack_compact_all(stack, NULL);
+       if (ret < 0) {
+               ret = error(_("unable to compact stack: %s"),
+                           reftable_error_str(ret));
                goto out;
+       }
+
        ret = reftable_stack_clean(stack);
        if (ret)
                goto out;
index 0785aff941bdc2d84181651a48f35cfaaefaede9..fea711db7e23e680fbf1937df43d625bb9c055fb 100644 (file)
@@ -27,7 +27,7 @@ void put_be16(uint8_t *out, uint16_t i)
        out[1] = (uint8_t)(i & 0xff);
 }
 
-int binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
+size_t binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
 {
        size_t lo = 0;
        size_t hi = sz;
@@ -39,8 +39,11 @@ int binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
         */
        while (hi - lo > 1) {
                size_t mid = lo + (hi - lo) / 2;
+               int ret = f(mid, args);
+               if (ret < 0)
+                       return sz;
 
-               if (f(mid, args))
+               if (ret > 0)
                        hi = mid;
                else
                        lo = mid;
index 91f3533efee501faf3e461f7206dbc7a964da98e..523ecd530762f6e4cde48edfa3761b3139b21b92 100644 (file)
@@ -22,13 +22,14 @@ uint32_t get_be24(uint8_t *in);
 void put_be16(uint8_t *out, uint16_t i);
 
 /*
- * find smallest index i in [0, sz) at which f(i) is true, assuming
- * that f is ascending. Return sz if f(i) is false for all indices.
+ * find smallest index i in [0, sz) at which `f(i) > 0`, assuming that f is
+ * ascending. Return sz if `f(i) == 0` for all indices. The search is aborted
+ * and `sz` is returned in case `f(i) < 0`.
  *
  * Contrary to bsearch(3), this returns something useful if the argument is not
  * found.
  */
-int binsearch(size_t sz, int (*f)(size_t k, void *args), void *args);
+size_t binsearch(size_t sz, int (*f)(size_t k, void *args), void *args);
 
 /*
  * Frees a NULL terminated array of malloced strings. The array itself is also
index 1fcd2297256760c9c49c3844a23e73a1e91d0c75..997c4d9e0113ba52fcaaaa423aabe08c511322af 100644 (file)
@@ -12,40 +12,47 @@ https://developers.google.com/open-source/licenses/bsd
 #include "test_framework.h"
 #include "reftable-tests.h"
 
-struct binsearch_args {
-       int key;
-       int *arr;
+struct integer_needle_lesseq_args {
+       int needle;
+       int *haystack;
 };
 
-static int binsearch_func(size_t i, void *void_args)
+static int integer_needle_lesseq(size_t i, void *_args)
 {
-       struct binsearch_args *args = void_args;
-
-       return args->key < args->arr[i];
+       struct integer_needle_lesseq_args *args = _args;
+       return args->needle <= args->haystack[i];
 }
 
 static void test_binsearch(void)
 {
-       int arr[] = { 2, 4, 6, 8, 10 };
-       size_t sz = ARRAY_SIZE(arr);
-       struct binsearch_args args = {
-               .arr = arr,
+       int haystack[] = { 2, 4, 6, 8, 10 };
+       struct {
+               int needle;
+               size_t expected_idx;
+       } testcases[] = {
+               {-9000, 0},
+               {-1, 0},
+               {0, 0},
+               {2, 0},
+               {3, 1},
+               {4, 1},
+               {7, 3},
+               {9, 4},
+               {10, 4},
+               {11, 5},
+               {9000, 5},
        };
+       size_t i = 0;
 
-       int i = 0;
-       for (i = 1; i < 11; i++) {
-               int res;
-               args.key = i;
-               res = binsearch(sz, &binsearch_func, &args);
+       for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+               struct integer_needle_lesseq_args args = {
+                       .haystack = haystack,
+                       .needle = testcases[i].needle,
+               };
+               size_t idx;
 
-               if (res < sz) {
-                       EXPECT(args.key < arr[res]);
-                       if (res > 0) {
-                               EXPECT(args.key >= arr[res - 1]);
-                       }
-               } else {
-                       EXPECT(args.key == 10 || args.key == 11);
-               }
+               idx = binsearch(ARRAY_SIZE(haystack), &integer_needle_lesseq, &args);
+               EXPECT(idx == testcases[i].expected_idx);
        }
 }
 
index e2a2cee58d2a35c6183e7083d3320d2520a397f4..298e8c56b9e2085b798f50a05c136ad67c5cdbfe 100644 (file)
@@ -273,35 +273,46 @@ void block_reader_start(struct block_reader *br, struct block_iter *it)
        it->next_off = br->header_off + 4;
 }
 
-struct restart_find_args {
+struct restart_needle_less_args {
        int error;
-       struct strbuf key;
-       struct block_reader *r;
+       struct strbuf needle;
+       struct block_reader *reader;
 };
 
-static int restart_key_less(size_t idx, void *args)
+static int restart_needle_less(size_t idx, void *_args)
 {
-       struct restart_find_args *a = args;
-       uint32_t off = block_reader_restart_offset(a->r, idx);
+       struct restart_needle_less_args *args = _args;
+       uint32_t off = block_reader_restart_offset(args->reader, idx);
        struct string_view in = {
-               .buf = a->r->block.data + off,
-               .len = a->r->block_len - off,
+               .buf = args->reader->block.data + off,
+               .len = args->reader->block_len - off,
        };
+       uint64_t prefix_len, suffix_len;
+       uint8_t extra;
+       int n;
+
+       /*
+        * Records at restart points are stored without prefix compression, so
+        * there is no need to fully decode the record key here. This removes
+        * the need for allocating memory.
+        */
+       n = reftable_decode_keylen(in, &prefix_len, &suffix_len, &extra);
+       if (n < 0 || prefix_len) {
+               args->error = 1;
+               return -1;
+       }
 
-       /* the restart key is verbatim in the block, so this could avoid the
-          alloc for decoding the key */
-       struct strbuf rkey = STRBUF_INIT;
-       uint8_t unused_extra;
-       int n = reftable_decode_key(&rkey, &unused_extra, in);
-       int result;
-       if (n < 0) {
-               a->error = 1;
+       string_view_consume(&in, n);
+       if (suffix_len > in.len) {
+               args->error = 1;
                return -1;
        }
 
-       result = strbuf_cmp(&a->key, &rkey);
-       strbuf_release(&rkey);
-       return result < 0;
+       n = memcmp(args->needle.buf, in.buf,
+                  args->needle.len < suffix_len ? args->needle.len : suffix_len);
+       if (n)
+               return n < 0;
+       return args->needle.len < suffix_len;
 }
 
 void block_iter_copy_from(struct block_iter *dest, struct block_iter *src)
@@ -376,20 +387,51 @@ void block_iter_close(struct block_iter *it)
 int block_reader_seek(struct block_reader *br, struct block_iter *it,
                      struct strbuf *want)
 {
-       struct restart_find_args args = {
-               .key = *want,
-               .r = br,
+       struct restart_needle_less_args args = {
+               .needle = *want,
+               .reader = br,
        };
        struct block_iter next = BLOCK_ITER_INIT;
        struct reftable_record rec;
-       int err = 0, i;
-
+       int err = 0;
+       size_t i;
+
+       /*
+        * Perform a binary search over the block's restart points, which
+        * avoids doing a linear scan over the whole block. Like this, we
+        * identify the section of the block that should contain our key.
+        *
+        * Note that we explicitly search for the first restart point _greater_
+        * than the sought-after record, not _greater or equal_ to it. In case
+        * the sought-after record is located directly at the restart point we
+        * would otherwise start doing the linear search at the preceding
+        * restart point. While that works alright, we would end up scanning
+        * too many record.
+        */
+       i = binsearch(br->restart_count, &restart_needle_less, &args);
        if (args.error) {
                err = REFTABLE_FORMAT_ERROR;
                goto done;
        }
 
-       i = binsearch(br->restart_count, &restart_key_less, &args);
+       /*
+        * Now there are multiple cases:
+        *
+        *   - `i == 0`: The wanted record is smaller than the record found at
+        *     the first restart point. As the first restart point is the first
+        *     record in the block, our wanted record cannot be located in this
+        *     block at all. We still need to position the iterator so that the
+        *     next call to `block_iter_next()` will yield an end-of-iterator
+        *     signal.
+        *
+        *   - `i == restart_count`: The wanted record was not found at any of
+        *     the restart points. As there is no restart point at the end of
+        *     the section the record may thus be contained in the last block.
+        *
+        *   - `i > 0`: The wanted record must be contained in the section
+        *     before the found restart point. We thus do a linear search
+        *     starting from the preceding restart point.
+        */
        if (i > 0)
                it->next_off = block_reader_restart_offset(br, i - 1);
        else
@@ -398,21 +440,34 @@ int block_reader_seek(struct block_reader *br, struct block_iter *it,
 
        reftable_record_init(&rec, block_reader_type(br));
 
-       /* We're looking for the last entry less/equal than the wanted key, so
-          we have to go one entry too far and then back up.
-       */
+       /*
+        * We're looking for the last entry less than the wanted key so that
+        * the next call to `block_reader_next()` would yield the wanted
+        * record. We thus don't want to position our reader at the sought
+        * after record, but one before. To do so, we have to go one entry too
+        * far and then back up.
+        */
        while (1) {
                block_iter_copy_from(&next, it);
                err = block_iter_next(&next, &rec);
                if (err < 0)
                        goto done;
-
-               reftable_record_key(&rec, &it->last_key);
-               if (err > 0 || strbuf_cmp(&it->last_key, want) >= 0) {
+               if (err > 0) {
                        err = 0;
                        goto done;
                }
 
+               /*
+                * Check whether the current key is greater or equal to the
+                * sought-after key. In case it is greater we know that the
+                * record does not exist in the block and can thus abort early.
+                * In case it is equal to the sought-after key we have found
+                * the desired record.
+                */
+               reftable_record_key(&rec, &it->last_key);
+               if (strbuf_cmp(&it->last_key, want) >= 0)
+                       goto done;
+
                block_iter_copy_from(it, &next);
        }
 
index 0d1766735e8ff0f1c5c057d1882728aa4292d1ee..cfb7a0fda4a24dc190892b22cffc713c3e41c2a3 100644 (file)
@@ -22,7 +22,7 @@ const char *reftable_error_str(int err)
        case REFTABLE_NOT_EXIST_ERROR:
                return "file does not exist";
        case REFTABLE_LOCK_ERROR:
-               return "data is outdated";
+               return "data is locked";
        case REFTABLE_API_ERROR:
                return "misuse of the reftable API";
        case REFTABLE_ZLIB_ERROR:
@@ -35,6 +35,8 @@ const char *reftable_error_str(int err)
                return "invalid refname";
        case REFTABLE_ENTRY_TOO_BIG_ERROR:
                return "entry too large";
+       case REFTABLE_OUTDATED_ERROR:
+               return "data concurrently modified";
        case -1:
                return "general error";
        default:
index 23b497adab8b3478026959a802bddb58fb1c6002..5506f3e913860eb2b76c5db2e9f48cede4965bf3 100644 (file)
@@ -159,26 +159,42 @@ int reftable_encode_key(int *restart, struct string_view dest,
        return start.len - dest.len;
 }
 
-int reftable_decode_key(struct strbuf *last_key, uint8_t *extra,
-                       struct string_view in)
+int reftable_decode_keylen(struct string_view in,
+                          uint64_t *prefix_len,
+                          uint64_t *suffix_len,
+                          uint8_t *extra)
 {
-       int start_len = in.len;
-       uint64_t prefix_len = 0;
-       uint64_t suffix_len = 0;
+       size_t start_len = in.len;
        int n;
 
-       n = get_var_int(&prefix_len, &in);
+       n = get_var_int(prefix_len, &in);
        if (n < 0)
                return -1;
        string_view_consume(&in, n);
 
-       n = get_var_int(&suffix_len, &in);
+       n = get_var_int(suffix_len, &in);
        if (n <= 0)
                return -1;
        string_view_consume(&in, n);
 
-       *extra = (uint8_t)(suffix_len & 0x7);
-       suffix_len >>= 3;
+       *extra = (uint8_t)(*suffix_len & 0x7);
+       *suffix_len >>= 3;
+
+       return start_len - in.len;
+}
+
+int reftable_decode_key(struct strbuf *last_key, uint8_t *extra,
+                       struct string_view in)
+{
+       int start_len = in.len;
+       uint64_t prefix_len = 0;
+       uint64_t suffix_len = 0;
+       int n;
+
+       n = reftable_decode_keylen(in, &prefix_len, &suffix_len, extra);
+       if (n < 0)
+               return -1;
+       string_view_consume(&in, n);
 
        if (in.len < suffix_len ||
            prefix_len > last_key->len)
index 826ee1c55c3b64d2bf4cfe1a170cf99ca3511259..d778133e6ec56ccc1e17ad2d5b0435420dce6e29 100644 (file)
@@ -86,6 +86,12 @@ int reftable_encode_key(int *is_restart, struct string_view dest,
                        struct strbuf prev_key, struct strbuf key,
                        uint8_t extra);
 
+/* Decode a record's key lengths. */
+int reftable_decode_keylen(struct string_view in,
+                          uint64_t *prefix_len,
+                          uint64_t *suffix_len,
+                          uint8_t *extra);
+
 /*
  * Decode into `last_key` and `extra` from `in`. `last_key` is expected to
  * contain the decoded key of the preceding record, if any.
index 7570e4acf9eec740057a4c6ba8a615878f48799f..bbfde15754a227c9bd418572b778d77827a59db4 100644 (file)
 #include "refname.h"
 #include "reftable-iterator.h"
 
-struct find_arg {
-       char **names;
-       const char *want;
+struct refname_needle_lesseq_args {
+       char **haystack;
+       const char *needle;
 };
 
-static int find_name(size_t k, void *arg)
+static int refname_needle_lesseq(size_t k, void *_args)
 {
-       struct find_arg *f_arg = arg;
-       return strcmp(f_arg->names[k], f_arg->want) >= 0;
+       struct refname_needle_lesseq_args *args = _args;
+       return strcmp(args->needle, args->haystack[k]) <= 0;
 }
 
 static int modification_has_ref(struct modification *mod, const char *name)
@@ -29,25 +29,23 @@ static int modification_has_ref(struct modification *mod, const char *name)
        int err = 0;
 
        if (mod->add_len > 0) {
-               struct find_arg arg = {
-                       .names = mod->add,
-                       .want = name,
+               struct refname_needle_lesseq_args args = {
+                       .haystack = mod->add,
+                       .needle = name,
                };
-               int idx = binsearch(mod->add_len, find_name, &arg);
-               if (idx < mod->add_len && !strcmp(mod->add[idx], name)) {
+               size_t idx = binsearch(mod->add_len, refname_needle_lesseq, &args);
+               if (idx < mod->add_len && !strcmp(mod->add[idx], name))
                        return 0;
-               }
        }
 
        if (mod->del_len > 0) {
-               struct find_arg arg = {
-                       .names = mod->del,
-                       .want = name,
+               struct refname_needle_lesseq_args args = {
+                       .haystack = mod->del,
+                       .needle = name,
                };
-               int idx = binsearch(mod->del_len, find_name, &arg);
-               if (idx < mod->del_len && !strcmp(mod->del[idx], name)) {
+               size_t idx = binsearch(mod->del_len, refname_needle_lesseq, &args);
+               if (idx < mod->del_len && !strcmp(mod->del[idx], name))
                        return 1;
-               }
        }
 
        err = reftable_table_read_ref(&mod->tab, name, &ref);
@@ -73,11 +71,11 @@ static int modification_has_ref_with_prefix(struct modification *mod,
        int err = 0;
 
        if (mod->add_len > 0) {
-               struct find_arg arg = {
-                       .names = mod->add,
-                       .want = prefix,
+               struct refname_needle_lesseq_args args = {
+                       .haystack = mod->add,
+                       .needle = prefix,
                };
-               int idx = binsearch(mod->add_len, find_name, &arg);
+               size_t idx = binsearch(mod->add_len, refname_needle_lesseq, &args);
                if (idx < mod->add_len &&
                    !strncmp(prefix, mod->add[idx], strlen(prefix)))
                        goto done;
@@ -92,15 +90,14 @@ static int modification_has_ref_with_prefix(struct modification *mod,
                        goto done;
 
                if (mod->del_len > 0) {
-                       struct find_arg arg = {
-                               .names = mod->del,
-                               .want = ref.refname,
+                       struct refname_needle_lesseq_args args = {
+                               .haystack = mod->del,
+                               .needle = ref.refname,
                        };
-                       int idx = binsearch(mod->del_len, find_name, &arg);
+                       size_t idx = binsearch(mod->del_len, refname_needle_lesseq, &args);
                        if (idx < mod->del_len &&
-                           !strcmp(ref.refname, mod->del[idx])) {
+                           !strcmp(ref.refname, mod->del[idx]))
                                continue;
-                       }
                }
 
                if (strncmp(ref.refname, prefix, strlen(prefix))) {
index 4c457aaaf8906384e65edfeb06057a86dcb594e4..e9b07c9f3623ec69db5447ced5474993f7b98d0f 100644 (file)
@@ -25,7 +25,7 @@ enum reftable_error {
         */
        REFTABLE_NOT_EXIST_ERROR = -4,
 
-       /* Trying to write out-of-date data. */
+       /* Trying to access locked data. */
        REFTABLE_LOCK_ERROR = -5,
 
        /* Misuse of the API:
@@ -57,6 +57,9 @@ enum reftable_error {
        /* Entry does not fit. This can happen when writing outsize reflog
           messages. */
        REFTABLE_ENTRY_TOO_BIG_ERROR = -11,
+
+       /* Trying to write out-of-date data. */
+       REFTABLE_OUTDATED_ERROR = -12,
 };
 
 /* convert the numeric error code to a string. The string should not be
index 1ecf1b9751ce4975580ab7deed2bbe2a9a7847bc..dde50b61d696befd29bb16452e0e2588f0823761 100644 (file)
@@ -529,9 +529,9 @@ int reftable_stack_add(struct reftable_stack *st,
 {
        int err = stack_try_add(st, write, arg);
        if (err < 0) {
-               if (err == REFTABLE_LOCK_ERROR) {
+               if (err == REFTABLE_OUTDATED_ERROR) {
                        /* Ignore error return, we want to propagate
-                          REFTABLE_LOCK_ERROR.
+                          REFTABLE_OUTDATED_ERROR.
                        */
                        reftable_stack_reload(st);
                }
@@ -590,9 +590,8 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
        err = stack_uptodate(st);
        if (err < 0)
                goto done;
-
-       if (err > 1) {
-               err = REFTABLE_LOCK_ERROR;
+       if (err > 0) {
+               err = REFTABLE_OUTDATED_ERROR;
                goto done;
        }
 
@@ -681,8 +680,19 @@ int reftable_addition_commit(struct reftable_addition *add)
        if (err)
                goto done;
 
-       if (!add->stack->disable_auto_compact)
+       if (!add->stack->disable_auto_compact) {
+               /*
+                * Auto-compact the stack to keep the number of tables in
+                * control. It is possible that a concurrent writer is already
+                * trying to compact parts of the stack, which would lead to a
+                * `REFTABLE_LOCK_ERROR` because parts of the stack are locked
+                * already. This is a benign error though, so we ignore it.
+                */
                err = reftable_stack_auto_compact(add->stack);
+               if (err < 0 && err != REFTABLE_LOCK_ERROR)
+                       goto done;
+               err = 0;
+       }
 
 done:
        reftable_addition_close(add);
@@ -713,10 +723,6 @@ static int stack_try_add(struct reftable_stack *st,
        int err = reftable_stack_init_addition(&add, st);
        if (err < 0)
                goto done;
-       if (err > 0) {
-               err = REFTABLE_LOCK_ERROR;
-               goto done;
-       }
 
        err = reftable_addition_add(&add, write_table, arg);
        if (err < 0)
@@ -978,7 +984,15 @@ done:
        return err;
 }
 
-/* <  0: error. 0 == OK, > 0 attempt failed; could retry. */
+/*
+ * Compact all tables in the range `[first, last)` into a single new table.
+ *
+ * This function returns `0` on success or a code `< 0` on failure. When the
+ * stack or any of the tables in the specified range are already locked then
+ * this function returns `REFTABLE_LOCK_ERROR`. This is a benign error that
+ * callers can either ignore, or they may choose to retry compaction after some
+ * amount of time.
+ */
 static int stack_compact_range(struct reftable_stack *st,
                               size_t first, size_t last,
                               struct reftable_log_expiry_config *expiry)
@@ -1008,7 +1022,7 @@ static int stack_compact_range(struct reftable_stack *st,
                                        LOCK_NO_DEREF);
        if (err < 0) {
                if (errno == EEXIST)
-                       err = 1;
+                       err = REFTABLE_LOCK_ERROR;
                else
                        err = REFTABLE_IO_ERROR;
                goto done;
@@ -1030,7 +1044,7 @@ static int stack_compact_range(struct reftable_stack *st,
                                                table_name.buf, LOCK_NO_DEREF);
                if (err < 0) {
                        if (errno == EEXIST)
-                               err = 1;
+                               err = REFTABLE_LOCK_ERROR;
                        else
                                err = REFTABLE_IO_ERROR;
                        goto done;
@@ -1080,7 +1094,7 @@ static int stack_compact_range(struct reftable_stack *st,
                                        LOCK_NO_DEREF);
        if (err < 0) {
                if (errno == EEXIST)
-                       err = 1;
+                       err = REFTABLE_LOCK_ERROR;
                else
                        err = REFTABLE_IO_ERROR;
                goto done;
@@ -1192,7 +1206,7 @@ static int stack_compact_range_stats(struct reftable_stack *st,
                                     struct reftable_log_expiry_config *config)
 {
        int err = stack_compact_range(st, first, last, config);
-       if (err > 0)
+       if (err == REFTABLE_LOCK_ERROR)
                st->stats.failures++;
        return err;
 }
index 0dc9a44648e45ec3b61b9b4572ecd504035f11d8..351e35bd86d8465ec7f741374845ae104bcf23f9 100644 (file)
@@ -242,7 +242,7 @@ static void test_reftable_stack_uptodate(void)
        EXPECT_ERR(err);
 
        err = reftable_stack_add(st2, &write_test_ref, &ref2);
-       EXPECT(err == REFTABLE_LOCK_ERROR);
+       EXPECT(err == REFTABLE_OUTDATED_ERROR);
 
        err = reftable_stack_reload(st2);
        EXPECT_ERR(err);
@@ -353,6 +353,49 @@ static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
        clear_dir(dir);
 }
 
+static void test_reftable_stack_auto_compaction_fails_gracefully(void)
+{
+       struct reftable_ref_record ref = {
+               .refname = "refs/heads/master",
+               .update_index = 1,
+               .value_type = REFTABLE_REF_VAL1,
+               .value.val1 = {0x01},
+       };
+       struct reftable_write_options cfg = {0};
+       struct reftable_stack *st;
+       struct strbuf table_path = STRBUF_INIT;
+       char *dir = get_tmp_dir(__LINE__);
+       int err;
+
+       err = reftable_new_stack(&st, dir, cfg);
+       EXPECT_ERR(err);
+
+       err = reftable_stack_add(st, write_test_ref, &ref);
+       EXPECT_ERR(err);
+       EXPECT(st->merged->stack_len == 1);
+       EXPECT(st->stats.attempts == 0);
+       EXPECT(st->stats.failures == 0);
+
+       /*
+        * Lock the newly written table such that it cannot be compacted.
+        * Adding a new table to the stack should not be impacted by this, even
+        * though auto-compaction will now fail.
+        */
+       strbuf_addf(&table_path, "%s/%s.lock", dir, st->readers[0]->name);
+       write_file_buf(table_path.buf, "", 0);
+
+       ref.update_index = 2;
+       err = reftable_stack_add(st, write_test_ref, &ref);
+       EXPECT_ERR(err);
+       EXPECT(st->merged->stack_len == 2);
+       EXPECT(st->stats.attempts == 1);
+       EXPECT(st->stats.failures == 1);
+
+       reftable_stack_destroy(st);
+       strbuf_release(&table_path);
+       clear_dir(dir);
+}
+
 static void test_reftable_stack_validate_refname(void)
 {
        struct reftable_write_options cfg = { 0 };
@@ -1095,6 +1138,7 @@ int stack_test_main(int argc, const char *argv[])
        RUN_TEST(test_reftable_stack_tombstone);
        RUN_TEST(test_reftable_stack_transaction_api);
        RUN_TEST(test_reftable_stack_transaction_api_performs_auto_compaction);
+       RUN_TEST(test_reftable_stack_auto_compaction_fails_gracefully);
        RUN_TEST(test_reftable_stack_update_index_check);
        RUN_TEST(test_reftable_stack_uptodate);
        RUN_TEST(test_reftable_stack_validate_refname);
index 31b02b8840969d17c34715fb3b902c6d2b4a4f2d..0b6d7815fddd1e36be4aae007d5109cf5ba32cf1 100644 (file)
@@ -1,4 +1,5 @@
 #include "git-compat-util.h"
+#include "git-curl-compat.h"
 #include "config.h"
 #include "environment.h"
 #include "gettext.h"
@@ -955,7 +956,9 @@ retry:
                /* The request body is large and the size cannot be predicted.
                 * We must use chunked encoding to send it.
                 */
+#ifdef GIT_CURL_NEED_TRANSFER_ENCODING_HEADER
                headers = curl_slist_append(headers, "Transfer-Encoding: chunked");
+#endif
                rpc->initial_buffer = 1;
                curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, rpc_out);
                curl_easy_setopt(slot->curl, CURLOPT_INFILE, rpc);
index 94c43138bc3e68651accecf79cdf4c28ba98582f..0e470d1df19f690586378a006d9c9145fb7386c7 100644 (file)
@@ -142,6 +142,7 @@ struct rev_info {
        /* Basic information */
        const char *prefix;
        const char *def;
+       char *ps_matched; /* optionally record matches of prune_data */
        struct pathspec prune_data;
 
        /*
index 7a0f6cac53d2433ade595ce95ab2d30e214b2ede..82bbf6e2e68b9b57b8c2d159439631aa36a16d20 100644 (file)
@@ -112,25 +112,6 @@ static const char **get_store(const char **argv, struct ref_store **refs)
        return argv + 1;
 }
 
-static struct flag_definition pack_flags[] = { FLAG_DEF(PACK_REFS_PRUNE),
-                                              FLAG_DEF(PACK_REFS_ALL),
-                                              { NULL, 0 } };
-
-static int cmd_pack_refs(struct ref_store *refs, const char **argv)
-{
-       unsigned int flags = arg_flags(*argv++, "flags", pack_flags);
-       static struct ref_exclusions exclusions = REF_EXCLUSIONS_INIT;
-       static struct string_list included_refs = STRING_LIST_INIT_NODUP;
-       struct pack_refs_opts pack_opts = { .flags = flags,
-                                           .exclusions = &exclusions,
-                                           .includes = &included_refs };
-
-       if (pack_opts.flags & PACK_REFS_ALL)
-               string_list_append(pack_opts.includes, "*");
-
-       return refs_pack_refs(refs, &pack_opts);
-}
-
 static int cmd_create_symref(struct ref_store *refs, const char **argv)
 {
        const char *refname = notnull(*argv++, "refname");
@@ -326,7 +307,6 @@ struct command {
 };
 
 static struct command commands[] = {
-       { "pack-refs", cmd_pack_refs },
        { "create-symref", cmd_create_symref },
        { "delete-refs", cmd_delete_refs },
        { "rename-ref", cmd_rename_ref },
index d0736dd1a00d59cb1774860568136e94f8d23f04..b8a5bcb187653c960d9dec38374a4cd3587c1654 100644 (file)
@@ -15,3 +15,15 @@ empty_blob sha256:473a0f4c3be8a93681a267e3b1e9a7dcda1185436fe141f7749120a3037218
 
 empty_tree sha1:4b825dc642cb6eb9a060e54bf8d69288fbee4904
 empty_tree sha256:6ef19b41225c5369f1c104d45d8d85efa9b057b53b14b4b9b939dd74decc5321
+
+blob17_1 sha1:263
+blob17_1 sha256:34
+
+blob17_2 sha1:410
+blob17_2 sha256:174
+
+blob17_3 sha1:523
+blob17_3 sha256:313
+
+blob17_4 sha1:790
+blob17_4 sha256:481
index 8300faadea9a76f19e3d2c82f5ff600f38bfe18f..f2c146fa2a1dd79fd68bd0c71f836667d5c55cf3 100755 (executable)
@@ -8,6 +8,14 @@ test -z "$NO_UNIX_SOCKETS" || {
        skip_all='skipping credential-cache tests, unix sockets not available'
        test_done
 }
+if test_have_prereq MINGW
+then
+       service_running=$(sc query afunix | grep "4  RUNNING")
+       test -z "$service_running" || {
+               skip_all='skipping credential-cache tests, unix sockets not available'
+               test_done
+       }
+fi
 
 uname_s=$(uname -s)
 case $uname_s in
index cd3969e852bb06fa3361cd9e8e13d71c5fb514c4..69917d7b8459c6f3ad71326b25c9fabe731a5b4a 100755 (executable)
@@ -59,7 +59,9 @@ txt_to_synopsis () {
                -e '/^\[verse\]$/,/^$/ {
                        /^$/d;
                        /^\[verse\]$/d;
-
+                       s/_//g;
+                       s/++//g;
+                       s/`//g;
                        s/{litdd}/--/g;
                        s/'\''\(git[ a-z-]*\)'\''/\1/g;
 
index c309d2bae8a19816907b81d82cef9099b2fa21e9..7d4ab0b91aad75f8363f0f019664c81ae00b49cd 100755 (executable)
@@ -32,11 +32,16 @@ test_expect_success 'prepare a trivial repository' '
        HEAD=$(git rev-parse --verify HEAD)
 '
 
-test_expect_success 'pack_refs(PACK_REFS_ALL | PACK_REFS_PRUNE)' '
-       N=`find .git/refs -type f | wc -l` &&
+test_expect_success 'pack-refs --prune --all' '
+       test_path_is_missing .git/packed-refs &&
+       git pack-refs --no-prune --all &&
+       test_path_is_file .git/packed-refs &&
+       N=$(find .git/refs -type f | wc -l) &&
        test "$N" != 0 &&
-       test-tool ref-store main pack-refs PACK_REFS_PRUNE,PACK_REFS_ALL &&
-       N=`find .git/refs -type f` &&
+
+       git pack-refs --prune --all &&
+       test_path_is_file .git/packed-refs &&
+       N=$(find .git/refs -type f) &&
        test -z "$N"
 '
 
@@ -159,6 +164,13 @@ test_expect_success 'test --exclude takes precedence over --include' '
        git pack-refs --include "refs/heads/pack*" --exclude "refs/heads/pack*" &&
        test -f .git/refs/heads/dont_pack5'
 
+test_expect_success '--auto packs and prunes refs as usual' '
+       git branch auto &&
+       test_path_is_file .git/refs/heads/auto &&
+       git pack-refs --auto --all &&
+       test_path_is_missing .git/refs/heads/auto
+'
+
 test_expect_success 'see if up-to-date packed refs are preserved' '
        git branch q &&
        git pack-refs --all --prune &&
@@ -358,4 +370,14 @@ test_expect_success 'pack-refs does not drop broken refs during deletion' '
        test_cmp expect actual
 '
 
+test_expect_success 'maintenance --auto unconditionally packs loose refs' '
+       git update-ref refs/heads/something HEAD &&
+       test_path_is_file .git/refs/heads/something &&
+       git rev-parse refs/heads/something >expect &&
+       git maintenance run --task=pack-refs --auto &&
+       test_path_is_missing .git/refs/heads/something &&
+       git rev-parse refs/heads/something >actual &&
+       test_cmp expect actual
+'
+
 test_done
index c8074ebab286f45eaaba11d6919e911336dffcec..238d4923c433b8e434db6c5eac2e552abc4270aa 100755 (executable)
@@ -96,23 +96,54 @@ test_expect_perms () {
        esac
 }
 
-for umask in 002 022
-do
-       test_expect_success POSIXPERM 'init: honors core.sharedRepository' '
+test_expect_reftable_perms () {
+       local umask="$1"
+       local shared="$2"
+       local expect="$3"
+
+       test_expect_success POSIXPERM "init: honors --shared=$shared with umask $umask" '
                test_when_finished "rm -rf repo" &&
                (
                        umask $umask &&
-                       git init --shared=true repo &&
-                       test 1 = "$(git -C repo config core.sharedrepository)"
+                       git init --shared=$shared repo
                ) &&
-               test_expect_perms "-rw-rw-r--" repo/.git/reftable/tables.list &&
+               test_expect_perms "$expect" repo/.git/reftable/tables.list &&
                for table in repo/.git/reftable/*.ref
                do
-                       test_expect_perms "-rw-rw-r--" "$table" ||
+                       test_expect_perms "$expect" "$table" ||
                        return 1
                done
        '
-done
+
+       test_expect_success POSIXPERM "pack-refs: honors --shared=$shared with umask $umask" '
+               test_when_finished "rm -rf repo" &&
+               (
+                       umask $umask &&
+                       git init --shared=$shared repo &&
+                       test_commit -C repo A &&
+                       test_line_count = 3 repo/.git/reftable/tables.list &&
+                       git -C repo pack-refs
+               ) &&
+               test_expect_perms "$expect" repo/.git/reftable/tables.list &&
+               for table in repo/.git/reftable/*.ref
+               do
+                       test_expect_perms "$expect" "$table" ||
+                       return 1
+               done
+       '
+}
+
+test_expect_reftable_perms 002 umask "-rw-rw-r--"
+test_expect_reftable_perms 022 umask "-rw-r--r--"
+test_expect_reftable_perms 027 umask "-rw-r-----"
+
+test_expect_reftable_perms 002 group "-rw-rw-r--"
+test_expect_reftable_perms 022 group "-rw-rw-r--"
+test_expect_reftable_perms 027 group "-rw-rw----"
+
+test_expect_reftable_perms 002 world "-rw-rw-r--"
+test_expect_reftable_perms 022 world "-rw-rw-r--"
+test_expect_reftable_perms 027 world "-rw-rw-r--"
 
 test_expect_success 'clone: can clone reftable repository' '
        test_when_finished "rm -rf repo clone" &&
@@ -340,6 +371,26 @@ test_expect_success 'ref transaction: empty transaction in empty repo' '
        EOF
 '
 
+test_expect_success 'ref transaction: fails gracefully when auto compaction fails' '
+       test_when_finished "rm -rf repo" &&
+       git init repo &&
+       (
+               cd repo &&
+
+               test_commit A &&
+               for i in $(test_seq 10)
+               do
+                       git branch branch-$i &&
+                       for table in .git/reftable/*.ref
+                       do
+                               touch "$table.lock" || exit 1
+                       done ||
+                       exit 1
+               done &&
+               test_line_count = 13 .git/reftable/tables.list
+       )
+'
+
 test_expect_success 'pack-refs: compacts tables' '
        test_when_finished "rm -rf repo" &&
        git init repo &&
@@ -355,6 +406,65 @@ test_expect_success 'pack-refs: compacts tables' '
        test_line_count = 1 repo/.git/reftable/tables.list
 '
 
+test_expect_success 'pack-refs: compaction raises locking errors' '
+       test_when_finished "rm -rf repo" &&
+       git init repo &&
+       test_commit -C repo A &&
+       touch repo/.git/reftable/tables.list.lock &&
+       cat >expect <<-EOF &&
+       error: unable to compact stack: data is locked
+       EOF
+       test_must_fail git -C repo pack-refs 2>err &&
+       test_cmp expect err
+'
+
+for command in pack-refs gc "maintenance run --task=pack-refs"
+do
+test_expect_success "$command: auto compaction" '
+       test_when_finished "rm -rf repo" &&
+       git init repo &&
+       (
+               cd repo &&
+
+               test_commit A &&
+
+               # We need a bit of setup to ensure that git-gc(1) actually
+               # triggers, and that it does not write anything to the refdb.
+               git config gc.auto 1 &&
+               git config gc.autoDetach 0 &&
+               git config gc.reflogExpire never &&
+               git config gc.reflogExpireUnreachable never &&
+               test_oid blob17_1 | git hash-object -w --stdin &&
+
+               # The tables should have been auto-compacted, and thus auto
+               # compaction should not have to do anything.
+               ls -1 .git/reftable >tables-expect &&
+               test_line_count = 4 tables-expect &&
+               git $command --auto &&
+               ls -1 .git/reftable >tables-actual &&
+               test_cmp tables-expect tables-actual &&
+
+               test_oid blob17_2 | git hash-object -w --stdin &&
+
+               # Lock all tables write some refs. Auto-compaction will be
+               # unable to compact tables and thus fails gracefully, leaving
+               # the stack in a sub-optimal state.
+               ls .git/reftable/*.ref |
+               while read table
+               do
+                       touch "$table.lock" || exit 1
+               done &&
+               git branch B &&
+               git branch C &&
+               rm .git/reftable/*.lock &&
+               test_line_count = 5 .git/reftable/tables.list &&
+
+               git $command --auto &&
+               test_line_count = 1 .git/reftable/tables.list
+       )
+'
+done
+
 test_expect_success 'pack-refs: prunes stale tables' '
        test_when_finished "rm -rf repo" &&
        git init repo &&
@@ -371,26 +481,6 @@ test_expect_success 'pack-refs: does not prune non-table files' '
        test_path_is_file repo/.git/reftable/garbage
 '
 
-for umask in 002 022
-do
-       test_expect_success POSIXPERM 'pack-refs: honors core.sharedRepository' '
-               test_when_finished "rm -rf repo" &&
-               (
-                       umask $umask &&
-                       git init --shared=true repo &&
-                       test_commit -C repo A &&
-                       test_line_count = 3 repo/.git/reftable/tables.list
-               ) &&
-               git -C repo pack-refs &&
-               test_expect_perms "-rw-rw-r--" repo/.git/reftable/tables.list &&
-               for table in repo/.git/reftable/*.ref
-               do
-                       test_expect_perms "-rw-rw-r--" "$table" ||
-                       return 1
-               done
-       '
-done
-
 test_expect_success 'packed-refs: writes are synced' '
        test_when_finished "rm -rf repo" &&
        git init repo &&
index 6ebc3ef9453b71dbc7d90a052834af482dfd48ec..ec3443cc8786d126cbc0085de184b2e9af019566 100755 (executable)
@@ -622,7 +622,7 @@ test_expect_success 'stdin fails create with no ref' '
 test_expect_success 'stdin fails create with no new value' '
        echo "create $a" >stdin &&
        test_must_fail git update-ref --stdin <stdin 2>err &&
-       grep "fatal: create $a: missing <newvalue>" err
+       grep "fatal: create $a: missing <new-oid>" err
 '
 
 test_expect_success 'stdin fails create with too many arguments' '
@@ -640,7 +640,7 @@ test_expect_success 'stdin fails update with no ref' '
 test_expect_success 'stdin fails update with no new value' '
        echo "update $a" >stdin &&
        test_must_fail git update-ref --stdin <stdin 2>err &&
-       grep "fatal: update $a: missing <newvalue>" err
+       grep "fatal: update $a: missing <new-oid>" err
 '
 
 test_expect_success 'stdin fails update with too many arguments' '
@@ -765,21 +765,21 @@ test_expect_success 'stdin update ref fails with wrong old value' '
 test_expect_success 'stdin update ref fails with bad old value' '
        echo "update $c $m does-not-exist" >stdin &&
        test_must_fail git update-ref --stdin <stdin 2>err &&
-       grep "fatal: update $c: invalid <oldvalue>: does-not-exist" err &&
+       grep "fatal: update $c: invalid <old-oid>: does-not-exist" err &&
        test_must_fail git rev-parse --verify -q $c
 '
 
 test_expect_success 'stdin create ref fails with bad new value' '
        echo "create $c does-not-exist" >stdin &&
        test_must_fail git update-ref --stdin <stdin 2>err &&
-       grep "fatal: create $c: invalid <newvalue>: does-not-exist" err &&
+       grep "fatal: create $c: invalid <new-oid>: does-not-exist" err &&
        test_must_fail git rev-parse --verify -q $c
 '
 
 test_expect_success 'stdin create ref fails with zero new value' '
        echo "create $c " >stdin &&
        test_must_fail git update-ref --stdin <stdin 2>err &&
-       grep "fatal: create $c: zero <newvalue>" err &&
+       grep "fatal: create $c: zero <new-oid>" err &&
        test_must_fail git rev-parse --verify -q $c
 '
 
@@ -803,7 +803,7 @@ test_expect_success 'stdin delete ref fails with wrong old value' '
 test_expect_success 'stdin delete ref fails with zero old value' '
        echo "delete $a " >stdin &&
        test_must_fail git update-ref --stdin <stdin 2>err &&
-       grep "fatal: delete $a: zero <oldvalue>" err &&
+       grep "fatal: delete $a: zero <old-oid>" err &&
        git rev-parse $m >expect &&
        git rev-parse $a >actual &&
        test_cmp expect actual
@@ -1027,7 +1027,7 @@ test_expect_success 'stdin -z fails create with no ref' '
 test_expect_success 'stdin -z fails create with no new value' '
        printf $F "create $a" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: create $a: unexpected end of input when reading <newvalue>" err
+       grep "fatal: create $a: unexpected end of input when reading <new-oid>" err
 '
 
 test_expect_success 'stdin -z fails create with too many arguments' '
@@ -1045,27 +1045,27 @@ test_expect_success 'stdin -z fails update with no ref' '
 test_expect_success 'stdin -z fails update with too few args' '
        printf $F "update $a" "$m" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: update $a: unexpected end of input when reading <oldvalue>" err
+       grep "fatal: update $a: unexpected end of input when reading <old-oid>" err
 '
 
 test_expect_success 'stdin -z emits warning with empty new value' '
        git update-ref $a $m &&
        printf $F "update $a" "" "" >stdin &&
        git update-ref -z --stdin <stdin 2>err &&
-       grep "warning: update $a: missing <newvalue>, treating as zero" err &&
+       grep "warning: update $a: missing <new-oid>, treating as zero" err &&
        test_must_fail git rev-parse --verify -q $a
 '
 
 test_expect_success 'stdin -z fails update with no new value' '
        printf $F "update $a" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: update $a: unexpected end of input when reading <newvalue>" err
+       grep "fatal: update $a: unexpected end of input when reading <new-oid>" err
 '
 
 test_expect_success 'stdin -z fails update with no old value' '
        printf $F "update $a" "$m" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: update $a: unexpected end of input when reading <oldvalue>" err
+       grep "fatal: update $a: unexpected end of input when reading <old-oid>" err
 '
 
 test_expect_success 'stdin -z fails update with too many arguments' '
@@ -1083,7 +1083,7 @@ test_expect_success 'stdin -z fails delete with no ref' '
 test_expect_success 'stdin -z fails delete with no old value' '
        printf $F "delete $a" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: delete $a: unexpected end of input when reading <oldvalue>" err
+       grep "fatal: delete $a: unexpected end of input when reading <old-oid>" err
 '
 
 test_expect_success 'stdin -z fails delete with too many arguments' '
@@ -1101,7 +1101,7 @@ test_expect_success 'stdin -z fails verify with too many arguments' '
 test_expect_success 'stdin -z fails verify with no old value' '
        printf $F "verify $a" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: verify $a: unexpected end of input when reading <oldvalue>" err
+       grep "fatal: verify $a: unexpected end of input when reading <old-oid>" err
 '
 
 test_expect_success 'stdin -z fails option with unknown name' '
@@ -1160,7 +1160,7 @@ test_expect_success 'stdin -z update ref fails with wrong old value' '
 test_expect_success 'stdin -z update ref fails with bad old value' '
        printf $F "update $c" "$m" "does-not-exist" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: update $c: invalid <oldvalue>: does-not-exist" err &&
+       grep "fatal: update $c: invalid <old-oid>: does-not-exist" err &&
        test_must_fail git rev-parse --verify -q $c
 '
 
@@ -1178,14 +1178,14 @@ test_expect_success 'stdin -z create ref fails with bad new value' '
        git update-ref -d "$c" &&
        printf $F "create $c" "does-not-exist" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: create $c: invalid <newvalue>: does-not-exist" err &&
+       grep "fatal: create $c: invalid <new-oid>: does-not-exist" err &&
        test_must_fail git rev-parse --verify -q $c
 '
 
 test_expect_success 'stdin -z create ref fails with empty new value' '
        printf $F "create $c" "" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: create $c: missing <newvalue>" err &&
+       grep "fatal: create $c: missing <new-oid>" err &&
        test_must_fail git rev-parse --verify -q $c
 '
 
@@ -1209,7 +1209,7 @@ test_expect_success 'stdin -z delete ref fails with wrong old value' '
 test_expect_success 'stdin -z delete ref fails with zero old value' '
        printf $F "delete $a" "$Z" >stdin &&
        test_must_fail git update-ref -z --stdin <stdin 2>err &&
-       grep "fatal: delete $a: zero <oldvalue>" err &&
+       grep "fatal: delete $a: zero <old-oid>" err &&
        git rev-parse $m >expect &&
        git rev-parse $a >actual &&
        test_cmp expect actual
index bce284c2978848967ffe88764e396fad5059df54..8d90d0285045294b3f39233957ea4487bc7929e0 100755 (executable)
@@ -176,7 +176,10 @@ test_expect_success 'tracking count is accurate after orphan check' '
        git config branch.child.merge refs/heads/main &&
        git checkout child^ &&
        git checkout child >stdout &&
-       test_cmp expect stdout
+       test_cmp expect stdout &&
+
+       git checkout --detach child >stdout &&
+       test_grep ! "can be fast-forwarded\." stdout
 '
 
 test_expect_success 'no advice given for explicit detached head state' '
index 0bab134d71d3e785194562779ef63085de4e6545..7ec7f30b442b3bc04be5ac64cbf1898c0b5268b6 100755 (executable)
@@ -11,27 +11,27 @@ TEST_PASSES_SANITIZE_LEAK=true
 sane_unset GIT_TEST_SPLIT_INDEX
 
 test_set_index_version () {
-    GIT_INDEX_VERSION="$1"
-    export GIT_INDEX_VERSION
+       GIT_INDEX_VERSION="$1"
+       export GIT_INDEX_VERSION
 }
 
 test_set_index_version 3
 
-cat >expect.full <<EOF
-H 1
-H 2
-H sub/1
-H sub/2
-EOF
+test_expect_success 'setup' '
+       cat >expect.full <<-\EOF &&
+       H 1
+       H 2
+       H sub/1
+       H sub/2
+       EOF
 
-cat >expect.skip <<EOF
-S 1
-H 2
-S sub/1
-H sub/2
-EOF
+       cat >expect.skip <<-\EOF &&
+       S 1
+       H 2
+       S sub/1
+       H sub/2
+       EOF
 
-test_expect_success 'setup' '
        mkdir sub &&
        touch ./1 ./2 sub/1 sub/2 &&
        git add 1 2 sub/1 sub/2 &&
index c01492f33f860db2d6ae8764c94c084429abeef5..df235ac306e7126f7090bc62c451e906b174c387 100755 (executable)
@@ -65,6 +65,16 @@ test_expect_success 'update did not touch untracked files' '
        test_must_be_empty out
 '
 
+test_expect_success 'error out when passing untracked path' '
+       git reset --hard &&
+       echo content >>baz &&
+       echo content >>top &&
+       test_must_fail git add -u baz top 2>err &&
+       test_grep -e "error: pathspec .baz. did not match any file(s) known to git" err &&
+       git diff --cached --name-only >actual &&
+       test_must_be_empty actual
+'
+
 test_expect_success 'cache tree has not been corrupted' '
 
        git ls-files -s |
index d3bbd00b818a4f26e2c19ae0ec432138200afc62..ccfa6a720d090c2f7f2a085f60065bdcfaf8d1d9 100755 (executable)
@@ -1154,9 +1154,9 @@ test_expect_success 'avoid ambiguous track and advise' '
        hint: tracking ref '\''refs/heads/main'\'':
        hint:   ambi1
        hint:   ambi2
-       hint: ''
+       hint:
        hint: This is typically a configuration error.
-       hint: ''
+       hint:
        hint: To support setting up tracking branches, ensure that
        hint: different remotes'\'' fetch refspecs map into different
        hint: tracking namespaces.
index f23d39f0d52ec6f5035acfb029550babc67859da..839c904745a2861487e04363060a951aaeb902c9 100755 (executable)
@@ -28,6 +28,16 @@ test_expect_success 'Test of git add' '
        touch foo && git add foo
 '
 
+test_expect_success 'Test with no pathspecs' '
+       cat >expect <<-EOF &&
+       Nothing specified, nothing added.
+       hint: Maybe you wanted to say ${SQ}git add .${SQ}?
+       hint: Disable this message with "git config advice.addEmptyPathspec false"
+       EOF
+       git add 2>actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'Post-check that foo is in the index' '
        git ls-files foo | grep foo
 '
@@ -339,6 +349,40 @@ test_expect_success '"git add ." in empty repo' '
        )
 '
 
+test_expect_success '"git add" a embedded repository' '
+       rm -fr outer && git init outer &&
+       (
+               cd outer &&
+               for i in 1 2
+               do
+                       name=inner$i &&
+                       git init $name &&
+                       git -C $name commit --allow-empty -m $name ||
+                               return 1
+               done &&
+               git add . 2>actual &&
+               cat >expect <<-EOF &&
+               warning: adding embedded git repository: inner1
+               hint: You${SQ}ve added another git repository inside your current repository.
+               hint: Clones of the outer repository will not contain the contents of
+               hint: the embedded repository and will not know how to obtain it.
+               hint: If you meant to add a submodule, use:
+               hint:
+               hint:   git submodule add <url> inner1
+               hint:
+               hint: If you added this path by mistake, you can remove it from the
+               hint: index with:
+               hint:
+               hint:   git rm --cached inner1
+               hint:
+               hint: See "git help submodule" for more information.
+               hint: Disable this message with "git config advice.addEmbeddedRepo false"
+               warning: adding embedded git repository: inner2
+               EOF
+               test_cmp expect actual
+       )
+'
+
 test_expect_success 'error on a repository with no commits' '
        rm -fr empty &&
        git init empty &&
@@ -370,8 +414,7 @@ cat >expect.err <<\EOF
 The following paths are ignored by one of your .gitignore files:
 ignored-file
 hint: Use -f if you really want to add them.
-hint: Turn this message off by running
-hint: "git config advice.addIgnoredFile false"
+hint: Disable this message with "git config advice.addIgnoredFile false"
 EOF
 cat >expect.out <<\EOF
 add 'track-this'
index 0b5339ac6ca8248582ce723e3d552a8d4513e294..bc55255b0a8da397ad14212035da9c4b9cb8d7d8 100755 (executable)
@@ -325,9 +325,9 @@ test_expect_success 'different prompts for mode change/deleted' '
        git -c core.filemode=true add -p >actual &&
        sed -n "s/^\(([0-9/]*) Stage .*?\).*/\1/p" actual >actual.filtered &&
        cat >expect <<-\EOF &&
-       (1/1) Stage deletion [y,n,q,a,d,?]?
-       (1/2) Stage mode change [y,n,q,a,d,j,J,g,/,?]?
-       (2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,?]?
+       (1/1) Stage deletion [y,n,q,a,d,p,?]?
+       (1/2) Stage mode change [y,n,q,a,d,j,J,g,/,p,?]?
+       (2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,p,?]?
        EOF
        test_cmp expect actual.filtered
 '
@@ -514,13 +514,13 @@ test_expect_success 'split hunk setup' '
 test_expect_success 'goto hunk' '
        test_when_finished "git reset" &&
        tr _ " " >expect <<-EOF &&
-       (2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,?]? + 1:  -1,2 +1,3          +15
+       (2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,p,?]? + 1:  -1,2 +1,3          +15
        _ 2:  -2,4 +3,8          +21
        go to which hunk? @@ -1,2 +1,3 @@
        _10
        +15
        _20
-       (1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,?]?_
+       (1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,p,?]?_
        EOF
        test_write_lines s y g 1 | git add -p >actual &&
        tail -n 7 <actual >actual.trimmed &&
@@ -530,11 +530,11 @@ test_expect_success 'goto hunk' '
 test_expect_success 'navigate to hunk via regex' '
        test_when_finished "git reset" &&
        tr _ " " >expect <<-EOF &&
-       (2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,?]? @@ -1,2 +1,3 @@
+       (2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,p,?]? @@ -1,2 +1,3 @@
        _10
        +15
        _20
-       (1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,?]?_
+       (1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,p,?]?_
        EOF
        test_write_lines s y /1,2 | git add -p >actual &&
        tail -n 5 <actual >actual.trimmed &&
@@ -715,21 +715,21 @@ test_expect_success 'colors can be overridden' '
        <BLUE>+<RESET><BLUE>new<RESET>
        <CYAN> more-context<RESET>
        <BLUE>+<RESET><BLUE>another-one<RESET>
-       <YELLOW>(1/1) Stage this hunk [y,n,q,a,d,s,e,?]? <RESET><BOLD>Split into 2 hunks.<RESET>
+       <YELLOW>(1/1) Stage this hunk [y,n,q,a,d,s,e,p,?]? <RESET><BOLD>Split into 2 hunks.<RESET>
        <MAGENTA>@@ -1,3 +1,3 @@<RESET>
        <CYAN> context<RESET>
        <BOLD>-old<RESET>
        <BLUE>+<RESET><BLUE>new<RESET>
        <CYAN> more-context<RESET>
-       <YELLOW>(1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,?]? <RESET><MAGENTA>@@ -3 +3,2 @@<RESET>
+       <YELLOW>(1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,p,?]? <RESET><MAGENTA>@@ -3 +3,2 @@<RESET>
        <CYAN> more-context<RESET>
        <BLUE>+<RESET><BLUE>another-one<RESET>
-       <YELLOW>(2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,?]? <RESET><MAGENTA>@@ -1,3 +1,3 @@<RESET>
+       <YELLOW>(2/2) Stage this hunk [y,n,q,a,d,K,g,/,e,p,?]? <RESET><MAGENTA>@@ -1,3 +1,3 @@<RESET>
        <CYAN> context<RESET>
        <BOLD>-old<RESET>
        <BLUE>+new<RESET>
        <CYAN> more-context<RESET>
-       <YELLOW>(1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,?]? <RESET>
+       <YELLOW>(1/2) Stage this hunk [y,n,q,a,d,j,J,g,/,e,p,?]? <RESET>
        EOF
        test_cmp expect actual
 '
index ece9fae207dbdbcc28a4ac999a351bd059c0968a..56210b5609919dbd625cfe12c49ab7e858534896 100755 (executable)
@@ -66,4 +66,28 @@ test_expect_success 'apply --index create' '
        git diff --exit-code
 '
 
+test_expect_success !MINGW 'apply with no-contents and a funny pathname' '
+       test_when_finished "rm -fr \"funny \"; git reset --hard" &&
+
+       mkdir "funny " &&
+       >"funny /empty" &&
+       git add "funny /empty" &&
+       git diff HEAD -- "funny /" >sample.patch &&
+       git diff -R HEAD -- "funny /" >elpmas.patch &&
+
+       git reset --hard &&
+
+       git apply --stat --check --apply sample.patch &&
+       test_must_be_empty "funny /empty" &&
+
+       git apply --stat --check --apply elpmas.patch &&
+       test_path_is_missing "funny /empty" &&
+
+       git apply -R --stat --check --apply elpmas.patch &&
+       test_must_be_empty "funny /empty" &&
+
+       git apply -R --stat --check --apply sample.patch &&
+       test_path_is_missing "funny /empty"
+'
+
 test_done
index 18fe1c25e6a04b75d2fcdf9ac0c60da04c618ba8..43d40175f8dac940cfa669605899d78b235041c1 100755 (executable)
@@ -11,23 +11,7 @@ test_expect_success 'setup' '
        # behavior, make sure we always pack everything to one pack by
        # default
        git config gc.bigPackThreshold 2g &&
-
-       # These are simply values which, when hashed as a blob with a newline,
-       # produce a hash where the first byte is 0x17 in their respective
-       # algorithms.
-       test_oid_cache <<-EOF
-       obj1 sha1:263
-       obj1 sha256:34
-
-       obj2 sha1:410
-       obj2 sha256:174
-
-       obj3 sha1:523
-       obj3 sha256:313
-
-       obj4 sha1:790
-       obj4 sha256:481
-       EOF
+       test_oid_init
 '
 
 test_expect_success 'gc empty repository' '
@@ -114,8 +98,8 @@ test_expect_success 'pre-auto-gc hook can stop auto gc' '
                # We need to create two object whose sha1s start with 17
                # since this is what git gc counts.  As it happens, these
                # two blobs will do so.
-               test_commit "$(test_oid obj1)" &&
-               test_commit "$(test_oid obj2)" &&
+               test_commit "$(test_oid blob17_1)" &&
+               test_commit "$(test_oid blob17_2)" &&
 
                git gc --auto >../out.actual 2>../err.actual
        ) &&
@@ -146,13 +130,13 @@ test_expect_success 'auto gc with too many loose objects does not attempt to cre
        # We need to create two object whose sha1s start with 17
        # since this is what git gc counts.  As it happens, these
        # two blobs will do so.
-       test_commit "$(test_oid obj1)" &&
-       test_commit "$(test_oid obj2)" &&
+       test_commit "$(test_oid blob17_1)" &&
+       test_commit "$(test_oid blob17_2)" &&
        # Our first gc will create a pack; our second will create a second pack
        git gc --auto &&
        ls .git/objects/pack/pack-*.pack | sort >existing_packs &&
-       test_commit "$(test_oid obj3)" &&
-       test_commit "$(test_oid obj4)" &&
+       test_commit "$(test_oid blob17_3)" &&
+       test_commit "$(test_oid blob17_4)" &&
 
        git gc --auto 2>err &&
        test_grep ! "^warning:" err &&
index b41a47eb943a03b1588bdc87802b0645944ce2ec..696866d7794e1fdd72760ed093b9ff9737d97969 100755 (executable)
@@ -1777,10 +1777,10 @@ test_expect_success '--points-at finds annotated tags of tags' '
 '
 
 test_expect_success 'recursive tagging should give advice' '
-       sed -e "s/|$//" <<-EOF >expect &&
+       cat >expect <<-EOF &&
        hint: You have created a nested tag. The object referred to by your new tag is
        hint: already a tag. If you meant to tag the object that it points to, use:
-       hint: |
+       hint:
        hint:   git tag -f nested annotated-v4.0^{}
        hint: Disable this message with "git config advice.nestedTag false"
        EOF
index 1f7201eb60caf9b31fd3db1a83dc834bb145157c..0aae0dee67078b73855d0152d18863e6a9ad104d 100755 (executable)
@@ -5,6 +5,7 @@
 
 test_description='git clean basic tests'
 
+TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
 git config clean.requireForce no
index 00c1f1aab1304c127a5dccdaeff62d7213b7a9e5..5c4a89df5c81dcd5559d56de5924aa0140cb6772 100755 (executable)
@@ -212,8 +212,7 @@ test_expect_success 'submodule add to .gitignored path fails' '
                The following paths are ignored by one of your .gitignore files:
                submod
                hint: Use -f if you really want to add them.
-               hint: Turn this message off by running
-               hint: "git config advice.addIgnoredFile false"
+               hint: Disable this message with "git config advice.addIgnoredFile false"
                EOF
                # Does not use test_commit due to the ignore
                echo "*" > .gitignore &&
index bced44a0fc915f430ccc41d54fbd8bc48df2cef8..cc12f99f11534b898d11a0fc7bbb5d339730ccc8 100755 (executable)
@@ -101,22 +101,8 @@ test_expect_success 'fail to commit untracked file (even with --include/--only)'
        test_must_fail git commit --only -m "baz" baz 2>err &&
        test_grep -e "$error" err &&
 
-       # TODO: as for --include, the below command will fail because
-       # nothing is staged. If something was staged, it would not fail
-       # even though the provided pathspec does not match any tracked
-       # path. (However, the untracked paths that match the pathspec are
-       # not committed and only the staged changes get committed.)
-       # In either cases, no error is returned to stderr like in (--only
-       # and without --only/--include) cases. In a similar manner,
-       # "git add -u baz" also does not error out.
-       #
-       # Therefore, the below test is just to document the current behavior
-       # and is not an endorsement to the current behavior, and we may
-       # want to fix this. And when that happens, this test should be
-       # updated accordingly.
-
        test_must_fail git commit --include -m "baz" baz 2>err &&
-       test_must_be_empty err
+       test_grep -e "$error" err
 '
 
 test_expect_success 'setup: non-initial commit' '
index c8af8dab795604998475e5fdff21137534c7f39b..79d3e0e7d9b32dd2938e635dc94acc6b49000569 100644 (file)
@@ -1962,6 +1962,7 @@ test_lazy_prereq DEFAULT_REPO_FORMAT '
 # Tests that verify the scheduler integration must set this locally
 # to avoid errors.
 GIT_TEST_MAINT_SCHEDULER="none:exit 1"
+export GIT_TEST_MAINT_SCHEDULER
 
 # Does this platform support `git fsmonitor--daemon`
 #
index 535834636131872b6f6f45bccea9a4bc868e31f2..7a4e5780e1c7095239867e026d424395032c671a 100644 (file)
@@ -66,43 +66,26 @@ static void test_prio_queue(int *input, size_t input_size,
        clear_prio_queue(&pq);
 }
 
-#define BASIC_INPUT 2, 6, 3, 10, 9, 5, 7, 4, 5, 8, 1, DUMP
-#define BASIC_RESULT 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10
-
-#define MIXED_PUT_GET_INPUT 6, 2, 4, GET, 5, 3, GET, GET, 1, DUMP
-#define MIXED_PUT_GET_RESULT 2, 3, 4, 1, 5, 6
-
-#define EMPTY_QUEUE_INPUT 1, 2, GET, GET, GET, 1, 2, GET, GET, GET
-#define EMPTY_QUEUE_RESULT 1, 2, MISSING, 1, 2, MISSING
-
-#define STACK_INPUT STACK, 8, 1, 5, 4, 6, 2, 3, DUMP
-#define STACK_RESULT 3, 2, 6, 4, 5, 1, 8
-
-#define REVERSE_STACK_INPUT STACK, 1, 2, 3, 4, 5, 6, REVERSE, DUMP
-#define REVERSE_STACK_RESULT 1, 2, 3, 4, 5, 6
-
-#define TEST_INPUT(INPUT, RESULT, name)                        \
-  static void test_##name(void)                                \
-{                                                              \
-       int input[] = {INPUT};                                  \
-       int result[] = {RESULT};                                \
-       test_prio_queue(input, ARRAY_SIZE(input),               \
-                       result, ARRAY_SIZE(result));            \
-}
-
-TEST_INPUT(BASIC_INPUT, BASIC_RESULT, basic)
-TEST_INPUT(MIXED_PUT_GET_INPUT, MIXED_PUT_GET_RESULT, mixed)
-TEST_INPUT(EMPTY_QUEUE_INPUT, EMPTY_QUEUE_RESULT, empty)
-TEST_INPUT(STACK_INPUT, STACK_RESULT, stack)
-TEST_INPUT(REVERSE_STACK_INPUT, REVERSE_STACK_RESULT, reverse)
+#define TEST_INPUT(input, result) \
+       test_prio_queue(input, ARRAY_SIZE(input), result, ARRAY_SIZE(result))
 
 int cmd_main(int argc, const char **argv)
 {
-       TEST(test_basic(), "prio-queue works for basic input");
-       TEST(test_mixed(), "prio-queue works for mixed put & get commands");
-       TEST(test_empty(), "prio-queue works when queue is empty");
-       TEST(test_stack(), "prio-queue works when used as a LIFO stack");
-       TEST(test_reverse(), "prio-queue works when LIFO stack is reversed");
+       TEST(TEST_INPUT(((int []){ 2, 6, 3, 10, 9, 5, 7, 4, 5, 8, 1, DUMP }),
+                       ((int []){ 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10 })),
+            "prio-queue works for basic input");
+       TEST(TEST_INPUT(((int []){ 6, 2, 4, GET, 5, 3, GET, GET, 1, DUMP }),
+                       ((int []){ 2, 3, 4, 1, 5, 6 })),
+            "prio-queue works for mixed put & get commands");
+       TEST(TEST_INPUT(((int []){ 1, 2, GET, GET, GET, 1, 2, GET, GET, GET }),
+                       ((int []){ 1, 2, MISSING, 1, 2, MISSING })),
+            "prio-queue works when queue is empty");
+       TEST(TEST_INPUT(((int []){ STACK, 8, 1, 5, 4, 6, 2, 3, DUMP }),
+                       ((int []){ 3, 2, 6, 4, 5, 1, 8 })),
+            "prio-queue works when used as a LIFO stack");
+       TEST(TEST_INPUT(((int []){ STACK, 1, 2, 3, 4, 5, 6, REVERSE, DUMP }),
+                       ((int []){ 1, 2, 3, 4, 5, 6 })),
+            "prio-queue works when LIFO stack is reversed");
 
        return test_done();
 }