Merge branch 'vd/scalar-enables-fsmonitor'
author     Junio C Hamano <gitster@pobox.com>
           Mon, 29 Aug 2022 21:55:12 +0000 (14:55 -0700)
committer  Junio C Hamano <gitster@pobox.com>
           Mon, 29 Aug 2022 21:55:12 +0000 (14:55 -0700)
"scalar" now enables built-in fsmonitor on enlisted repositories,
when able.

* vd/scalar-enables-fsmonitor:
  scalar: update technical doc roadmap with FSMonitor support
  scalar unregister: stop FSMonitor daemon
  scalar: enable built-in FSMonitor on `register`
  scalar: move config setting logic into its own function
  scalar-delete: do not 'die()' in 'delete_enlistment()'
  scalar-[un]register: clearly indicate source of error
  scalar-unregister: handle error codes greater than 0
  scalar: constrain enlistment search

92 files changed:
.gitignore
Documentation/Makefile
Documentation/RelNotes/2.37.3.txt [new file with mode: 0644]
Documentation/RelNotes/2.38.0.txt
Documentation/config/log.txt
Documentation/git-add.txt
Documentation/git-bugreport.txt
Documentation/git-diagnose.txt [new file with mode: 0644]
Documentation/git-log.txt
Documentation/rev-list-options.txt
Documentation/technical/bundle-uri.txt [new file with mode: 0644]
Documentation/technical/scalar.txt
Makefile
builtin.h
builtin/bugreport.c
builtin/checkout.c
builtin/diagnose.c [new file with mode: 0644]
builtin/fetch.c
builtin/gc.c
builtin/log.c
builtin/replace.c
builtin/reset.c
builtin/rev-list.c
builtin/rm.c
cache.h
compat/disk.h [new file with mode: 0644]
compat/nonblock.c [new file with mode: 0644]
compat/nonblock.h [new file with mode: 0644]
contrib/scalar/scalar.c
contrib/scalar/t/t9099-scalar.sh
diagnose.c [new file with mode: 0644]
diagnose.h [new file with mode: 0644]
diff-lib.c
environment.c
fetch-pack.c
fsck.c
fsck.h
git-compat-util.h
git.c
log-tree.c
merge-ort.c
mergetools/vimdiff
notes.c
object.h
packfile.c
pathspec.c
pathspec.h
pkt-line.c
pkt-line.h
read-cache.c
refs.c
refs.h
revision.c
revision.h
run-command.c
t/helper/test-rot13-filter.c [new file with mode: 0644]
t/helper/test-tool.c
t/helper/test-tool.h
t/perf/p2000-sparse-operations.sh
t/t0021-conversion.sh
t/t0021/rot13-filter.pl [deleted file]
t/t0091-bugreport.sh
t/t0092-diagnose.sh [new file with mode: 0755]
t/t1092-sparse-checkout-compatibility.sh
t/t1450-fsck.sh
t/t2080-parallel-checkout-basics.sh
t/t2082-parallel-checkout-attributes.sh
t/t3701-add-interactive.sh
t/t4013-diff-various.sh
t/t4013/diff.log_--decorate=full_--all
t/t4013/diff.log_--decorate=full_--clear-decorations_--all [new file with mode: 0644]
t/t4013/diff.log_--decorate=full_--decorate-all_--all [new file with mode: 0644]
t/t4013/diff.log_--decorate_--all
t/t4013/diff.log_--decorate_--clear-decorations_--all [new file with mode: 0644]
t/t4013/diff.log_--decorate_--decorate-all_--all [new file with mode: 0644]
t/t4202-log.sh
t/t4207-log-decoration-colors.sh
t/t5500-fetch-pack.sh
t/t5504-fetch-receive-strict.sh
t/t5516-fetch-push.sh
t/t5601-clone.sh
t/t5616-partial-clone.sh
t/t5703-upload-pack-ref-in-want.sh
t/t6019-rev-list-ancestry-path.sh
t/t6115-rev-list-du.sh
t/t6437-submodule-merge.sh
t/t7402-submodule-rebase.sh
t/t7900-maintenance.sh
tree-walk.c
tree-walk.h
unpack-trees.c
wrapper.c

index 42fd7253b4435e014690ad173125e2a291c2ca5a..80b530bbed2c80814ac74956d329d277d85bba86 100644 (file)
@@ -53,6 +53,7 @@
 /git-cvsimport
 /git-cvsserver
 /git-daemon
+/git-diagnose
 /git-diff
 /git-diff-files
 /git-diff-index
index 346bbcf8be7bc38a0bc8262287a17f12b8eda8be..bd6b6fcb93085d550846defa9802b9d15a487034 100644 (file)
@@ -106,6 +106,7 @@ TECH_DOCS += MyFirstObjectWalk
 TECH_DOCS += SubmittingPatches
 TECH_DOCS += ToolsForGit
 TECH_DOCS += technical/bitmap-format
+TECH_DOCS += technical/bundle-uri
 TECH_DOCS += technical/hash-function-transition
 TECH_DOCS += technical/long-running-process-protocol
 TECH_DOCS += technical/multi-pack-index
diff --git a/Documentation/RelNotes/2.37.3.txt b/Documentation/RelNotes/2.37.3.txt
new file mode 100644 (file)
index 0000000..dc57b7b
--- /dev/null
@@ -0,0 +1,46 @@
+Git 2.37.3 Release Notes
+========================
+
+This primarily is to backport various fixes accumulated on the 'master'
+front since 2.37.2.
+
+Fixes since v2.37.2
+-------------------
+
+ * Fix build procedure for Windows that uses CMake so that it can pick
+   up the shell interpreter from local installation location.
+
+ * Conditionally allow building Python interpreter on Windows.
+
+ * Fix to lstat() emulation on Windows.
+
+ * Older gcc with -Wall complains about the universal zero initializer
+   "struct s = { 0 };" idiom, which makes developers' lives
+   inconvenient (as -Werror is enabled by DEVELOPER=YesPlease).  The
+   build procedure has been tweaked to help these compilers.
+
+ * Plug memory leaks in the failure code path in the "merge-ort" merge
+   strategy backend.
+
+ * Avoid repeatedly running getconf to ask libc version in the test
+   suite, and instead just ask it once per script.
+
+ * Platform-specific code that determines if a directory is OK to use
+   as a repository has been taught to report more details, especially
+   on Windows.
+
+ * "vimdiff3" regression fix.
+
+ * "git fsck" reads mode from tree objects but canonicalizes the mode
+   before passing it to the logic to check object sanity, which has
+   hidden broken tree objects from the checking logic.  This has been
+   corrected, but to help existing projects with broken tree objects
+   that they cannot fix retroactively, the severity of anomalies this
+   code detects has been demoted to "info" for now.
+
+ * Fixes to sparse index compatibility work for "reset" and "checkout"
+   commands.
+
+ * Documentation for "git add --renormalize" has been improved.
+
+Also contains other minor documentation updates and code clean-ups.
index ee469d7939215e90428865dd23f39e370a97ac4d..cd64c62b207882e503ae19000a3f26013d96e4c6 100644 (file)
@@ -48,6 +48,16 @@ UI, Workflows & Features
  * Operating modes like "--batch" of "git cat-file" command learned to
    take NUL-terminated input, instead of one-item-per-line.
 
+ * "git rm" has become more aware of the sparse-index feature.
+
+ * "git rev-list --disk-usage" learned to take an optional value
+   "human" to show the reported value in human-readable format, like
+   "3.40MiB".
+
+ * The "diagnose" feature to create a zip archive for diagnostic
+   material has been lifted from "scalar" and made into a feature of
+   "git bugreport".
+
 
 Performance, Internal Implementation, Development Support etc.
 
@@ -107,6 +117,11 @@ Performance, Internal Implementation, Development Support etc.
  * "git fetch" client logs the partial clone filter used in the trace2
    output.
 
+ * The "bundle URI" design gets documented.
+
+ * The common ancestor negotiation exchange during a "git fetch"
+   session now leaves a trace log.
+
 
 Fixes since v2.37
 -----------------
@@ -240,5 +255,30 @@ Fixes since v2.37
    on Windows.
    (merge 3f7207e2ea js/safe-directory-plus later to maint).
 
+ * "vimdiff3" regression fix.
+   (merge 34133d9658 fc/vimdiff-layout-vimdiff3-fix later to maint).
+
+ * "git fsck" reads mode from tree objects but canonicalizes the mode
+   before passing it to the logic to check object sanity, which has
+   hidden broken tree objects from the checking logic.  This has been
+   corrected, but to help existing projects with broken tree objects
+   that they cannot fix retroactively, the severity of anomalies this
+   code detects has been demoted to "info" for now.
+   (merge 4dd3b045f5 jk/fsck-tree-mode-bits-fix later to maint).
+
+ * Fixes to sparse index compatibility work for "reset" and "checkout"
+   commands.
+   (merge b15207b8cf vd/sparse-reset-checkout-fixes later to maint).
+
+ * An earlier optimization discarded a tree-object buffer that is
+   still in use, which has been corrected.
+   (merge 1490d7d82d jk/is-promisor-object-keep-tree-in-use later to maint).
+
+ * Fix deadlocks between main Git process and subprocess spawned via
+   the pipe_command() API, that can kill "git add -p" that was
+   reimplemented in C recently.
+   (merge 716c1f649e jk/pipe-command-nonblock later to maint).
+
  * Other code cleanup, docfix, build fix, etc.
    (merge 94955d576b gc/git-reflog-doc-markup later to maint).
+   (merge efae7ce692 po/doc-add-renormalize later to maint).
index 456eb07800cb1eef63e9d26f96a1ed35b6e65116..5250ba45fb4ea583e1fb122b6f5f375aabeca5c8 100644 (file)
@@ -18,6 +18,11 @@ log.decorate::
        names are shown. This is the same as the `--decorate` option
        of the `git log`.
 
+log.initialDecorationSet::
+       By default, `git log` only shows decorations for certain known ref
+       namespaces. If 'all' is specified, then show all refs as
+       decorations.
+
 log.excludeDecoration::
        Exclude the specified patterns from the log decorations. This is
        similar to the `--decorate-refs-exclude` command-line option, but
index 11eb70f16c7287d53b567368754a19f33d4c7fb2..9b37f356542d1d8787f0344abf3243996fb89476 100644 (file)
@@ -188,7 +188,9 @@ for "git add --no-all <pathspec>...", i.e. ignored removed files.
        forcibly add them again to the index.  This is useful after
        changing `core.autocrlf` configuration or the `text` attribute
        in order to correct files added with wrong CRLF/LF line endings.
-       This option implies `-u`.
+       This option implies `-u`. Lone CR characters are untouched, thus
+       while a CRLF cleans to LF, a CRCRLF sequence is only partially
+       cleaned to CRLF.
 
 --chmod=(+|-)x::
        Override the executable bit of the added files.  The executable
index d8817bf3cec3995cc7f3fa4a87c8f8ff3f9db865..eca726e57911af2cc1f0643cafdcf887c20bfa32 100644 (file)
@@ -9,6 +9,7 @@ SYNOPSIS
 --------
 [verse]
 'git bugreport' [(-o | --output-directory) <path>] [(-s | --suffix) <format>]
+               [--diagnose[=<mode>]]
 
 DESCRIPTION
 -----------
@@ -31,6 +32,10 @@ The following information is captured automatically:
  - A list of enabled hooks
  - $SHELL
 
+Additional information may be gathered into a separate zip archive using the
+`--diagnose` option, and can be attached alongside the bugreport document to
+provide additional context to readers.
+
 This tool is invoked via the typical Git setup process, which means that in some
 cases, it might not be able to launch - for example, if a relevant config file
 is unreadable. In this kind of scenario, it may be helpful to manually gather
@@ -49,6 +54,19 @@ OPTIONS
        named 'git-bugreport-<formatted suffix>'. This should take the form of a
        strftime(3) format string; the current local time will be used.
 
+--no-diagnose::
+--diagnose[=<mode>]::
+       Create a zip archive of supplemental information about the user's
+       machine, Git client, and repository state. The archive is written to the
+       same output directory as the bug report and is named
+       'git-diagnostics-<formatted suffix>'.
++
+Without `mode` specified, the diagnostic archive will contain the default set of
+statistics reported by `git diagnose`. An optional `mode` value may be specified
+to change which information is included in the archive. See
+linkgit:git-diagnose[1] for the list of valid values for `mode` and details
+about their usage.
+
 GIT
 ---
 Part of the linkgit:git[1] suite
diff --git a/Documentation/git-diagnose.txt b/Documentation/git-diagnose.txt
new file mode 100644 (file)
index 0000000..3ec8cc7
--- /dev/null
@@ -0,0 +1,65 @@
+git-diagnose(1)
+================
+
+NAME
+----
+git-diagnose - Generate a zip archive of diagnostic information
+
+SYNOPSIS
+--------
+[verse]
+'git diagnose' [(-o | --output-directory) <path>] [(-s | --suffix) <format>]
+              [--mode=<mode>]
+
+DESCRIPTION
+-----------
+Collects detailed information about the user's machine, Git client, and
+repository state and packages that information into a zip archive. The
+generated archive can then, for example, be shared with the Git mailing list to
+help debug an issue or serve as a reference for independent debugging.
+
+By default, the following information is captured in the archive:
+
+  * 'git version --build-options'
+  * The path to the repository root
+  * The available disk space on the filesystem
+  * The name and size of each packfile, including those in alternate object
+    stores
+  * The total count of loose objects, as well as counts broken down by
+    `.git/objects` subdirectory
+
+Additional information can be collected by selecting a different diagnostic mode
+using the `--mode` option.
+
+This tool differs from linkgit:git-bugreport[1] in that it collects much more
+detailed information with a greater focus on reporting the size and data shape
+of repository contents.
+
+OPTIONS
+-------
+-o <path>::
+--output-directory <path>::
+       Place the resulting diagnostics archive in `<path>` instead of the
+       current directory.
+
+-s <format>::
+--suffix <format>::
+       Specify an alternate suffix for the diagnostics archive name, to create
+       a file named 'git-diagnostics-<formatted suffix>'. This should take the
+       form of a strftime(3) format string; the current local time will be
+       used.
+
+--mode=(stats|all)::
+       Specify the type of diagnostics that should be collected. The default behavior
+       of 'git diagnose' is equivalent to `--mode=stats`.
++
+The `--mode=all` option collects everything included in `--mode=stats`, as well
+as copies of `.git`, `.git/hooks`, `.git/info`, `.git/logs`, and
+`.git/objects/info` directories. This additional information may be sensitive,
+as it can be used to reconstruct the full contents of the diagnosed repository.
+Users should exercise caution when sharing an archive generated with
+`--mode=all`.
+
+GIT
+---
+Part of the linkgit:git[1] suite
index 20e87cecf4917fdbb18c2ffe675a1b2d9e4c3177..b1285aee3c290ed76eb5accd4a733db0e747a518 100644 (file)
@@ -45,13 +45,23 @@ OPTIONS
 
 --decorate-refs=<pattern>::
 --decorate-refs-exclude=<pattern>::
-       If no `--decorate-refs` is given, pretend as if all refs were
-       included.  For each candidate, do not use it for decoration if it
+       For each candidate reference, do not use it for decoration if it
        matches any patterns given to `--decorate-refs-exclude` or if it
        doesn't match any of the patterns given to `--decorate-refs`. The
        `log.excludeDecoration` config option allows excluding refs from
        the decorations, but an explicit `--decorate-refs` pattern will
        override a match in `log.excludeDecoration`.
++
+If none of these options or config settings are given, then references are
+used as decoration if they match `HEAD`, `refs/heads/`, `refs/remotes/`,
+`refs/stash/`, or `refs/tags/`.
+
+--clear-decorations::
+       When specified, this option clears all previous `--decorate-refs`
+       or `--decorate-refs-exclude` options and relaxes the default
+       decoration filter to include all references. This option is
+       assumed if the config value `log.initialDecorationSet` is set to
+       `all`.
 
 --source::
        Print out the ref name given on the command line by which each
index 195e74eec633ea913c0934d2b690b674360376d7..1837509566a79a36a5f607837bc13af082d81b86 100644 (file)
@@ -242,6 +242,7 @@ ifdef::git-rev-list[]
        to `/dev/null` as the output does not have to be formatted.
 
 --disk-usage::
+--disk-usage=human::
        Suppress normal output; instead, print the sum of the bytes used
        for on-disk storage by the selected commits or objects. This is
        equivalent to piping the output into `git cat-file
@@ -249,6 +250,8 @@ ifdef::git-rev-list[]
        faster (especially with `--use-bitmap-index`). See the `CAVEATS`
        section in linkgit:git-cat-file[1] for the limitations of what
        "on-disk storage" means.
+       With the optional value `human`, on-disk storage size is shown
+       as a human-readable string (e.g. 12.24 KiB, 3.50 MiB).
 endif::git-rev-list[]
 
 --cherry-mark::
@@ -389,12 +392,14 @@ Default mode::
        merges from the resulting history, as there are no selected
        commits contributing to this merge.
 
---ancestry-path::
+--ancestry-path[=<commit>]::
        When given a range of commits to display (e.g. 'commit1..commit2'
-       or 'commit2 {caret}commit1'), only display commits that exist
-       directly on the ancestry chain between the 'commit1' and
-       'commit2', i.e. commits that are both descendants of 'commit1',
-       and ancestors of 'commit2'.
+       or 'commit2 {caret}commit1'), only display commits in that range
+       that are ancestors of <commit>, descendants of <commit>, or
+       <commit> itself.  If no commit is specified, use 'commit1' (the
+       excluded part of the range) as <commit>.  Can be passed multiple
+       times; if so, a commit is included if it is any of the commits
+       given or if it is an ancestor or descendant of one of them.
 
 A more detailed explanation follows.
 
@@ -568,11 +573,10 @@ Note the major differences in `N`, `P`, and `Q` over `--full-history`:
 
 There is another simplification mode available:
 
---ancestry-path::
-       Limit the displayed commits to those directly on the ancestry
-       chain between the ``from'' and ``to'' commits in the given commit
-       range. I.e. only display commits that are ancestor of the ``to''
-       commit and descendants of the ``from'' commit.
+--ancestry-path[=<commit>]::
+       Limit the displayed commits to those which are an ancestor of
+       <commit>, or which are a descendant of <commit>, or are <commit>
+       itself.
 +
 As an example use case, consider the following commit history:
 +
@@ -604,6 +608,29 @@ option does. Applied to the 'D..M' range, it results in:
                               \
                                L--M
 -----------------------------------------------------------------------
++
+We can also use `--ancestry-path=D` instead of `--ancestry-path` which
+means the same thing when applied to the 'D..M' range but is just more
+explicit.
++
+If we instead are interested in a given topic within this range, and all
+commits affected by that topic, we may only want to view the subset of
+`D..M` which contain that topic in their ancestry path.  So, using
+`--ancestry-path=H D..M` for example would result in:
++
+-----------------------------------------------------------------------
+               E
+                \
+                 G---H---I---J
+                              \
+                               L--M
+-----------------------------------------------------------------------
++
+Whereas `--ancestry-path=K D..M` would result in
++
+-----------------------------------------------------------------------
+               K---------------L--M
+-----------------------------------------------------------------------
 
 Before discussing another option, `--show-pulls`, we need to
 create a new example history.
@@ -659,7 +686,7 @@ Here, the merge commits `O` and `P` contribute extra noise, as they did
 not actually contribute a change to `file.txt`. They only merged a topic
 that was based on an older version of `file.txt`. This is a common
 issue in repositories using a workflow where many contributors work in
-parallel and merge their topic branches along a single trunk: manu
+parallel and merge their topic branches along a single trunk: many
 unrelated merges appear in the `--full-history` results.
 
 When using the `--simplify-merges` option, the commits `O` and `P`
diff --git a/Documentation/technical/bundle-uri.txt b/Documentation/technical/bundle-uri.txt
new file mode 100644 (file)
index 0000000..c25c423
--- /dev/null
@@ -0,0 +1,573 @@
+Bundle URIs
+===========
+
+Git bundles are files that store a pack-file along with some extra metadata,
+including a set of refs and a (possibly empty) set of necessary commits. See
+linkgit:git-bundle[1] and link:bundle-format.txt[the bundle format] for more
+information.
+
+Bundle URIs are locations where Git can download one or more bundles in
+order to bootstrap the object database in advance of fetching the remaining
+objects from a remote.
+
+One goal is to speed up clones and fetches for users with poor network
+connectivity to the origin server. Another benefit is to allow heavy users,
+such as CI build farms, to use local resources for the majority of Git data
+and thereby reduce the load on the origin server.
+
+To enable the bundle URI feature, users can specify a bundle URI using
+command-line options or the origin server can advertise one or more URIs
+via a protocol v2 capability.
+
+Design Goals
+------------
+
+The bundle URI standard aims to be flexible enough to satisfy multiple
+workloads. The bundle provider and the Git client have several choices in
+how they create and consume bundle URIs.
+
+* Bundles can have whatever name the server desires. This name could refer
+  to immutable data by using a hash of the bundle contents. However, this
+  means that a new URI will be needed after every update of the content.
+  This might be acceptable if the server is advertising the URI (and the
+  server is aware of new bundles being generated) but would not be
+  ergonomic for users using the command line option.
+
+* The bundles could be organized specifically for bootstrapping full
+  clones, but could also be organized with the intention of bootstrapping
+  incremental fetches. The bundle provider must decide on one of several
+  organization schemes to minimize client downloads during incremental
+  fetches, but the Git client can also choose whether to use bundles for
+  either of these operations.
+
+* The bundle provider can choose to support full clones, partial clones,
+  or both. The client can detect which bundles are appropriate for the
+  repository's partial clone filter, if any.
+
+* The bundle provider can use a single bundle (for clones only), or a
+  list of bundles. When using a list of bundles, the provider can specify
+  whether or not the client needs _all_ of the bundle URIs for a full
+  clone, or if _any_ one of the bundle URIs is sufficient. This allows the
+  bundle provider to use different URIs for different geographies.
+
+* The bundle provider can organize the bundles using heuristics, such as
+  creation tokens, to help the client prevent downloading bundles it does
+  not need. When the bundle provider does not provide these heuristics,
+  the client can use optimizations to minimize how much of the data is
+  downloaded.
+
+* The bundle provider does not need to be associated with the Git server.
+  The client can choose to use the bundle provider without it being
+  advertised by the Git server.
+
+* The client can choose to discover bundle providers that are advertised
+  by the Git server. This could happen during `git clone`, during
+  `git fetch`, both, or neither. The user can choose which combination
+  works best for them.
+
+* The client can choose to configure a bundle provider manually at any
+  time. The client can also choose to specify a bundle provider manually
+  as a command-line option to `git clone`.
+
+Each repository is different and every Git server has different needs.
+Hopefully the bundle URI feature is flexible enough to satisfy all needs.
+If not, then the feature can be extended through its versioning mechanism.
+
+Server requirements
+-------------------
+
+To provide a server-side implementation of bundle servers, no other parts
+of the Git protocol are required. This allows server maintainers to use
+static content solutions such as CDNs in order to serve the bundle files.
+
+At the current scope of the bundle URI feature, all URIs are expected to
+be HTTP(S) URLs where content is downloaded to a local file using a `GET`
+request to that URL. The server could include authentication requirements
+to those requests with the aim of triggering the configured credential
+helper for secure access. (Future extensions could use "file://" URIs or
+SSH URIs.)
+
+Assuming a `200 OK` response from the server, the content at the URL is
+inspected. First, Git attempts to parse the file as a bundle file of
+version 2 or higher. If the file is not a bundle, then the file is parsed
+as a plain-text file using Git's config parser. The key-value pairs in
+that config file are expected to describe a list of bundle URIs. If
+neither of these parse attempts succeed, then Git will report an error to
+the user that the bundle URI provided erroneous data.
+
+Any other data provided by the server is considered erroneous.
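
As an illustration (not code from this series), the inspection order could be sketched as follows; the helper name and the deliberately loose config-shape check are assumptions, and a real client would use Git's bundle header and config parsers rather than anything this simple:

    # Illustrative sketch: classify a payload downloaded from a bundle URI.
    BUNDLE_SIGNATURES = (b"# v2 git bundle\n", b"# v3 git bundle\n")

    def classify_payload(data: bytes) -> str:
        if data.startswith(BUNDLE_SIGNATURES):
            return "bundle"        # hand off to the bundle header parser
        # Otherwise require every non-blank line to look like Git config:
        # a "[section]" header or a "key = value" pair.
        for line in data.decode("utf-8").splitlines():
            line = line.strip()
            if not line or line.startswith(("#", ";")):
                continue
            if line.startswith("[") and line.endswith("]"):
                continue
            if "=" in line:
                continue
            raise ValueError("bundle URI provided erroneous data")
        return "bundle list"       # hand off to the config parser

Only the order of the checks is the point here; everything else is simplified.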
+
+Bundle Lists
+------------
+
+The Git server can advertise bundle URIs using a set of `key=value` pairs.
+A bundle URI can also serve a plain-text file in the Git config format
+containing these same `key=value` pairs. In both cases, we consider this
+to be a _bundle list_. The pairs specify information about the bundles
+that the client can use to make decisions for which bundles to download
+and which to ignore.
+
+A few keys focus on properties of the list itself.
+
+bundle.version::
+       (Required) This value provides a version number for the bundle
+       list. If a future Git change enables a feature that needs the Git
+       client to react to a new key in the bundle list file, then this version
+       will increment. The only current version number is 1, and if any other
+       value is specified then Git will fail to use this file.
+
+bundle.mode::
+       (Required) This value has one of two values: `all` and `any`. When `all`
+       is specified, then the client should expect to need all of the listed
+       bundle URIs that match their repository's requirements. When `any` is
+       specified, then the client should expect that any one of the bundle URIs
+       that match their repository's requirements will suffice. Typically, the
+       `any` option is used to list a number of different bundle servers
+       located in different geographies.
+
+bundle.heuristic::
+       If this string-valued key exists, then the bundle list is designed to
+       work well with incremental `git fetch` commands. The heuristic signals
+       that there are additional keys available for each bundle that help
+       determine which subset of bundles the client should download. The only
+       heuristic currently planned is `creationToken`.
+
+The remaining keys include an `<id>` segment which is a server-designated
+name for each available bundle. The `<id>` must contain only alphanumeric
+and `-` characters.
+
+bundle.<id>.uri::
+       (Required) This string value is the URI for downloading bundle `<id>`.
+       If the URI begins with a protocol (`http://` or `https://`) then the URI
+       is absolute. Otherwise, the URI is interpreted as relative to the URI
+       used for the bundle list. If the URI begins with `/`, then that relative
+       path is relative to the domain name used for the bundle list. (This use
+       of relative paths is intended to make it easier to distribute a set of
+       bundles across a large number of servers or CDNs with different domain
+       names.)
+
+bundle.<id>.filter::
+       This string value represents an object filter that should also appear in
+       the header of this bundle. The server uses this value to differentiate
+       different kinds of bundles from which the client can choose those that
+       match their object filters.
+
+bundle.<id>.creationToken::
+       This value is a nonnegative 64-bit integer used for sorting the bundles
+       in the list. This is used to download a subset of bundles during a fetch
+       when `bundle.heuristic=creationToken`.
+
+bundle.<id>.location::
+       This string value advertises a real-world location from where the bundle
+       URI is served. This can be used to present the user with an option for
+       which bundle URI to use or simply as an informative indicator of which
+       bundle URI was selected by Git. This is only valuable when
+       `bundle.mode` is `any`.
+
+Here is an example bundle list using the Git config format:
+
+       [bundle]
+               version = 1
+               mode = all
+               heuristic = creationToken
+
+       [bundle "2022-02-09-1644442601-daily"]
+               uri = https://bundles.example.com/git/git/2022-02-09-1644442601-daily.bundle
+               creationToken = 1644442601
+
+       [bundle "2022-02-02-1643842562"]
+               uri = https://bundles.example.com/git/git/2022-02-02-1643842562.bundle
+               creationToken = 1643842562
+
+       [bundle "2022-02-09-1644442631-daily-blobless"]
+               uri = 2022-02-09-1644442631-daily-blobless.bundle
+               creationToken = 1644442631
+               filter = blob:none
+
+       [bundle "2022-02-02-1643842568-blobless"]
+               uri = /git/git/2022-02-02-1643842568-blobless.bundle
+               creationToken = 1643842568
+               filter = blob:none
+
+This example uses `bundle.mode=all` as well as the
+`bundle.<id>.creationToken` heuristic. It also uses the `bundle.<id>.filter`
+options to present two parallel sets of bundles: one for full clones and
+another for blobless partial clones.
+
+Suppose that this bundle list was found at the URI
+`https://bundles.example.com/git/git/` and so the two blobless bundles have
+the following fully-expanded URIs:
+
+* `https://bundles.example.com/git/git/2022-02-09-1644442631-daily-blobless.bundle`
+* `https://bundles.example.com/git/git/2022-02-02-1643842568-blobless.bundle`
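
The expansion above follows ordinary URL reference resolution, so a short sketch with a standard-library join reproduces it; the function name is invented for illustration and this is not how the series implements relative bundle URIs:

    from urllib.parse import urljoin

    # Illustrative sketch: expand a bundle.<id>.uri value against the URI
    # the bundle list itself was fetched from.
    def expand_bundle_uri(list_uri: str, value: str) -> str:
        # urljoin() covers all three rules: URIs with a protocol stay as-is,
        # values starting with "/" resolve against the scheme and host, and
        # everything else resolves against the list URI's directory.
        return urljoin(list_uri, value)

    base = "https://bundles.example.com/git/git/"
    assert expand_bundle_uri(base, "2022-02-09-1644442631-daily-blobless.bundle") \
        == "https://bundles.example.com/git/git/2022-02-09-1644442631-daily-blobless.bundle"
    assert expand_bundle_uri(base, "/git/git/2022-02-02-1643842568-blobless.bundle") \
        == "https://bundles.example.com/git/git/2022-02-02-1643842568-blobless.bundle"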
+
+Advertising Bundle URIs
+-----------------------
+
+If a user knows a bundle URI for the repository they are cloning, then
+they can specify that URI manually through a command-line option. However,
+a Git host may want to advertise bundle URIs during the clone operation,
+helping users unaware of the feature.
+
+The only thing required for this feature is that the server can advertise
+one or more bundle URIs. This advertisement takes the form of a new
+protocol v2 capability specifically for discovering bundle URIs.
+
+The client could choose an arbitrary bundle URI as an option _or_ select
+the URI with best performance by some exploratory checks. It is up to the
+bundle provider to decide if having multiple URIs is preferable to a
+single URI that is geodistributed through server-side infrastructure.
+
+Cloning with Bundle URIs
+------------------------
+
+The primary need for bundle URIs is to speed up clones. The Git client
+will interact with bundle URIs according to the following flow:
+
+1. The user specifies a bundle URI with the `--bundle-uri` command-line
+   option _or_ the client discovers a bundle list advertised by the
+   Git server.
+
+2. If the downloaded data from a bundle URI is a bundle, then the client
+   inspects the bundle headers to check that the prerequisite commit OIDs
+   are present in the client repository. If some are missing, then the
+   client delays unbundling until other bundles have been unbundled,
+   making those OIDs present. When all required OIDs are present, the
+   client unbundles that data using a refspec. The default refspec is
+   `+refs/heads/*:refs/bundles/*`, but this can be configured. These refs
+   are stored so that later `git fetch` negotiations can communicate the
+   bundled refs as `have`s, reducing the size of the fetch over the Git
+   protocol. To allow pruning refs from this ref namespace, Git may
+   introduce a numbered namespace (such as `refs/bundles/<i>/*`) such that
+   stale bundle refs can be deleted.
+
+3. If the file is instead a bundle list, then the client inspects the
+   `bundle.mode` to see if the list is of the `all` or `any` form.
+
+   a. If `bundle.mode=all`, then the client considers all bundle
+      URIs. The list is reduced based on the `bundle.<id>.filter` options
+      matching the client repository's partial clone filter. Then, all
+      bundle URIs are requested. If the `bundle.<id>.creationToken`
+      heuristic is provided, then the bundles are downloaded in decreasing
+      order by the creation token, stopping when a bundle has all required
+      OIDs. The bundles can then be unbundled in increasing creation token
+      order. The client stores the latest creation token as a heuristic
+      for avoiding future downloads if the bundle list does not advertise
+      bundles with larger creation tokens.
+
+   b. If `bundle.mode=any`, then the client can choose any one of the
+      bundle URIs to inspect. The client can use a variety of ways to
+      choose among these URIs. The client can also fall back to another URI
+      if the initial choice fails to return a result.
+
+Note that during a clone we expect that all bundles will be required, and
+heuristics such as `bundle.<id>.creationToken` can be used to download
+bundles in chronological order or in parallel.
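
As a reading aid for step 3.a, the `creationToken` ordering for a `bundle.mode=all` clone could be sketched as follows (download in decreasing token order until prerequisites are satisfiable, unbundle in increasing order, remember the largest advertised token); the data shapes are invented for the illustration, and the `bundle.<id>.filter` reduction is omitted:

    # Illustrative sketch; bundles are dicts with 'creationToken', 'prereqs'
    # (commit OIDs the bundle requires) and 'oids' (objects it contains).
    def clone_from_bundle_list(bundles, have_oids):
        """have_oids: set of object ids already present; updated in place."""
        downloaded, known = [], set(have_oids)
        for b in sorted(bundles, key=lambda b: b["creationToken"], reverse=True):
            downloaded.append(b)                 # "download" this bundle
            known.update(b["oids"])
            # Stop once every prerequisite of what we downloaded is covered.
            if all(p in known for d in downloaded for p in d["prereqs"]):
                break
        # Unbundle in increasing token order so prerequisites come first.
        for b in sorted(downloaded, key=lambda b: b["creationToken"]):
            have_oids.update(b["oids"])
        # Store the largest advertised token to skip these bundles later.
        return max(b["creationToken"] for b in bundles)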
+
+If a given bundle URI is a bundle list with a `bundle.heuristic`
+value, then the client can choose to store that URI as its chosen bundle
+URI. The client can then navigate directly to that URI during later `git
+fetch` calls.
+
+When downloading bundle URIs, the client can choose to inspect the initial
+content before committing to downloading the entire content. This may
+provide enough information to determine if the URI is a bundle list or
+a bundle. In the case of a bundle, the client may inspect the bundle
+header to determine that all advertised tips are already in the client
+repository and cancel the remaining download.
+
+Fetching with Bundle URIs
+-------------------------
+
+When the client fetches new data, it can decide to fetch from bundle
+servers before fetching from the origin remote. This could be done via a
+command-line option, but it is more likely useful to use a config value
+such as the one specified during the clone.
+
+The fetch operation follows the same procedure to download bundles from a
+bundle list (although we do _not_ want to use parallel downloads here). We
+expect that the process will end when all prerequisite commit OIDs in a
+thin bundle are already in the object database.
+
+When using the `creationToken` heuristic, the client can avoid downloading
+any bundles if their creation tokens are not larger than the stored
+creation token. After fetching new bundles, Git updates this local
+creation token.
+
+If the bundle provider does not provide a heuristic, then the client
+should attempt to inspect the bundle headers before downloading the full
+bundle data in case the bundle tips already exist in the client
+repository.
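
The corresponding fetch-time check reduces to a filter on the stored token; again, the data shapes are invented for illustration:

    # Illustrative sketch: only fetch bundles newer than the stored token,
    # then advance the stored token.
    def bundles_to_fetch(bundles, stored_token):
        new = [b for b in bundles if b["creationToken"] > stored_token]
        updated = max((b["creationToken"] for b in new), default=stored_token)
        return sorted(new, key=lambda b: b["creationToken"]), updated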
+
+Error Conditions
+----------------
+
+If the Git client discovers something unexpected while downloading
+information according to a bundle URI or the bundle list found at that
+location, then Git can ignore that data and continue as if it was not
+given a bundle URI. The remote Git server is the ultimate source of truth,
+not the bundle URI.
+
+Here are a few example error conditions:
+
+* The client fails to connect with a server at the given URI or a connection
+  is lost without any chance to recover.
+
+* The client receives a 400-level response (such as `404 Not Found` or
+  `401 Unauthorized`). The client should use the credential helper to
+  find and provide a credential for the URI, but match the semantics of
+  Git's other HTTP protocols in terms of handling specific 400-level
+  errors.
+
+* The server reports any other failure response.
+
+* The client receives data that is not parsable as a bundle or bundle list.
+
+* A bundle includes a filter that does not match expectations.
+
+* The client cannot unbundle the bundles because the prerequisite commit OIDs
+  are not in the object database and there are no more bundles to download.
+
+There are also situations that could be seen as wasteful, but are not
+error conditions:
+
+* The downloaded bundles contain more information than is requested by
+  the clone or fetch request. A primary example is if the user requests
+  a clone with `--single-branch` but downloads bundles that store every
+  reachable commit from all `refs/heads/*` references. This might be
+  initially wasteful, but perhaps these objects will become reachable by
+  a later ref update that the client cares about.
+
+* A bundle download during a `git fetch` contains objects already in the
+  object database. This is probably unavoidable if we are using bundles
+  for fetches, since the client will almost always be slightly ahead of
+  the bundle servers after performing its "catch-up" fetch to the remote
+  server. This extra work is most wasteful when the client is fetching
+  much more frequently than the server is computing bundles, such as if
+  the client is using hourly prefetches with background maintenance, but
+  the server is computing bundles weekly. For this reason, the client
+  should not use bundle URIs for fetch unless the server has explicitly
+  recommended it through a `bundle.heuristic` value.
+
+Example Bundle Provider organization
+------------------------------------
+
+The bundle URI feature is intentionally designed to be flexible to
+different ways a bundle provider wants to organize the object data.
+However, it can be helpful to have a complete organization model described
+here so providers can start from that base.
+
+This example organization is a simplified model of what is used by the
+GVFS Cache Servers (see section near the end of this document) which have
+been beneficial in speeding up clones and fetches for very large
+repositories, although using extra software outside of Git.
+
+The bundle provider deploys servers across multiple geographies. Each
+server manages its own bundle set. The server can track a number of Git
+repositories, but provides a bundle list for each based on a pattern. For
+example, when mirroring a repository at `https://<domain>/<org>/<repo>`
+the bundle server could have its bundle list available at
+`https://<server-url>/<domain>/<org>/<repo>`. The origin Git server can
+list all of these servers under the "any" mode:
+
+       [bundle]
+               version = 1
+               mode = any
+
+       [bundle "eastus"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>
+
+       [bundle "europe"]
+               uri = https://europe.example.com/<domain>/<org>/<repo>
+
+       [bundle "apac"]
+               uri = https://apac.example.com/<domain>/<org>/<repo>
+
+This "list of lists" is static and only changes if a bundle server is
+added or removed.
+
+Each bundle server manages its own set of bundles. The initial bundle list
+contains only a single bundle, containing all of the objects received from
+cloning the repository from the origin server. The list uses the
+`creationToken` heuristic and a `creationToken` is made for the bundle
+based on the server's timestamp.
+
+The bundle server runs regularly-scheduled updates for the bundle list,
+such as once a day. During this task, the server fetches the latest
+contents from the origin server and generates a bundle containing the
+objects reachable from the latest origin refs, but not contained in a
+previously-computed bundle. This bundle is added to the list, with care
+that the `creationToken` is strictly greater than the previous maximum
+`creationToken`.
+
+When the bundle list grows too large, say more than 30 bundles, then the
+oldest "_N_ minus 30" bundles are combined into a single bundle. This
+bundle's `creationToken` is equal to the maximum `creationToken` among the
+merged bundles.
+
+An example bundle list is provided here, although it only has two daily
+bundles and not a full list of 30:
+
+       [bundle]
+               version = 1
+               mode = all
+               heuristic = creationToken
+
+       [bundle "2022-02-13-1644770820-daily"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-09-1644770820-daily.bundle
+               creationToken = 1644770820
+
+       [bundle "2022-02-09-1644442601-daily"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-09-1644442601-daily.bundle
+               creationToken = 1644442601
+
+       [bundle "2022-02-02-1643842562"]
+               uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-02-1643842562.bundle
+               creationToken = 1643842562
+
+To avoid storing and serving object data in perpetuity despite becoming
+unreachable in the origin server, this bundle merge can be more careful.
+Instead of taking an absolute union of the old bundles, the merged bundle
+can be created by looking at the newer bundles and ensuring that their
+necessary commits are all available in this merged bundle (or in another
+one of the newer bundles). This allows "expiring" object data that is not
+being used by new commits in this window of time. That data could be
+reintroduced by a later push.
+
+The intention of this data organization has two main goals. First, initial
+clones of the repository become faster by downloading precomputed object
+data from a closer source. Second, `git fetch` commands can be faster,
+especially if the client has not fetched for a few days. However, if a
+client does not fetch for 30 days, then the bundle list organization would
+cause redownloading a large amount of object data.
+
+One way to make this organization more useful to users who fetch frequently
+is to have more frequent bundle creation. For example, bundles could be
+created every hour, and then once a day those "hourly" bundles could be
+merged into a "daily" bundle. The daily bundles are merged into the
+oldest bundle after 30 days.
+
+It is recommended that this bundle strategy be repeated with the `blob:none`
+filter if clients of this repository are expecting to use blobless partial
+clones. This list of blobless bundles stays in the same list as the full
+bundles, but uses the `bundle.<id>.filter` key to separate the two groups.
+For very large repositories, the bundle provider may want to _only_ provide
+blobless bundles.
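
The roll-up described before the example list (merge the oldest "_N_ minus 30" bundles into one whose `creationToken` is the maximum among those merged) could be sketched as follows; the naive union stands in for the more careful merge discussed above, and the data shapes are invented:

    MAX_BUNDLES = 30

    # Illustrative sketch; bundle_list is kept in increasing creationToken
    # order and each entry is a dict with 'creationToken' and 'oids'.
    def roll_up(bundle_list):
        if len(bundle_list) <= MAX_BUNDLES:
            return bundle_list
        old, recent = bundle_list[:-MAX_BUNDLES], bundle_list[-MAX_BUNDLES:]
        merged = {
            "creationToken": max(b["creationToken"] for b in old),
            # Naive union of the old bundles' contents; a provider may instead
            # keep only what the newer bundles still need, so that data which
            # became unreachable on the origin can "expire".
            "oids": set().union(*(b["oids"] for b in old)),
        }
        return [merged] + recent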
+
+Implementation Plan
+-------------------
+
+This design document is being submitted on its own as an aspirational
+document, with the goal of implementing all of the mentioned client
+features over the course of several patch series. Here is a potential
+outline for submitting these features:
+
+1. Integrate bundle URIs into `git clone` with a `--bundle-uri` option.
+   This will include a new `git fetch --bundle-uri` mode for use as the
+   implementation underneath `git clone`. The initial version here will
+   expect a single bundle at the given URI.
+
+2. Implement the ability to parse a bundle list from a bundle URI and
+   update the `git fetch --bundle-uri` logic to properly distinguish
+   between `bundle.mode` options. Specifically design the feature so
+   that the config format parsing feeds a list of key-value pairs into the
+   bundle list logic.
+
+3. Create the `bundle-uri` protocol v2 command so Git servers can advertise
+   bundle URIs using the key-value pairs. Plug into the existing key-value
+   input to the bundle list logic. Allow `git clone` to discover these
+   bundle URIs and bootstrap the client repository from the bundle data.
+   (This choice is an opt-in via a config option and a command-line
+   option.)
+
+4. Allow the client to understand the `bundle.flag=forFetch` configuration
+   and the `bundle.<id>.creationToken` heuristic. When `git clone`
+   discovers a bundle URI with `bundle.flag=forFetch`, it configures the
+   client repository to check that bundle URI during later `git fetch <remote>`
+   commands.
+
+5. Allow clients to discover bundle URIs during `git fetch` and configure
+   a bundle URI for later fetches if `bundle.flag=forFetch`.
+
+6. Implement the "inspect headers" heuristic to reduce data downloads when
+   the `bundle.<id>.creationToken` heuristic is not available.
+
+As these features are reviewed, this plan might be updated. We also expect
+that new designs will be discovered and implemented as this feature
+matures and becomes used in real-world scenarios.
+
+Related Work: Packfile URIs
+---------------------------
+
+The Git protocol already has a capability where the Git server can list
+a set of URLs along with the packfile response when serving a client
+request. The client is then expected to download the packfiles at those
+locations in order to have a complete understanding of the response.
+
+This mechanism is used by the Gerrit server (implemented with JGit) and
+has been effective at reducing CPU load and improving user performance for
+clones.
+
+A major downside to this mechanism is that the origin server needs to know
+_exactly_ what is in those packfiles, and the packfiles need to be available
+to the user for some time after the server has responded. This coupling
+between the origin and the packfile data is difficult to manage.
+
+Further, this implementation is extremely hard to make work with fetches.
+
+Related Work: GVFS Cache Servers
+--------------------------------
+
+The GVFS Protocol [2] is a set of HTTP endpoints designed independently of
+the Git project before Git's partial clone was created. One feature of this
+protocol is the idea of a "cache server" which can be colocated with build
+machines or developer offices to transfer Git data without overloading the
+central server.
+
+The endpoint that VFS for Git is famous for is the `GET /gvfs/objects/{oid}`
+endpoint, which allows downloading an object on-demand. This is a critical
+piece of the filesystem virtualization of that product.
+
+However, a more subtle need is the `GET /gvfs/prefetch?lastPackTimestamp=<t>`
+endpoint. Given an optional timestamp, the cache server responds with a list
+of precomputed packfiles containing the commits and trees that were introduced
+in those time intervals.
+
+The cache server computes these "prefetch" packfiles using the following
+strategy:
+
+1. Every hour, an "hourly" pack is generated with a given timestamp.
+2. Nightly, the previous 24 hourly packs are rolled up into a "daily" pack.
+3. Nightly, all prefetch packs more than 30 days old are rolled up into
+   one pack.
+
+When a user runs `gvfs clone` or `scalar clone` against a repo with cache
+servers, the client requests all prefetch packfiles, which is at most
+`24 + 30 + 1` packfiles downloading only commits and trees. The client
+then follows with a request to the origin server for the references, and
+attempts to checkout that tip reference. (There is an extra endpoint that
+helps get all reachable trees from a given commit, in case that commit
+was not already in a prefetch packfile.)
+
+During a `git fetch`, a hook requests the prefetch endpoint using the
+most-recent timestamp from a previously-downloaded prefetch packfile.
+Only the list of packfiles with later timestamps are downloaded. Most
+users fetch hourly, so they get at most one hourly prefetch pack. Users
+whose machines have been off or otherwise have not fetched in over 30 days
+might redownload all prefetch packfiles. This is rare.
+
+It is important to note that the clients always contact the origin server
+for the refs advertisement, so the refs are frequently "ahead" of the
+prefetched pack data. The missing objects are downloaded on-demand using
+the `GET gvfs/objects/{oid}` requests, when needed by a command such as
+`git checkout` or `git log`. Some Git optimizations disable checks that
+would cause these on-demand downloads to be too aggressive.
+
+See Also
+--------
+
+[1] https://lore.kernel.org/git/RFC-cover-00.13-0000000000-20210805T150534Z-avarab@gmail.com/
+    An earlier RFC for a bundle URI feature.
+
+[2] https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
+    The GVFS Protocol
index 047390e46eb70712f8b43e488e1c3ea6bf18fdb1..0600150b3ad8f4a99ddc1ba6715050eab3df19da 100644 (file)
@@ -84,6 +84,9 @@ series have been accepted:
 
 - `scalar-diagnose`: The `scalar` command is taught the `diagnose` subcommand.
 
+- `scalar-generalize-diagnose`: Move the functionality of `scalar diagnose`
+  into `git diagnose` and `git bugreport --diagnose`.
+
- 'scalar-add-fsmonitor': Enable the built-in FSMonitor in Scalar
   enlistments. At the end of this series, Scalar should be feature-complete
   from the perspective of a user.
@@ -91,12 +94,6 @@ series have been accepted:
 Roughly speaking (and subject to change), the following series are needed to
 "finish" this initial version of Scalar:
 
-- Generalize features not specific to Scalar: In the spirit of making Scalar
-  configure only what is needed for large repo performance, move common
-  utilities into other parts of Git. Some of this will be internal-only, but one
-  major change will be generalizing `scalar diagnose` for use with any Git
-  repository.
-
 - Move Scalar to toplevel: Move Scalar out of `contrib/` and into the root of
   `git`. This includes a variety of related updates, including:
     - building & installing Scalar in the Git root-level 'make [install]'.
index e8adeb09f1c1185c325f6bef7a9bcaf14b68564f..eac30126e29fe6f462b2c84006c9cd9730b1eee3 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -772,6 +772,7 @@ TEST_BUILTINS_OBJS += test-read-midx.o
 TEST_BUILTINS_OBJS += test-ref-store.o
 TEST_BUILTINS_OBJS += test-reftable.o
 TEST_BUILTINS_OBJS += test-regex.o
+TEST_BUILTINS_OBJS += test-rot13-filter.o
 TEST_BUILTINS_OBJS += test-repository.o
 TEST_BUILTINS_OBJS += test-revision-walking.o
 TEST_BUILTINS_OBJS += test-run-command.o
@@ -918,6 +919,7 @@ LIB_OBJS += combine-diff.o
 LIB_OBJS += commit-graph.o
 LIB_OBJS += commit-reach.o
 LIB_OBJS += commit.o
+LIB_OBJS += compat/nonblock.o
 LIB_OBJS += compat/obstack.o
 LIB_OBJS += compat/terminal.o
 LIB_OBJS += compat/zlib-uncompress2.o
@@ -932,6 +934,7 @@ LIB_OBJS += ctype.o
 LIB_OBJS += date.o
 LIB_OBJS += decorate.o
 LIB_OBJS += delta-islands.o
+LIB_OBJS += diagnose.o
 LIB_OBJS += diff-delta.o
 LIB_OBJS += diff-merges.o
 LIB_OBJS += diff-lib.o
@@ -1152,6 +1155,7 @@ BUILTIN_OBJS += builtin/credential-cache.o
 BUILTIN_OBJS += builtin/credential-store.o
 BUILTIN_OBJS += builtin/credential.o
 BUILTIN_OBJS += builtin/describe.o
+BUILTIN_OBJS += builtin/diagnose.o
 BUILTIN_OBJS += builtin/diff-files.o
 BUILTIN_OBJS += builtin/diff-index.o
 BUILTIN_OBJS += builtin/diff-tree.o
index 40e9ecc8485324a40e142d5cbc9345a22e45f333..8901a34d6bf424680b9d13a1bdf332bedb4d8e20 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -144,6 +144,7 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix);
 int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix);
 int cmd_credential_store(int argc, const char **argv, const char *prefix);
 int cmd_describe(int argc, const char **argv, const char *prefix);
+int cmd_diagnose(int argc, const char **argv, const char *prefix);
 int cmd_diff_files(int argc, const char **argv, const char *prefix);
 int cmd_diff_index(int argc, const char **argv, const char *prefix);
 int cmd_diff(int argc, const char **argv, const char *prefix);
index 9de32bc96e7a6f2171473b8d20f5cde3834e5c42..530895be55fe6d9627c5d954f4c235d22769051e 100644 (file)
@@ -5,6 +5,7 @@
 #include "compat/compiler.h"
 #include "hook.h"
 #include "hook-list.h"
+#include "diagnose.h"
 
 
 static void get_system_info(struct strbuf *sys_info)
@@ -59,7 +60,7 @@ static void get_populated_hooks(struct strbuf *hook_info, int nongit)
 }
 
 static const char * const bugreport_usage[] = {
-       N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>]"),
+       N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>] [--diagnose[=<mode>]"),
        NULL
 };
 
@@ -98,16 +99,21 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
        int report = -1;
        time_t now = time(NULL);
        struct tm tm;
+       enum diagnose_mode diagnose = DIAGNOSE_NONE;
        char *option_output = NULL;
        char *option_suffix = "%Y-%m-%d-%H%M";
        const char *user_relative_path = NULL;
        char *prefixed_filename;
+       size_t output_path_len;
 
        const struct option bugreport_options[] = {
+               OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"),
+                              N_("create an additional zip archive of detailed diagnostics (default 'stats')"),
+                              PARSE_OPT_OPTARG, option_parse_diagnose),
                OPT_STRING('o', "output-directory", &option_output, N_("path"),
-                          N_("specify a destination for the bugreport file")),
+                          N_("specify a destination for the bugreport file(s)")),
                OPT_STRING('s', "suffix", &option_suffix, N_("format"),
-                          N_("specify a strftime format suffix for the filename")),
+                          N_("specify a strftime format suffix for the filename(s)")),
                OPT_END()
        };
 
@@ -119,6 +125,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
                                            option_output ? option_output : "");
        strbuf_addstr(&report_path, prefixed_filename);
        strbuf_complete(&report_path, '/');
+       output_path_len = report_path.len;
 
        strbuf_addstr(&report_path, "git-bugreport-");
        strbuf_addftime(&report_path, option_suffix, localtime_r(&now, &tm), 0, 0);
@@ -133,6 +140,20 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
                    report_path.buf);
        }
 
+       /* Prepare diagnostics, if requested */
+       if (diagnose != DIAGNOSE_NONE) {
+               struct strbuf zip_path = STRBUF_INIT;
+               strbuf_add(&zip_path, report_path.buf, output_path_len);
+               strbuf_addstr(&zip_path, "git-diagnostics-");
+               strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+               strbuf_addstr(&zip_path, ".zip");
+
+               if (create_diagnostics_archive(&zip_path, diagnose))
+                       die_errno(_("unable to create diagnostics archive %s"), zip_path.buf);
+
+               strbuf_release(&zip_path);
+       }
+
        /* Prepare the report contents */
        get_bug_template(&buffer);
 
index 29c74f898bf183d724bba22457a1a3ab782e519a..f9d63d80b926c783265beb459962954128f6ea4c 100644 (file)
@@ -626,6 +626,7 @@ static void show_local_changes(struct object *head,
        repo_init_revisions(the_repository, &rev, NULL);
        rev.diffopt.flags = opts->flags;
        rev.diffopt.output_format |= DIFF_FORMAT_NAME_STATUS;
+       rev.diffopt.flags.recursive = 1;
        diff_setup_done(&rev.diffopt);
        add_pending_object(&rev, head, NULL);
        run_diff_index(&rev, 0);
diff --git a/builtin/diagnose.c b/builtin/diagnose.c
new file mode 100644 (file)
index 0000000..cd260c2
--- /dev/null
@@ -0,0 +1,61 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "diagnose.h"
+
+static const char * const diagnose_usage[] = {
+       N_("git diagnose [-o|--output-directory <path>] [-s|--suffix <format>] [--mode=<mode>]"),
+       NULL
+};
+
+int cmd_diagnose(int argc, const char **argv, const char *prefix)
+{
+       struct strbuf zip_path = STRBUF_INIT;
+       time_t now = time(NULL);
+       struct tm tm;
+       enum diagnose_mode mode = DIAGNOSE_STATS;
+       char *option_output = NULL;
+       char *option_suffix = "%Y-%m-%d-%H%M";
+       char *prefixed_filename;
+
+       const struct option diagnose_options[] = {
+               OPT_STRING('o', "output-directory", &option_output, N_("path"),
+                          N_("specify a destination for the diagnostics archive")),
+               OPT_STRING('s', "suffix", &option_suffix, N_("format"),
+                          N_("specify a strftime format suffix for the filename")),
+               OPT_CALLBACK_F(0, "mode", &mode, N_("(stats|all)"),
+                              N_("specify the content of the diagnostic archive"),
+                              PARSE_OPT_NONEG, option_parse_diagnose),
+               OPT_END()
+       };
+
+       argc = parse_options(argc, argv, prefix, diagnose_options,
+                            diagnose_usage, 0);
+
+       /* Prepare the path to put the result */
+       prefixed_filename = prefix_filename(prefix,
+                                           option_output ? option_output : "");
+       strbuf_addstr(&zip_path, prefixed_filename);
+       strbuf_complete(&zip_path, '/');
+
+       strbuf_addstr(&zip_path, "git-diagnostics-");
+       strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+       strbuf_addstr(&zip_path, ".zip");
+
+       switch (safe_create_leading_directories(zip_path.buf)) {
+       case SCLD_OK:
+       case SCLD_EXISTS:
+               break;
+       default:
+               die_errno(_("could not create leading directories for '%s'"),
+                         zip_path.buf);
+       }
+
+       /* Prepare diagnostics */
+       if (create_diagnostics_archive(&zip_path, mode))
+               die_errno(_("unable to create diagnostics archive %s"),
+                         zip_path.buf);
+
+       free(prefixed_filename);
+       strbuf_release(&zip_path);
+       return 0;
+}
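For context, safe_create_leading_directories() creates any missing parent directories of the target path and reports whether they were newly created or already present, which is why both SCLD_OK and SCLD_EXISTS are accepted above. A minimal, hypothetical stand-in using POSIX mkdir(2) (not the function git actually calls) might look like this:

    #include <errno.h>
    #include <string.h>
    #include <sys/stat.h>

    /*
     * Hypothetical stand-in for safe_create_leading_directories(): make every
     * parent directory of 'path', treating "already exists" as success. The
     * buffer is temporarily modified in place.
     */
    static int create_leading_dirs(char *path)
    {
            char *slash = path;

            while ((slash = strchr(slash + 1, '/')) != NULL) {
                    *slash = '\0';
                    if (mkdir(path, 0777) < 0 && errno != EEXIST) {
                            *slash = '/';
                            return -1;
                    }
                    *slash = '/';
            }
            return 0;
    }
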
index fc5cecb48356ffccd4251670bd779d52ab3d392c..368a0f5329c58a3c0093237f4ab47065433bef57 100644 (file)
@@ -490,7 +490,9 @@ static void filter_prefetch_refspec(struct refspec *rs)
                        continue;
                if (!rs->items[i].dst ||
                    (rs->items[i].src &&
-                    !strncmp(rs->items[i].src, "refs/tags/", 10))) {
+                    !strncmp(rs->items[i].src,
+                             ref_namespace[NAMESPACE_TAGS].ref,
+                             strlen(ref_namespace[NAMESPACE_TAGS].ref)))) {
                        int j;
 
                        free(rs->items[i].src);
@@ -506,7 +508,7 @@ static void filter_prefetch_refspec(struct refspec *rs)
                }
 
                old_dst = rs->items[i].dst;
-               strbuf_addstr(&new_dst, "refs/prefetch/");
+               strbuf_addstr(&new_dst, ref_namespace[NAMESPACE_PREFETCH].ref);
 
                /*
                 * If old_dst starts with "refs/", then place
index eeff2b760e0cc58978618c791a195df0bca6627b..6c222052177bca1eac3c990f65e01bb09b426cac 100644 (file)
@@ -910,12 +910,6 @@ static int fetch_remote(struct remote *remote, void *cbdata)
 
 static int maintenance_task_prefetch(struct maintenance_run_opts *opts)
 {
-       git_config_set_multivar_gently("log.excludedecoration",
-                                       "refs/prefetch/",
-                                       "refs/prefetch/",
-                                       CONFIG_FLAGS_FIXED_VALUE |
-                                       CONFIG_FLAGS_MULTI_REPLACE);
-
        if (for_each_remote(fetch_remote, opts)) {
                error(_("failed to prefetch remotes"));
                return 1;
index 9b937d59b83dc29691d9cb940223c981f38ff7bf..fd209725e56abccc1fb8b2d7764e86990aed3f77 100644 (file)
@@ -101,6 +101,20 @@ static int parse_decoration_style(const char *value)
        return -1;
 }
 
+static int use_default_decoration_filter = 1;
+static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
+static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
+static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
+
+static int clear_decorations_callback(const struct option *opt,
+                                           const char *arg, int unset)
+{
+       string_list_clear(&decorate_refs_include, 0);
+       string_list_clear(&decorate_refs_exclude, 0);
+       use_default_decoration_filter = 0;
+       return 0;
+}
+
 static int decorate_callback(const struct option *opt, const char *arg, int unset)
 {
        if (unset)
@@ -162,18 +176,61 @@ static void cmd_log_init_defaults(struct rev_info *rev)
                parse_date_format(default_date_mode, &rev->date_mode);
 }
 
+static void set_default_decoration_filter(struct decoration_filter *decoration_filter)
+{
+       int i;
+       char *value = NULL;
+       struct string_list *include = decoration_filter->include_ref_pattern;
+       const struct string_list *config_exclude =
+                       git_config_get_value_multi("log.excludeDecoration");
+
+       if (config_exclude) {
+               struct string_list_item *item;
+               for_each_string_list_item(item, config_exclude)
+                       string_list_append(decoration_filter->exclude_ref_config_pattern,
+                                          item->string);
+       }
+
+       /*
+        * By default, decorate_all is disabled. Enable it if
+        * log.initialDecorationSet=all. Don't ever disable it by config,
+        * since the command-line takes precedence.
+        */
+       if (use_default_decoration_filter &&
+           !git_config_get_string("log.initialdecorationset", &value) &&
+           !strcmp("all", value))
+               use_default_decoration_filter = 0;
+       free(value);
+
+       if (!use_default_decoration_filter ||
+           decoration_filter->exclude_ref_pattern->nr ||
+           decoration_filter->include_ref_pattern->nr ||
+           decoration_filter->exclude_ref_config_pattern->nr)
+               return;
+
+       /*
+        * No command-line or config options were given, so
+        * populate with sensible defaults.
+        */
+       for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) {
+               if (!ref_namespace[i].decoration)
+                       continue;
+
+               string_list_append(include, ref_namespace[i].ref);
+       }
+}
+
 static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
                         struct rev_info *rev, struct setup_revision_opt *opt)
 {
        struct userformat_want w;
        int quiet = 0, source = 0, mailmap;
        static struct line_opt_callback_data line_cb = {NULL, NULL, STRING_LIST_INIT_DUP};
-       static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
-       static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
-       static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
-       struct decoration_filter decoration_filter = {&decorate_refs_include,
-                                                     &decorate_refs_exclude,
-                                                     &decorate_refs_exclude_config};
+       struct decoration_filter decoration_filter = {
+               .exclude_ref_pattern = &decorate_refs_exclude,
+               .include_ref_pattern = &decorate_refs_include,
+               .exclude_ref_config_pattern = &decorate_refs_exclude_config,
+       };
        static struct revision_sources revision_sources;
 
        const struct option builtin_log_options[] = {
@@ -181,6 +238,10 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
                OPT_BOOL(0, "source", &source, N_("show source")),
                OPT_BOOL(0, "use-mailmap", &mailmap, N_("use mail map file")),
                OPT_ALIAS(0, "mailmap", "use-mailmap"),
+               OPT_CALLBACK_F(0, "clear-decorations", NULL, NULL,
+                              N_("clear all previously-defined decoration filters"),
+                              PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+                              clear_decorations_callback),
                OPT_STRING_LIST(0, "decorate-refs", &decorate_refs_include,
                                N_("pattern"), N_("only decorate refs that match <pattern>")),
                OPT_STRING_LIST(0, "decorate-refs-exclude", &decorate_refs_exclude,
@@ -265,16 +326,7 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
        }
 
        if (decoration_style || rev->simplify_by_decoration) {
-               const struct string_list *config_exclude =
-                       repo_config_get_value_multi(the_repository,
-                                                   "log.excludeDecoration");
-
-               if (config_exclude) {
-                       struct string_list_item *item;
-                       for_each_string_list_item(item, config_exclude)
-                               string_list_append(&decorate_refs_exclude_config,
-                                                  item->string);
-               }
+               set_default_decoration_filter(&decoration_filter);
 
                if (decoration_style)
                        rev->show_decorations = 1;
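--clear-decorations and log.initialDecorationSet only change which include/exclude pattern lists end up in the decoration_filter; the matching itself is unchanged: an excluded ref is never decorated, and when include patterns exist the ref must match one of them. A deliberately simplified sketch of that matching, using POSIX fnmatch(3) rather than git's wildmatch() and hypothetical helper names:

    #include <fnmatch.h>
    #include <stddef.h>

    /*
     * Conceptual sketch only: git uses wildmatch() and slightly different
     * pattern normalization; the helpers below are hypothetical.
     */
    static int matches_any(const char *refname, const char **patterns, size_t nr)
    {
            size_t i;

            for (i = 0; i < nr; i++)
                    if (!fnmatch(patterns[i], refname, 0))
                            return 1;
            return 0;
    }

    static int ref_is_decorated(const char *refname,
                                const char **include, size_t include_nr,
                                const char **exclude, size_t exclude_nr)
    {
            if (matches_any(refname, exclude, exclude_nr))
                    return 0;
            if (!include_nr)
                    return 1;   /* no include patterns: decorate everything */
            return matches_any(refname, include, include_nr);
    }
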
index 583702a0980e8ced3f91e1b88a9a1698be16fc6e..a29e911d3099be9d327ecc030d68c56fe046a04a 100644 (file)
@@ -106,6 +106,7 @@ static int for_each_replace_name(const char **argv, each_replace_name_fn fn)
        size_t base_len;
        int had_error = 0;
        struct object_id oid;
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
 
        strbuf_addstr(&ref, git_replace_ref_base);
        base_len = ref.len;
@@ -147,6 +148,8 @@ static int check_ref_valid(struct object_id *object,
                            struct strbuf *ref,
                            int force)
 {
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
+
        strbuf_reset(ref);
        strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object));
        if (check_refname_format(ref->buf, 0))
index 344fff8f3a9322a7895b607a8236c5461625407d..fdce6f8c85670c8b2b0e20304336debba3190408 100644 (file)
@@ -174,88 +174,6 @@ static void update_index_from_diff(struct diff_queue_struct *q,
        }
 }
 
-static int pathspec_needs_expanded_index(const struct pathspec *pathspec)
-{
-       unsigned int i, pos;
-       int res = 0;
-       char *skip_worktree_seen = NULL;
-
-       /*
-        * When using a magic pathspec, assume for the sake of simplicity that
-        * the index needs to be expanded to match all matchable files.
-        */
-       if (pathspec->magic)
-               return 1;
-
-       for (i = 0; i < pathspec->nr; i++) {
-               struct pathspec_item item = pathspec->items[i];
-
-               /*
-                * If the pathspec item has a wildcard, the index should be expanded
-                * if the pathspec has the possibility of matching a subset of entries inside
-                * of a sparse directory (but not the entire directory).
-                *
-                * If the pathspec item is a literal path, the index only needs to be expanded
-                * if a) the pathspec isn't in the sparse checkout cone (to make sure we don't
-                * expand for in-cone files) and b) it doesn't match any sparse directories
-                * (since we can reset whole sparse directories without expanding them).
-                */
-               if (item.nowildcard_len < item.len) {
-                       /*
-                        * Special case: if the pattern is a path inside the cone
-                        * followed by only wildcards, the pattern cannot match
-                        * partial sparse directories, so we know we don't need to
-                        * expand the index.
-                        *
-                        * Examples:
-                        * - in-cone/foo***: doesn't need expanded index
-                        * - not-in-cone/bar*: may need expanded index
-                        * - **.c: may need expanded index
-                        */
-                       if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len &&
-                           path_in_cone_mode_sparse_checkout(item.original, &the_index))
-                               continue;
-
-                       for (pos = 0; pos < active_nr; pos++) {
-                               struct cache_entry *ce = active_cache[pos];
-
-                               if (!S_ISSPARSEDIR(ce->ce_mode))
-                                       continue;
-
-                               /*
-                                * If the pre-wildcard length is longer than the sparse
-                                * directory name and the sparse directory is the first
-                                * component of the pathspec, need to expand the index.
-                                */
-                               if (item.nowildcard_len > ce_namelen(ce) &&
-                                   !strncmp(item.original, ce->name, ce_namelen(ce))) {
-                                       res = 1;
-                                       break;
-                               }
-
-                               /*
-                                * If the pre-wildcard length is shorter than the sparse
-                                * directory and the pathspec does not match the whole
-                                * directory, need to expand the index.
-                                */
-                               if (!strncmp(item.original, ce->name, item.nowildcard_len) &&
-                                   wildmatch(item.original, ce->name, 0)) {
-                                       res = 1;
-                                       break;
-                               }
-                       }
-               } else if (!path_in_cone_mode_sparse_checkout(item.original, &the_index) &&
-                          !matches_skip_worktree(pathspec, i, &skip_worktree_seen))
-                       res = 1;
-
-               if (res > 0)
-                       break;
-       }
-
-       free(skip_worktree_seen);
-       return res;
-}
-
 static int read_from_tree(const struct pathspec *pathspec,
                          struct object_id *tree_oid,
                          int intent_to_add)
@@ -273,7 +191,7 @@ static int read_from_tree(const struct pathspec *pathspec,
        opt.change = diff_change;
        opt.add_remove = diff_addremove;
 
-       if (pathspec->nr && the_index.sparse_index && pathspec_needs_expanded_index(pathspec))
+       if (pathspec->nr && pathspec_needs_expanded_index(&the_index, pathspec))
                ensure_full_index(&the_index);
 
        if (do_diff_cache(tree_oid, &opt))
index 30fd8e83eaf2ca767802343d2dcff1825f6b1b50..fba6f5d51f32d1217b89298e1361169b43feb38b 100644 (file)
@@ -46,6 +46,7 @@ static const char rev_list_usage[] =
 "    --parents\n"
 "    --children\n"
 "    --objects | --objects-edge\n"
+"    --disk-usage[=human]\n"
 "    --unpacked\n"
 "    --header | --pretty\n"
 "    --[no-]object-names\n"
@@ -81,6 +82,7 @@ static int arg_show_object_names = 1;
 
 static int show_disk_usage;
 static off_t total_disk_usage;
+static int human_readable;
 
 static off_t get_object_disk_usage(struct object *obj)
 {
@@ -368,6 +370,17 @@ static int show_object_fast(
        return 1;
 }
 
+static void print_disk_usage(off_t size)
+{
+       struct strbuf sb = STRBUF_INIT;
+       if (human_readable)
+               strbuf_humanise_bytes(&sb, size);
+       else
+               strbuf_addf(&sb, "%"PRIuMAX, (uintmax_t)size);
+       puts(sb.buf);
+       strbuf_release(&sb);
+}
+
 static inline int parse_missing_action_value(const char *value)
 {
        if (!strcmp(value, "error")) {
@@ -473,6 +486,7 @@ static int try_bitmap_disk_usage(struct rev_info *revs,
                                 int filter_provided_objects)
 {
        struct bitmap_index *bitmap_git;
+       off_t size_from_bitmap;
 
        if (!show_disk_usage)
                return -1;
@@ -481,8 +495,8 @@ static int try_bitmap_disk_usage(struct rev_info *revs,
        if (!bitmap_git)
                return -1;
 
-       printf("%"PRIuMAX"\n",
-              (uintmax_t)get_disk_usage_from_bitmap(bitmap_git, revs));
+       size_from_bitmap = get_disk_usage_from_bitmap(bitmap_git, revs);
+       print_disk_usage(size_from_bitmap);
        return 0;
 }
 
@@ -624,7 +638,21 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
                        continue;
                }
 
-               if (!strcmp(arg, "--disk-usage")) {
+               if (skip_prefix(arg, "--disk-usage", &arg)) {
+                       if (*arg == '=') {
+                               if (!strcmp(++arg, "human")) {
+                                       human_readable = 1;
+                               } else
+                                       die(_("invalid value for '%s': '%s', the only allowed format is '%s'"),
+                                           "--disk-usage=<format>", arg, "human");
+                       } else if (*arg) {
+                               /*
+                                * Arguably should goto a label to continue chain of ifs?
+                                * Arguably this should goto a label to continue
+                                * the chain of ifs, but it does not matter unless
+                                * we try to add --disk-usage-foo afterwards.
+                               usage(rev_list_usage);
+                       }
                        show_disk_usage = 1;
                        info.flags |= REV_LIST_QUIET;
                        continue;
@@ -753,7 +781,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
        }
 
        if (show_disk_usage)
-               printf("%"PRIuMAX"\n", (uintmax_t)total_disk_usage);
+               print_disk_usage(total_disk_usage);
 
 cleanup:
        release_revisions(&revs);
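--disk-usage=human routes the byte total through strbuf_humanise_bytes() instead of printing a raw integer. A simplified, hypothetical stand-in for that conversion (the exact rounding is not git's):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Simplified stand-in for strbuf_humanise_bytes(): scale the value down by
     * 1024 until it fits a unit, then print two decimals.
     */
    static void print_human_bytes(uint64_t bytes)
    {
            static const char *units[] = { "bytes", "KiB", "MiB", "GiB", "TiB" };
            double val = (double)bytes;
            int i = 0;

            while (val >= 1024.0 && i < 4) {
                    val /= 1024.0;
                    i++;
            }
            if (!i)
                    printf("%llu %s\n", (unsigned long long)bytes, units[i]);
            else
                    printf("%.2f %s\n", val, units[i]);
    }
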
index 84a935a16e8be447d0bc95ad2a1e5d133452e635..b6ba859fe42571fd868697db1da8dac6ffd2c32e 100644 (file)
@@ -287,6 +287,8 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
        if (!index_only)
                setup_work_tree();
 
+       prepare_repo_settings(the_repository);
+       the_repository->settings.command_requires_full_index = 0;
        hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
 
        if (read_cache() < 0)
@@ -296,8 +298,9 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
 
        seen = xcalloc(pathspec.nr, 1);
 
-       /* TODO: audit for interaction with sparse-index. */
-       ensure_full_index(&the_index);
+       if (pathspec_needs_expanded_index(&the_index, &pathspec))
+               ensure_full_index(&the_index);
+
        for (i = 0; i < active_nr; i++) {
                const struct cache_entry *ce = active_cache[i];
 
diff --git a/cache.h b/cache.h
index 302810b353a216939ff42416beffdfdb34d26ceb..26ed03bd6de626e497af549fe4fecfe27acaf699 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -829,6 +829,15 @@ struct cache_entry *index_file_exists(struct index_state *istate, const char *na
  */
 int index_name_pos(struct index_state *, const char *name, int namelen);
 
+/*
+ * Like index_name_pos, returns the position of an entry of the given name in
+ * the index if one exists, otherwise returns a negative value where the negated
+ * value minus 1 is the position where the index entry would be inserted. Unlike
+ * index_name_pos, however, a sparse index is not expanded to find an entry
+ * inside a sparse directory.
+ */
+int index_name_pos_sparse(struct index_state *, const char *name, int namelen);
+
 /*
  * Determines whether an entry with the given name exists within the
  * given index. The return value is 1 if an exact match is found, otherwise
@@ -1007,7 +1016,6 @@ void reset_shared_repository(void);
  * commands that do not want replace references to be active.
  */
 extern int read_replace_refs;
-extern char *git_replace_ref_base;
 
 /*
  * These values are used to help identify parts of a repository to fsync.
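The new index_name_pos_sparse() declaration above uses the same return convention as index_name_pos(): a hit returns the entry's position, a miss returns the negated insertion position minus one. A small standalone sketch of that convention over a plain sorted string array (nothing git-specific; the helper name is made up):

    #include <string.h>

    /*
     * Sketch of the return convention: a hit returns the index, a miss returns
     * -(insertion position) - 1, so callers decode a miss as -ret - 1.
     */
    static int find_pos(const char **sorted, int nr, const char *name)
    {
            int lo = 0, hi = nr;

            while (lo < hi) {
                    int mid = lo + (hi - lo) / 2;
                    int cmp = strcmp(sorted[mid], name);

                    if (!cmp)
                            return mid;      /* found */
                    if (cmp < 0)
                            lo = mid + 1;
                    else
                            hi = mid;
            }
            return -lo - 1;                  /* not found; insert at -ret - 1 */
    }
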
diff --git a/compat/disk.h b/compat/disk.h
new file mode 100644 (file)
index 0000000..50a32e3
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef COMPAT_DISK_H
+#define COMPAT_DISK_H
+
+#include "git-compat-util.h"
+
+static int get_disk_info(struct strbuf *out)
+{
+       struct strbuf buf = STRBUF_INIT;
+       int res = 0;
+
+#ifdef GIT_WINDOWS_NATIVE
+       char volume_name[MAX_PATH], fs_name[MAX_PATH];
+       DWORD serial_number, component_length, flags;
+       ULARGE_INTEGER avail2caller, total, avail;
+
+       strbuf_realpath(&buf, ".", 1);
+       if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) {
+               error(_("could not determine free disk size for '%s'"),
+                     buf.buf);
+               res = -1;
+               goto cleanup;
+       }
+
+       strbuf_setlen(&buf, offset_1st_component(buf.buf));
+       if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name),
+                                  &serial_number, &component_length, &flags,
+                                  fs_name, sizeof(fs_name))) {
+               error(_("could not get info for '%s'"), buf.buf);
+               res = -1;
+               goto cleanup;
+       }
+       strbuf_addf(out, "Available space on '%s': ", buf.buf);
+       strbuf_humanise_bytes(out, avail2caller.QuadPart);
+       strbuf_addch(out, '\n');
+#else
+       struct statvfs stat;
+
+       strbuf_realpath(&buf, ".", 1);
+       if (statvfs(buf.buf, &stat) < 0) {
+               error_errno(_("could not determine free disk size for '%s'"),
+                           buf.buf);
+               res = -1;
+               goto cleanup;
+       }
+
+       strbuf_addf(out, "Available space on '%s': ", buf.buf);
+       strbuf_humanise_bytes(out, (off_t)stat.f_bsize * (off_t)stat.f_bavail);
+       strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag);
+#endif
+
+cleanup:
+       strbuf_release(&buf);
+       return res;
+}
+
+#endif /* COMPAT_DISK_H */
diff --git a/compat/nonblock.c b/compat/nonblock.c
new file mode 100644 (file)
index 0000000..9694ebd
--- /dev/null
@@ -0,0 +1,50 @@
+#include "git-compat-util.h"
+#include "nonblock.h"
+
+#ifdef O_NONBLOCK
+
+int enable_pipe_nonblock(int fd)
+{
+       int flags = fcntl(fd, F_GETFL);
+       if (flags < 0)
+               return -1;
+       flags |= O_NONBLOCK;
+       return fcntl(fd, F_SETFL, flags);
+}
+
+#elif defined(GIT_WINDOWS_NATIVE)
+
+#include "win32.h"
+
+int enable_pipe_nonblock(int fd)
+{
+       HANDLE h = (HANDLE)_get_osfhandle(fd);
+       DWORD mode;
+       DWORD type = GetFileType(h);
+       if (type == FILE_TYPE_UNKNOWN && GetLastError() != NO_ERROR) {
+               errno = EBADF;
+               return -1;
+       }
+       if (type != FILE_TYPE_PIPE)
+               BUG("unsupported file type: %lu", type);
+       if (!GetNamedPipeHandleState(h, &mode, NULL, NULL, NULL, NULL, 0)) {
+               errno = err_win_to_posix(GetLastError());
+               return -1;
+       }
+       mode |= PIPE_NOWAIT;
+       if (!SetNamedPipeHandleState(h, &mode, NULL, NULL)) {
+               errno = err_win_to_posix(GetLastError());
+               return -1;
+       }
+       return 0;
+}
+
+#else
+
+int enable_pipe_nonblock(int fd)
+{
+       errno = ENOSYS;
+       return -1;
+}
+
+#endif
diff --git a/compat/nonblock.h b/compat/nonblock.h
new file mode 100644 (file)
index 0000000..af1a331
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef COMPAT_NONBLOCK_H
+#define COMPAT_NONBLOCK_H
+
+/*
+ * Enable non-blocking I/O for the pipe specified by the passed-in descriptor.
+ */
+int enable_pipe_nonblock(int fd);
+
+#endif
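A caller uses enable_pipe_nonblock() so that reading an empty pipe fails with EAGAIN instead of blocking. A standalone usage sketch of the POSIX path, calling fcntl(2) directly rather than the new compat helper:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Once the read end of a pipe is non-blocking, read(2) on an empty pipe
     * returns -1 with EAGAIN instead of stalling. This mirrors only the POSIX
     * O_NONBLOCK branch of enable_pipe_nonblock().
     */
    int main(void)
    {
            int fds[2], flags;
            char buf[16];

            if (pipe(fds) < 0)
                    return 1;
            flags = fcntl(fds[0], F_GETFL);
            if (flags < 0 || fcntl(fds[0], F_SETFL, flags | O_NONBLOCK) < 0)
                    return 1;

            /* Nothing was written yet, so this fails immediately. */
            if (read(fds[0], buf, sizeof(buf)) < 0 && errno == EAGAIN)
                    puts("read would have blocked");

            close(fds[0]);
            close(fds[1]);
            return 0;
    }
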
index 07c3f7dd6b6672616a43df83b7324e69b4f83f71..642d16124eb20c28de20e8601e99f1b347c7fa75 100644 (file)
@@ -14,8 +14,6 @@
 #include "dir.h"
 #include "packfile.h"
 #include "help.h"
-#include "archive.h"
-#include "object-store.h"
 
 static void setup_enlistment_directory(int argc, const char **argv,
                                       const char * const *usagestr,
@@ -285,99 +283,6 @@ static int unregister_dir(void)
        return res;
 }
 
-static int add_directory_to_archiver(struct strvec *archiver_args,
-                                         const char *path, int recurse)
-{
-       int at_root = !*path;
-       DIR *dir = opendir(at_root ? "." : path);
-       struct dirent *e;
-       struct strbuf buf = STRBUF_INIT;
-       size_t len;
-       int res = 0;
-
-       if (!dir)
-               return error_errno(_("could not open directory '%s'"), path);
-
-       if (!at_root)
-               strbuf_addf(&buf, "%s/", path);
-       len = buf.len;
-       strvec_pushf(archiver_args, "--prefix=%s", buf.buf);
-
-       while (!res && (e = readdir(dir))) {
-               if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
-                       continue;
-
-               strbuf_setlen(&buf, len);
-               strbuf_addstr(&buf, e->d_name);
-
-               if (e->d_type == DT_REG)
-                       strvec_pushf(archiver_args, "--add-file=%s", buf.buf);
-               else if (e->d_type != DT_DIR)
-                       warning(_("skipping '%s', which is neither file nor "
-                                 "directory"), buf.buf);
-               else if (recurse &&
-                        add_directory_to_archiver(archiver_args,
-                                                  buf.buf, recurse) < 0)
-                       res = -1;
-       }
-
-       closedir(dir);
-       strbuf_release(&buf);
-       return res;
-}
-
-#ifndef WIN32
-#include <sys/statvfs.h>
-#endif
-
-static int get_disk_info(struct strbuf *out)
-{
-#ifdef WIN32
-       struct strbuf buf = STRBUF_INIT;
-       char volume_name[MAX_PATH], fs_name[MAX_PATH];
-       DWORD serial_number, component_length, flags;
-       ULARGE_INTEGER avail2caller, total, avail;
-
-       strbuf_realpath(&buf, ".", 1);
-       if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) {
-               error(_("could not determine free disk size for '%s'"),
-                     buf.buf);
-               strbuf_release(&buf);
-               return -1;
-       }
-
-       strbuf_setlen(&buf, offset_1st_component(buf.buf));
-       if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name),
-                                  &serial_number, &component_length, &flags,
-                                  fs_name, sizeof(fs_name))) {
-               error(_("could not get info for '%s'"), buf.buf);
-               strbuf_release(&buf);
-               return -1;
-       }
-       strbuf_addf(out, "Available space on '%s': ", buf.buf);
-       strbuf_humanise_bytes(out, avail2caller.QuadPart);
-       strbuf_addch(out, '\n');
-       strbuf_release(&buf);
-#else
-       struct strbuf buf = STRBUF_INIT;
-       struct statvfs stat;
-
-       strbuf_realpath(&buf, ".", 1);
-       if (statvfs(buf.buf, &stat) < 0) {
-               error_errno(_("could not determine free disk size for '%s'"),
-                           buf.buf);
-               strbuf_release(&buf);
-               return -1;
-       }
-
-       strbuf_addf(out, "Available space on '%s': ", buf.buf);
-       strbuf_humanise_bytes(out, st_mult(stat.f_bsize, stat.f_bavail));
-       strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag);
-       strbuf_release(&buf);
-#endif
-       return 0;
-}
-
 /* printf-style interface, expects `<key>=<value>` argument */
 static int set_config(const char *fmt, ...)
 {
@@ -628,83 +533,6 @@ cleanup:
        return res;
 }
 
-static void dir_file_stats_objects(const char *full_path, size_t full_path_len,
-                                  const char *file_name, void *data)
-{
-       struct strbuf *buf = data;
-       struct stat st;
-
-       if (!stat(full_path, &st))
-               strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name,
-                           (uintmax_t)st.st_size);
-}
-
-static int dir_file_stats(struct object_directory *object_dir, void *data)
-{
-       struct strbuf *buf = data;
-
-       strbuf_addf(buf, "Contents of %s:\n", object_dir->path);
-
-       for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects,
-                                 data);
-
-       return 0;
-}
-
-static int count_files(char *path)
-{
-       DIR *dir = opendir(path);
-       struct dirent *e;
-       int count = 0;
-
-       if (!dir)
-               return 0;
-
-       while ((e = readdir(dir)) != NULL)
-               if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG)
-                       count++;
-
-       closedir(dir);
-       return count;
-}
-
-static void loose_objs_stats(struct strbuf *buf, const char *path)
-{
-       DIR *dir = opendir(path);
-       struct dirent *e;
-       int count;
-       int total = 0;
-       unsigned char c;
-       struct strbuf count_path = STRBUF_INIT;
-       size_t base_path_len;
-
-       if (!dir)
-               return;
-
-       strbuf_addstr(buf, "Object directory stats for ");
-       strbuf_add_absolute_path(buf, path);
-       strbuf_addstr(buf, ":\n");
-
-       strbuf_add_absolute_path(&count_path, path);
-       strbuf_addch(&count_path, '/');
-       base_path_len = count_path.len;
-
-       while ((e = readdir(dir)) != NULL)
-               if (!is_dot_or_dotdot(e->d_name) &&
-                   e->d_type == DT_DIR && strlen(e->d_name) == 2 &&
-                   !hex_to_bytes(&c, e->d_name, 1)) {
-                       strbuf_setlen(&count_path, base_path_len);
-                       strbuf_addstr(&count_path, e->d_name);
-                       total += (count = count_files(count_path.buf));
-                       strbuf_addf(buf, "%s : %7d files\n", e->d_name, count);
-               }
-
-       strbuf_addf(buf, "Total: %d loose objects", total);
-
-       strbuf_release(&count_path);
-       closedir(dir);
-}
-
 static int cmd_diagnose(int argc, const char **argv)
 {
        struct option options[] = {
@@ -714,106 +542,19 @@ static int cmd_diagnose(int argc, const char **argv)
                N_("scalar diagnose [<enlistment>]"),
                NULL
        };
-       struct strbuf zip_path = STRBUF_INIT;
-       struct strvec archiver_args = STRVEC_INIT;
-       char **argv_copy = NULL;
-       int stdout_fd = -1, archiver_fd = -1;
-       time_t now = time(NULL);
-       struct tm tm;
-       struct strbuf buf = STRBUF_INIT;
+       struct strbuf diagnostics_root = STRBUF_INIT;
        int res = 0;
 
        argc = parse_options(argc, argv, NULL, options,
                             usage, 0);
 
-       setup_enlistment_directory(argc, argv, usage, options, &zip_path);
-
-       strbuf_addstr(&zip_path, "/.scalarDiagnostics/scalar_");
-       strbuf_addftime(&zip_path,
-                       "%Y%m%d_%H%M%S", localtime_r(&now, &tm), 0, 0);
-       strbuf_addstr(&zip_path, ".zip");
-       switch (safe_create_leading_directories(zip_path.buf)) {
-       case SCLD_EXISTS:
-       case SCLD_OK:
-               break;
-       default:
-               error_errno(_("could not create directory for '%s'"),
-                           zip_path.buf);
-               goto diagnose_cleanup;
-       }
-       stdout_fd = dup(1);
-       if (stdout_fd < 0) {
-               res = error_errno(_("could not duplicate stdout"));
-               goto diagnose_cleanup;
-       }
-
-       archiver_fd = xopen(zip_path.buf, O_CREAT | O_WRONLY | O_TRUNC, 0666);
-       if (archiver_fd < 0 || dup2(archiver_fd, 1) < 0) {
-               res = error_errno(_("could not redirect output"));
-               goto diagnose_cleanup;
-       }
-
-       init_zip_archiver();
-       strvec_pushl(&archiver_args, "scalar-diagnose", "--format=zip", NULL);
-
-       strbuf_reset(&buf);
-       strbuf_addstr(&buf, "Collecting diagnostic info\n\n");
-       get_version_info(&buf, 1);
+       setup_enlistment_directory(argc, argv, usage, options, &diagnostics_root);
+       strbuf_addstr(&diagnostics_root, "/.scalarDiagnostics");
 
-       strbuf_addf(&buf, "Enlistment root: %s\n", the_repository->worktree);
-       get_disk_info(&buf);
-       write_or_die(stdout_fd, buf.buf, buf.len);
-       strvec_pushf(&archiver_args,
-                    "--add-virtual-file=diagnostics.log:%.*s",
-                    (int)buf.len, buf.buf);
-
-       strbuf_reset(&buf);
-       strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:");
-       dir_file_stats(the_repository->objects->odb, &buf);
-       foreach_alt_odb(dir_file_stats, &buf);
-       strvec_push(&archiver_args, buf.buf);
-
-       strbuf_reset(&buf);
-       strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:");
-       loose_objs_stats(&buf, ".git/objects");
-       strvec_push(&archiver_args, buf.buf);
-
-       if ((res = add_directory_to_archiver(&archiver_args, ".git", 0)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/hooks", 0)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/info", 0)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/logs", 1)) ||
-           (res = add_directory_to_archiver(&archiver_args, ".git/objects/info", 0)))
-               goto diagnose_cleanup;
-
-       strvec_pushl(&archiver_args, "--prefix=",
-                    oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
-
-       /* `write_archive()` modifies the `argv` passed to it. Let it. */
-       argv_copy = xmemdupz(archiver_args.v,
-                            sizeof(char *) * archiver_args.nr);
-       res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL,
-                           the_repository, NULL, 0);
-       if (res) {
-               error(_("failed to write archive"));
-               goto diagnose_cleanup;
-       }
-
-       if (!res)
-               fprintf(stderr, "\n"
-                      "Diagnostics complete.\n"
-                      "All of the gathered info is captured in '%s'\n",
-                      zip_path.buf);
-
-diagnose_cleanup:
-       if (archiver_fd >= 0) {
-               close(1);
-               dup2(stdout_fd, 1);
-       }
-       free(argv_copy);
-       strvec_clear(&archiver_args);
-       strbuf_release(&zip_path);
-       strbuf_release(&buf);
+       res = run_git("diagnose", "--mode=all", "-s", "%Y%m%d_%H%M%S",
+                     "-o", diagnostics_root.buf, NULL);
 
+       strbuf_release(&diagnostics_root);
        return res;
 }
 
index 365eab9b54fc6cacfd9a12bc56713382517ffcbb..dfb949f52eed045e73ad06c2ed972bdd84f5663d 100755 (executable)
@@ -202,14 +202,14 @@ test_expect_success UNZIP 'scalar diagnose' '
        sed -n "s/.*$SQ\\(.*\\.zip\\)$SQ.*/\\1/p" <err >zip_path &&
        zip_path=$(cat zip_path) &&
        test -n "$zip_path" &&
-       unzip -v "$zip_path" &&
+       "$GIT_UNZIP" -v "$zip_path" &&
        folder=${zip_path%.zip} &&
        test_path_is_missing "$folder" &&
-       unzip -p "$zip_path" diagnostics.log >out &&
+       "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out &&
        test_file_not_empty out &&
-       unzip -p "$zip_path" packs-local.txt >out &&
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
        grep "$(pwd)/.git/objects" out &&
-       unzip -p "$zip_path" objects-local.txt >out &&
+       "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out &&
        grep "^Total: [1-9]" out
 '
 
diff --git a/diagnose.c b/diagnose.c
new file mode 100644 (file)
index 0000000..beb0a87
--- /dev/null
@@ -0,0 +1,269 @@
+#include "cache.h"
+#include "diagnose.h"
+#include "compat/disk.h"
+#include "archive.h"
+#include "dir.h"
+#include "help.h"
+#include "strvec.h"
+#include "object-store.h"
+#include "packfile.h"
+
+struct archive_dir {
+       const char *path;
+       int recursive;
+};
+
+struct diagnose_option {
+       enum diagnose_mode mode;
+       const char *option_name;
+};
+
+static struct diagnose_option diagnose_options[] = {
+       { DIAGNOSE_STATS, "stats" },
+       { DIAGNOSE_ALL, "all" },
+};
+
+int option_parse_diagnose(const struct option *opt, const char *arg, int unset)
+{
+       int i;
+       enum diagnose_mode *diagnose = opt->value;
+
+       if (!arg) {
+               *diagnose = unset ? DIAGNOSE_NONE : DIAGNOSE_STATS;
+               return 0;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(diagnose_options); i++) {
+               if (!strcmp(arg, diagnose_options[i].option_name)) {
+                       *diagnose = diagnose_options[i].mode;
+                       return 0;
+               }
+       }
+
+       return error(_("invalid --%s value '%s'"), opt->long_name, arg);
+}
+
+static void dir_file_stats_objects(const char *full_path, size_t full_path_len,
+                                  const char *file_name, void *data)
+{
+       struct strbuf *buf = data;
+       struct stat st;
+
+       if (!stat(full_path, &st))
+               strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name,
+                           (uintmax_t)st.st_size);
+}
+
+static int dir_file_stats(struct object_directory *object_dir, void *data)
+{
+       struct strbuf *buf = data;
+
+       strbuf_addf(buf, "Contents of %s:\n", object_dir->path);
+
+       for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects,
+                                 data);
+
+       return 0;
+}
+
+static int count_files(char *path)
+{
+       DIR *dir = opendir(path);
+       struct dirent *e;
+       int count = 0;
+
+       if (!dir)
+               return 0;
+
+       while ((e = readdir(dir)) != NULL)
+               if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG)
+                       count++;
+
+       closedir(dir);
+       return count;
+}
+
+static void loose_objs_stats(struct strbuf *buf, const char *path)
+{
+       DIR *dir = opendir(path);
+       struct dirent *e;
+       int count;
+       int total = 0;
+       unsigned char c;
+       struct strbuf count_path = STRBUF_INIT;
+       size_t base_path_len;
+
+       if (!dir)
+               return;
+
+       strbuf_addstr(buf, "Object directory stats for ");
+       strbuf_add_absolute_path(buf, path);
+       strbuf_addstr(buf, ":\n");
+
+       strbuf_add_absolute_path(&count_path, path);
+       strbuf_addch(&count_path, '/');
+       base_path_len = count_path.len;
+
+       while ((e = readdir(dir)) != NULL)
+               if (!is_dot_or_dotdot(e->d_name) &&
+                   e->d_type == DT_DIR && strlen(e->d_name) == 2 &&
+                   !hex_to_bytes(&c, e->d_name, 1)) {
+                       strbuf_setlen(&count_path, base_path_len);
+                       strbuf_addstr(&count_path, e->d_name);
+                       total += (count = count_files(count_path.buf));
+                       strbuf_addf(buf, "%s : %7d files\n", e->d_name, count);
+               }
+
+       strbuf_addf(buf, "Total: %d loose objects", total);
+
+       strbuf_release(&count_path);
+       closedir(dir);
+}
+
+static int add_directory_to_archiver(struct strvec *archiver_args,
+                                    const char *path, int recurse)
+{
+       int at_root = !*path;
+       DIR *dir;
+       struct dirent *e;
+       struct strbuf buf = STRBUF_INIT;
+       size_t len;
+       int res = 0;
+
+       dir = opendir(at_root ? "." : path);
+       if (!dir) {
+               if (errno == ENOENT) {
+                       warning(_("could not archive missing directory '%s'"), path);
+                       return 0;
+               }
+               return error_errno(_("could not open directory '%s'"), path);
+       }
+
+       if (!at_root)
+               strbuf_addf(&buf, "%s/", path);
+       len = buf.len;
+       strvec_pushf(archiver_args, "--prefix=%s", buf.buf);
+
+       while (!res && (e = readdir(dir))) {
+               if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
+                       continue;
+
+               strbuf_setlen(&buf, len);
+               strbuf_addstr(&buf, e->d_name);
+
+               if (e->d_type == DT_REG)
+                       strvec_pushf(archiver_args, "--add-file=%s", buf.buf);
+               else if (e->d_type != DT_DIR)
+                       warning(_("skipping '%s', which is neither file nor "
+                                 "directory"), buf.buf);
+               else if (recurse &&
+                        add_directory_to_archiver(archiver_args,
+                                                  buf.buf, recurse) < 0)
+                       res = -1;
+       }
+
+       closedir(dir);
+       strbuf_release(&buf);
+       return res;
+}
+
+int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
+{
+       struct strvec archiver_args = STRVEC_INIT;
+       char **argv_copy = NULL;
+       int stdout_fd = -1, archiver_fd = -1;
+       struct strbuf buf = STRBUF_INIT;
+       int res, i;
+       struct archive_dir archive_dirs[] = {
+               { ".git", 0 },
+               { ".git/hooks", 0 },
+               { ".git/info", 0 },
+               { ".git/logs", 1 },
+               { ".git/objects/info", 0 }
+       };
+
+       if (mode == DIAGNOSE_NONE) {
+               res = 0;
+               goto diagnose_cleanup;
+       }
+
+       stdout_fd = dup(STDOUT_FILENO);
+       if (stdout_fd < 0) {
+               res = error_errno(_("could not duplicate stdout"));
+               goto diagnose_cleanup;
+       }
+
+       archiver_fd = xopen(zip_path->buf, O_CREAT | O_WRONLY | O_TRUNC, 0666);
+       if (dup2(archiver_fd, STDOUT_FILENO) < 0) {
+               res = error_errno(_("could not redirect output"));
+               goto diagnose_cleanup;
+       }
+
+       init_zip_archiver();
+       strvec_pushl(&archiver_args, "git-diagnose", "--format=zip", NULL);
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, "Collecting diagnostic info\n\n");
+       get_version_info(&buf, 1);
+
+       strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
+       get_disk_info(&buf);
+       write_or_die(stdout_fd, buf.buf, buf.len);
+       strvec_pushf(&archiver_args,
+                    "--add-virtual-file=diagnostics.log:%.*s",
+                    (int)buf.len, buf.buf);
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:");
+       dir_file_stats(the_repository->objects->odb, &buf);
+       foreach_alt_odb(dir_file_stats, &buf);
+       strvec_push(&archiver_args, buf.buf);
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:");
+       loose_objs_stats(&buf, ".git/objects");
+       strvec_push(&archiver_args, buf.buf);
+
+       /* Only include this if explicitly requested */
+       if (mode == DIAGNOSE_ALL) {
+               for (i = 0; i < ARRAY_SIZE(archive_dirs); i++) {
+                       if (add_directory_to_archiver(&archiver_args,
+                                                     archive_dirs[i].path,
+                                                     archive_dirs[i].recursive)) {
+                               res = error_errno(_("could not add directory '%s' to archiver"),
+                                                 archive_dirs[i].path);
+                               goto diagnose_cleanup;
+                       }
+               }
+       }
+
+       strvec_pushl(&archiver_args, "--prefix=",
+                    oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
+
+       /* `write_archive()` modifies the `argv` passed to it. Let it. */
+       argv_copy = xmemdupz(archiver_args.v,
+                            sizeof(char *) * archiver_args.nr);
+       res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL,
+                           the_repository, NULL, 0);
+       if (res) {
+               error(_("failed to write archive"));
+               goto diagnose_cleanup;
+       }
+
+       fprintf(stderr, "\n"
+               "Diagnostics complete.\n"
+               "All of the gathered info is captured in '%s'\n",
+               zip_path->buf);
+
+diagnose_cleanup:
+       if (archiver_fd >= 0) {
+               dup2(stdout_fd, STDOUT_FILENO);
+               close(stdout_fd);
+               close(archiver_fd);
+       }
+       free(argv_copy);
+       strvec_clear(&archiver_args);
+       strbuf_release(&buf);
+
+       return res;
+}
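Because write_archive() emits the zip stream on stdout, create_diagnostics_archive() saves fd 1 with dup(), points it at the archive with dup2(), and restores it afterwards. The same save/redirect/restore pattern in isolation (hypothetical file name, plain POSIX calls, no git internals):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Generic save/redirect/restore of stdout, as done around write_archive()
     * above. "output.zip" is a placeholder file name.
     */
    int main(void)
    {
            int saved_stdout = dup(STDOUT_FILENO);
            int out = open("output.zip", O_CREAT | O_WRONLY | O_TRUNC, 0666);

            if (saved_stdout < 0 || out < 0)
                    return 1;

            dup2(out, STDOUT_FILENO);            /* fd 1 now writes to the file */
            printf("this lands in output.zip\n");
            fflush(stdout);

            dup2(saved_stdout, STDOUT_FILENO);   /* put the original stdout back */
            close(saved_stdout);
            close(out);
            printf("stdout is restored\n");
            return 0;
    }
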
diff --git a/diagnose.h b/diagnose.h
new file mode 100644 (file)
index 0000000..7a4951a
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef DIAGNOSE_H
+#define DIAGNOSE_H
+
+#include "strbuf.h"
+#include "parse-options.h"
+
+enum diagnose_mode {
+       DIAGNOSE_NONE,
+       DIAGNOSE_STATS,
+       DIAGNOSE_ALL
+};
+
+int option_parse_diagnose(const struct option *opt, const char *arg, int unset);
+
+int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode);
+
+#endif /* DIAGNOSE_H */
index 7eb66a417aa08daa6f22753eba5c52f32361dfc8..2edea41a2345af623017d43e14a74d71a32bb873 100644 (file)
@@ -466,6 +466,11 @@ static void do_oneway_diff(struct unpack_trees_options *o,
         * Something removed from the tree?
         */
        if (!idx) {
+               if (S_ISSPARSEDIR(tree->ce_mode)) {
+                       diff_tree_oid(&tree->oid, NULL, tree->name, &revs->diffopt);
+                       return;
+               }
+
                diff_index_show_file(revs, "-", tree, &tree->oid, 1,
                                     tree->ce_mode, 0);
                return;
index b3296ce7d15140bff12299b25d1450f69f8508ee..b2004437dceb3994b19bdb5a7ea8d13b595502f8 100644 (file)
@@ -56,7 +56,6 @@ const char *askpass_program;
 const char *excludes_file;
 enum auto_crlf auto_crlf = AUTO_CRLF_FALSE;
 int read_replace_refs = 1;
-char *git_replace_ref_base;
 enum eol core_eol = EOL_UNSET;
 int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
 char *check_roundtrip_encoding = "SHIFT-JIS";
@@ -162,6 +161,7 @@ const char *getenv_safe(struct strvec *argv, const char *name)
 
 void setup_git_env(const char *git_dir)
 {
+       char *git_replace_ref_base;
        const char *shallow_file;
        const char *replace_ref_base;
        struct set_gitdir_args args = { NULL };
@@ -182,9 +182,10 @@ void setup_git_env(const char *git_dir)
        if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT))
                read_replace_refs = 0;
        replace_ref_base = getenv(GIT_REPLACE_REF_BASE_ENVIRONMENT);
-       free(git_replace_ref_base);
        git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
                                                          : "refs/replace/");
+       update_ref_namespace(NAMESPACE_REPLACE, git_replace_ref_base);
+
        free(git_namespace);
        git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
        shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
index d35be4177b107768d6af30c5260f16ddc8cbb5a0..a1a508ee72e2f6f58ed4b61430bf2a6d61c00651 100644 (file)
@@ -323,6 +323,7 @@ static int find_common(struct fetch_negotiator *negotiator,
 {
        int fetching;
        int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
+       int negotiation_round = 0, haves = 0;
        const struct object_id *oid;
        unsigned in_vain = 0;
        int got_continue = 0;
@@ -461,9 +462,19 @@ static int find_common(struct fetch_negotiator *negotiator,
                packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
                print_verbose(args, "have %s", oid_to_hex(oid));
                in_vain++;
+               haves++;
                if (flush_at <= ++count) {
                        int ack;
 
+                       negotiation_round++;
+                       trace2_region_enter_printf("negotiation_v0_v1", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
+                       trace2_data_intmax("negotiation_v0_v1", the_repository,
+                                          "haves_added", haves);
+                       trace2_data_intmax("negotiation_v0_v1", the_repository,
+                                          "in_vain", in_vain);
+                       haves = 0;
                        packet_buf_flush(&req_buf);
                        send_request(args, fd[1], &req_buf);
                        strbuf_setlen(&req_buf, state_len);
@@ -485,6 +496,9 @@ static int find_common(struct fetch_negotiator *negotiator,
                                                      ack, oid_to_hex(result_oid));
                                switch (ack) {
                                case ACK:
+                                       trace2_region_leave_printf("negotiation_v0_v1", "round",
+                                                                  the_repository, "%d",
+                                                                  negotiation_round);
                                        flushes = 0;
                                        multi_ack = 0;
                                        retval = 0;
@@ -510,6 +524,7 @@ static int find_common(struct fetch_negotiator *negotiator,
                                                const char *hex = oid_to_hex(result_oid);
                                                packet_buf_write(&req_buf, "have %s\n", hex);
                                                state_len = req_buf.len;
+                                               haves++;
                                                /*
                                                 * Reset in_vain because an ack
                                                 * for this commit has not been
@@ -528,6 +543,9 @@ static int find_common(struct fetch_negotiator *negotiator,
                                }
                        } while (ack);
                        flushes--;
+                       trace2_region_leave_printf("negotiation_v0_v1", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
                        if (got_continue && MAX_IN_VAIN < in_vain) {
                                print_verbose(args, _("giving up"));
                                break; /* give up */
@@ -538,6 +556,8 @@ static int find_common(struct fetch_negotiator *negotiator,
        }
 done:
        trace2_region_leave("fetch-pack", "negotiation_v0_v1", the_repository);
+       trace2_data_intmax("negotiation_v0_v1", the_repository, "total_rounds",
+                          negotiation_round);
        if (!got_ready || !no_done) {
                packet_buf_write(&req_buf, "done\n");
                send_request(args, fd[1], &req_buf);
@@ -1381,6 +1401,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
 
        haves_added = add_haves(negotiator, &req_buf, haves_to_send);
        *in_vain += haves_added;
+       trace2_data_intmax("negotiation_v2", the_repository, "haves_added", haves_added);
+       trace2_data_intmax("negotiation_v2", the_repository, "in_vain", *in_vain);
        if (!haves_added || (seen_ack && *in_vain >= MAX_IN_VAIN)) {
                /* Send Done */
                packet_buf_write(&req_buf, "done\n");
@@ -1623,6 +1645,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
        struct oidset common = OIDSET_INIT;
        struct packet_reader reader;
        int in_vain = 0, negotiation_started = 0;
+       int negotiation_round = 0;
        int haves_to_send = INITIAL_FLUSH;
        struct fetch_negotiator negotiator_alloc;
        struct fetch_negotiator *negotiator;
@@ -1679,12 +1702,20 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
                                                    "negotiation_v2",
                                                    the_repository);
                        }
+                       negotiation_round++;
+                       trace2_region_enter_printf("negotiation_v2", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
                        if (send_fetch_request(negotiator, fd[1], args, ref,
                                               &common,
                                               &haves_to_send, &in_vain,
                                               reader.use_sideband,
-                                              seen_ack))
+                                              seen_ack)) {
+                               trace2_region_leave_printf("negotiation_v2", "round",
+                                                          the_repository, "%d",
+                                                          negotiation_round);
                                state = FETCH_GET_PACK;
+                       }
                        else
                                state = FETCH_PROCESS_ACKS;
                        break;
@@ -1697,6 +1728,9 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
                                seen_ack = 1;
                                oidset_insert(&common, &common_oid);
                        }
+                       trace2_region_leave_printf("negotiation_v2", "round",
+                                                  the_repository, "%d",
+                                                  negotiation_round);
                        if (received_ready) {
                                /*
                                 * Don't check for response delimiter; get_pack() will
@@ -1712,6 +1746,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
                        trace2_region_leave("fetch-pack",
                                            "negotiation_v2",
                                            the_repository);
+                       trace2_data_intmax("negotiation_v2", the_repository,
+                                          "total_rounds", negotiation_round);
                        /* Check for shallow-info section */
                        if (process_section_header(&reader, "shallow-info", 1))
                                receive_shallow_info(args, &reader, shallows, si);
@@ -2091,6 +2127,7 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
        int in_vain = 0;
        int seen_ack = 0;
        int last_iteration = 0;
+       int negotiation_round = 0;
        timestamp_t min_generation = GENERATION_NUMBER_INFINITY;
 
        fetch_negotiator_init(the_repository, &negotiator);
@@ -2104,11 +2141,17 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
                           add_to_object_array,
                           &nt_object_array);
 
+       trace2_region_enter("fetch-pack", "negotiate_using_fetch", the_repository);
        while (!last_iteration) {
                int haves_added;
                struct object_id common_oid;
                int received_ready = 0;
 
+               negotiation_round++;
+
+               trace2_region_enter_printf("negotiate_using_fetch", "round",
+                                          the_repository, "%d",
+                                          negotiation_round);
                strbuf_reset(&req_buf);
                write_fetch_command_and_capabilities(&req_buf, server_options);
 
@@ -2119,6 +2162,11 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
                if (!haves_added || (seen_ack && in_vain >= MAX_IN_VAIN))
                        last_iteration = 1;
 
+               trace2_data_intmax("negotiate_using_fetch", the_repository,
+                                  "haves_added", haves_added);
+               trace2_data_intmax("negotiate_using_fetch", the_repository,
+                                  "in_vain", in_vain);
+
                /* Send request */
                packet_buf_flush(&req_buf);
                if (write_in_full(fd[1], req_buf.buf, req_buf.len) < 0)
@@ -2151,7 +2199,13 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips,
                                                 REACH_SCRATCH, 0,
                                                 min_generation))
                        last_iteration = 1;
+               trace2_region_leave_printf("negotiate_using_fetch", "round",
+                                          the_repository, "%d",
+                                          negotiation_round);
        }
+       trace2_region_leave("fetch-pack", "negotiate_using_fetch", the_repository);
+       trace2_data_intmax("negotiate_using_fetch", the_repository,
+                          "total_rounds", negotiation_round);
        clear_common_flag(acked_commits);
        strbuf_release(&req_buf);
 }
diff --git a/fsck.c b/fsck.c
index dd4822ba1be7fe2de830613757110e6d55bfb566..b3da1d68c0b15d8066a5d44ecdfd24cde9fa1ade 100644 (file)
--- a/fsck.c
+++ b/fsck.c
@@ -308,7 +308,7 @@ static int fsck_walk_tree(struct tree *tree, void *data, struct fsck_options *op
                return -1;
 
        name = fsck_get_object_name(options, &tree->object.oid);
-       if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+       if (init_tree_desc_gently(&desc, tree->buffer, tree->size, 0))
                return -1;
        while (tree_entry_gently(&desc, &entry)) {
                struct object *obj;
@@ -578,7 +578,7 @@ static int fsck_tree(const struct object_id *tree_oid,
        const char *o_name;
        struct name_stack df_dup_candidates = { NULL };
 
-       if (init_tree_desc_gently(&desc, buffer, size)) {
+       if (init_tree_desc_gently(&desc, buffer, size, TREE_DESC_RAW_MODES)) {
                retval += report(options, tree_oid, OBJ_TREE,
                                 FSCK_MSG_BAD_TREE,
                                 "cannot be parsed as a tree");
diff --git a/fsck.h b/fsck.h
index d07f7a2459e8a264fe8cb2b6457d0ea729bb9e49..6f801e53b1d70ea9e522babbb119b913a7b32b62 100644 (file)
--- a/fsck.h
+++ b/fsck.h
@@ -56,7 +56,6 @@ enum fsck_msg_type {
        FUNC(GITMODULES_PATH, ERROR) \
        FUNC(GITMODULES_UPDATE, ERROR) \
        /* warnings */ \
-       FUNC(BAD_FILEMODE, WARN) \
        FUNC(EMPTY_NAME, WARN) \
        FUNC(FULL_PATHNAME, WARN) \
        FUNC(HAS_DOT, WARN) \
@@ -66,6 +65,7 @@ enum fsck_msg_type {
        FUNC(ZERO_PADDED_FILEMODE, WARN) \
        FUNC(NUL_IN_COMMIT, WARN) \
        /* infos (reported as warnings, but ignored by default) */ \
+       FUNC(BAD_FILEMODE, INFO) \
        FUNC(GITMODULES_PARSE, INFO) \
        FUNC(GITIGNORE_SYMLINK, INFO) \
        FUNC(GITATTRIBUTES_SYMLINK, INFO) \
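
The FUNC(...) lines above belong to git's X-macro list of fsck message IDs, so moving BAD_FILEMODE from the "warnings" group to the "infos" group only changes the default severity the list expands to; the message ID itself is unchanged. A simplified sketch of how such an X-macro expands (names here are invented, not git's actual fsck.h definitions):

enum msg_type { TYPE_ERROR, TYPE_WARN, TYPE_INFO };

#define FOREACH_MSG(FUNC) \
        FUNC(EMPTY_NAME, WARN) \
        FUNC(BAD_FILEMODE, INFO) /* default severity lives in the list */

/* Expand once into an enum of IDs ... */
#define MSG_ENUM(id, type) MSG_##id,
enum msg_id {
        FOREACH_MSG(MSG_ENUM)
        MSG_MAX
};
#undef MSG_ENUM

/* ... and once into a table of default severities. */
#define MSG_DEFAULT(id, type) TYPE_##type,
static const enum msg_type msg_default[] = {
        FOREACH_MSG(MSG_DEFAULT)
};
#undef MSG_DEFAULT

Per-message overrides such as the fsck.<msgId> configuration (e.g. fsck.badFilemode=error) still apply on top of whichever default the list assigns.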
index 36a25ae252f24d23852c5cdd810936df1c23558f..4e51a1c48bc007c9f65c9750bc55933064981270 100644 (file)
@@ -261,6 +261,7 @@ static inline int is_xplatform_dir_sep(int c)
 #include <sys/resource.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
+#include <sys/statvfs.h>
 #include <termios.h>
 #ifndef NO_SYS_SELECT_H
 #include <sys/select.h>
@@ -998,6 +999,28 @@ static inline unsigned long cast_size_t_to_ulong(size_t a)
        return (unsigned long)a;
 }
 
+/*
+ * Limit size of IO chunks, because huge chunks only cause pain.  OS X
+ * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
+ * the absence of bugs, large chunks can result in bad latencies when
+ * you decide to kill the process.
+ *
+ * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX
+ * that is smaller than that, clip it to SSIZE_MAX, as a call to
+ * read(2) or write(2) larger than that is allowed to fail.  As the last
+ * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value"
+ * to override this, if the definition of SSIZE_MAX given by the platform
+ * is broken.
+ */
+#ifndef MAX_IO_SIZE
+# define MAX_IO_SIZE_DEFAULT (8*1024*1024)
+# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT)
+#  define MAX_IO_SIZE SSIZE_MAX
+# else
+#  define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT
+# endif
+#endif
+
 #ifdef HAVE_ALLOCA_H
 # include <alloca.h>
 # define xalloca(size)      (alloca(size))
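
The MAX_IO_SIZE comment above is the whole contract: callers are expected to clamp each read(2)/write(2) to at most MAX_IO_SIZE bytes and retry interrupted calls. A hedged sketch of such a clamped write loop, in the spirit of git's xwrite() but not a copy of it:

#include <errno.h>
#include <unistd.h>

/* Assumes MAX_IO_SIZE is defined as above; illustrative only. */
static ssize_t write_all_clamped(int fd, const char *buf, size_t len)
{
        size_t written = 0;

        while (written < len) {
                size_t chunk = len - written;
                ssize_t ret;

                if (chunk > MAX_IO_SIZE)
                        chunk = MAX_IO_SIZE;
                ret = write(fd, buf + written, chunk);
                if (ret < 0) {
                        if (errno == EINTR)
                                continue; /* retry interrupted writes */
                        return -1;
                }
                written += ret;
        }
        return (ssize_t)written;
}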
diff --git a/git.c b/git.c
index e5d62fa5a92e95e1ede041ebf913d841744c31f8..0b9d8ef76776f3a1a011962b33b37d80fd21c4b2 100644 (file)
--- a/git.c
+++ b/git.c
@@ -522,6 +522,7 @@ static struct cmd_struct commands[] = {
        { "credential-cache--daemon", cmd_credential_cache_daemon },
        { "credential-store", cmd_credential_store },
        { "describe", cmd_describe, RUN_SETUP },
+       { "diagnose", cmd_diagnose, RUN_SETUP_GENTLY },
        { "diff", cmd_diff, NO_PARSEOPT },
        { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
        { "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT },
index d0ac0a6327a18f5eeee6fc43f036c9a2618fd672..bb6cbceee63ecb5cc4dfa5591366eb25b8d04bd1 100644 (file)
@@ -137,10 +137,12 @@ static int ref_filter_match(const char *refname,
 static int add_ref_decoration(const char *refname, const struct object_id *oid,
                              int flags, void *cb_data)
 {
+       int i;
        struct object *obj;
        enum object_type objtype;
        enum decoration_type deco_type = DECORATION_NONE;
        struct decoration_filter *filter = (struct decoration_filter *)cb_data;
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
 
        if (filter && !ref_filter_match(refname, filter))
                return 0;
@@ -165,16 +167,21 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid,
                return 0;
        obj = lookup_object_by_type(the_repository, oid, objtype);
 
-       if (starts_with(refname, "refs/heads/"))
-               deco_type = DECORATION_REF_LOCAL;
-       else if (starts_with(refname, "refs/remotes/"))
-               deco_type = DECORATION_REF_REMOTE;
-       else if (starts_with(refname, "refs/tags/"))
-               deco_type = DECORATION_REF_TAG;
-       else if (!strcmp(refname, "refs/stash"))
-               deco_type = DECORATION_REF_STASH;
-       else if (!strcmp(refname, "HEAD"))
-               deco_type = DECORATION_REF_HEAD;
+       for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) {
+               struct ref_namespace_info *info = &ref_namespace[i];
+
+               if (!info->decoration)
+                       continue;
+               if (info->exact) {
+                       if (!strcmp(refname, info->ref)) {
+                               deco_type = info->decoration;
+                               break;
+                       }
+               } else if (starts_with(refname, info->ref)) {
+                       deco_type = info->decoration;
+                       break;
+               }
+       }
 
        add_name_decoration(deco_type, refname, obj);
        while (obj->type == OBJ_TAG) {
index 7d105be275002de66cc80b769c1d719c0f91144e..e634a7624af983b742ab4c87d7cc30177e7b616f 100644 (file)
@@ -387,8 +387,24 @@ struct merge_options_internal {
 
        /* call_depth: recursion level counter for merging merge bases */
        int call_depth;
+
+       /* field that holds submodule conflict information */
+       struct string_list conflicted_submodules;
+};
+
+struct conflicted_submodule_item {
+       char *abbrev;
+       int flag;
 };
 
+static void conflicted_submodule_item_free(void *util, const char *str)
+{
+       struct conflicted_submodule_item *item = util;
+
+       free(item->abbrev);
+       free(item);
+}
+
 struct version_info {
        struct object_id oid;
        unsigned short mode;
@@ -517,6 +533,7 @@ enum conflict_and_info_types {
        CONFLICT_SUBMODULE_NOT_INITIALIZED,
        CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE,
        CONFLICT_SUBMODULE_MAY_HAVE_REWINDS,
+       CONFLICT_SUBMODULE_NULL_MERGE_BASE,
 
        /* Keep this entry _last_ in the list */
        NB_CONFLICT_TYPES,
@@ -570,6 +587,8 @@ static const char *type_short_descriptions[] = {
                "CONFLICT (submodule history not available)",
        [CONFLICT_SUBMODULE_MAY_HAVE_REWINDS] =
                "CONFLICT (submodule may have rewinds)",
+       [CONFLICT_SUBMODULE_NULL_MERGE_BASE] =
+               "CONFLICT (submodule lacks merge base)"
 };
 
 struct logical_conflict_info {
@@ -686,6 +705,9 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
 
        mem_pool_discard(&opti->pool, 0);
 
+       string_list_clear_func(&opti->conflicted_submodules,
+                                       conflicted_submodule_item_free);
+
        /* Clean out callback_data as well. */
        FREE_AND_NULL(renames->callback_data);
        renames->callback_data_nr = renames->callback_data_alloc = 0;
@@ -1744,24 +1766,32 @@ static int merge_submodule(struct merge_options *opt,
 
        int i;
        int search = !opt->priv->call_depth;
+       int sub_not_initialized = 1;
+       int sub_flag = CONFLICT_SUBMODULE_FAILED_TO_MERGE;
 
        /* store fallback answer in result in case we fail */
        oidcpy(result, opt->priv->call_depth ? o : a);
 
        /* we can not handle deletion conflicts */
-       if (is_null_oid(o))
-               return 0;
-       if (is_null_oid(a))
-               return 0;
-       if (is_null_oid(b))
-               return 0;
+       if (is_null_oid(a) || is_null_oid(b))
+               BUG("submodule deleted on one side; this should be handled outside of merge_submodule()");
 
-       if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) {
+       if ((sub_not_initialized = repo_submodule_init(&subrepo,
+               opt->repo, path, null_oid()))) {
                path_msg(opt, CONFLICT_SUBMODULE_NOT_INITIALIZED, 0,
                         path, NULL, NULL, NULL,
                         _("Failed to merge submodule %s (not checked out)"),
                         path);
-               return 0;
+               sub_flag = CONFLICT_SUBMODULE_NOT_INITIALIZED;
+               goto cleanup;
+       }
+
+       if (is_null_oid(o)) {
+               path_msg(opt, CONFLICT_SUBMODULE_NULL_MERGE_BASE, 0,
+                        path, NULL, NULL, NULL,
+                        _("Failed to merge submodule %s (no merge base)"),
+                        path);
+               goto cleanup;
        }
 
        if (!(commit_o = lookup_commit_reference(&subrepo, o)) ||
@@ -1771,6 +1801,7 @@ static int merge_submodule(struct merge_options *opt,
                         path, NULL, NULL, NULL,
                         _("Failed to merge submodule %s (commits not present)"),
                         path);
+               sub_flag = CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE;
                goto cleanup;
        }
 
@@ -1849,7 +1880,23 @@ static int merge_submodule(struct merge_options *opt,
 
        object_array_clear(&merges);
 cleanup:
-       repo_clear(&subrepo);
+       if (!opt->priv->call_depth && !ret) {
+               struct string_list *csub = &opt->priv->conflicted_submodules;
+               struct conflicted_submodule_item *util;
+               const char *abbrev;
+
+               util = xmalloc(sizeof(*util));
+               util->flag = sub_flag;
+               util->abbrev = NULL;
+               if (!sub_not_initialized) {
+                       abbrev = repo_find_unique_abbrev(&subrepo, b, DEFAULT_ABBREV);
+                       util->abbrev = xstrdup(abbrev);
+               }
+               string_list_append(csub, path)->util = util;
+       }
+
+       if (!sub_not_initialized)
+               repo_clear(&subrepo);
        return ret;
 }
 
@@ -4434,6 +4481,63 @@ static int record_conflicted_index_entries(struct merge_options *opt)
        return errs;
 }
 
+static void print_submodule_conflict_suggestion(struct string_list *csub) {
+       struct string_list_item *item;
+       struct strbuf msg = STRBUF_INIT;
+       struct strbuf tmp = STRBUF_INIT;
+       struct strbuf subs = STRBUF_INIT;
+
+       if (!csub->nr)
+               return;
+
+       strbuf_add_separated_string_list(&subs, " ", csub);
+       for_each_string_list_item(item, csub) {
+               struct conflicted_submodule_item *util = item->util;
+
+               /*
+                * NEEDSWORK: The steps to resolve these errors deserve a more
+                * detailed explanation than what is currently printed below.
+                */
+               if (util->flag == CONFLICT_SUBMODULE_NOT_INITIALIZED ||
+                   util->flag == CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE)
+                       continue;
+
+               /*
+                * TRANSLATORS: This is a line of advice to resolve a merge
+                * conflict in a submodule. The first argument is the submodule
+                * name, and the second argument is the abbreviated id of the
+                * commit that needs to be merged.  For example:
+                *  - go to submodule (mysubmodule), and either merge commit abc1234
+                */
+               strbuf_addf(&tmp, _(" - go to submodule (%s), and either merge commit %s\n"
+                                   "   or update to an existing commit which has merged those changes\n"),
+                           item->string, util->abbrev);
+       }
+
+       /*
+        * TRANSLATORS: This is a detailed message for resolving submodule
+        * conflicts.  The first argument is a string containing one step per
+        * submodule.  The second is a space-separated list of submodule names.
+        */
+       strbuf_addf(&msg,
+                   _("Recursive merging with submodules currently only supports trivial cases.\n"
+                     "Please manually handle the merging of each conflicted submodule.\n"
+                     "This can be accomplished with the following steps:\n"
+                     "%s"
+                     " - come back to superproject and run:\n\n"
+                     "      git add %s\n\n"
+                     "   to record the above merge or update\n"
+                     " - resolve any other conflicts in the superproject\n"
+                     " - commit the resulting index in the superproject\n"),
+                   tmp.buf, subs.buf);
+
+       printf("%s", msg.buf);
+
+       strbuf_release(&subs);
+       strbuf_release(&tmp);
+       strbuf_release(&msg);
+}
+
 void merge_display_update_messages(struct merge_options *opt,
                                   int detailed,
                                   struct merge_result *result)
@@ -4483,6 +4587,8 @@ void merge_display_update_messages(struct merge_options *opt,
        }
        string_list_clear(&olist, 0);
 
+       print_submodule_conflict_suggestion(&opti->conflicted_submodules);
+
        /* Also include needed rename limit adjustment now */
        diff_warn_rename_limit("merge.renamelimit",
                               opti->renames.needed_limit, 0);
@@ -4684,6 +4790,7 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
        trace2_region_enter("merge", "allocate/init", opt->repo);
        if (opt->priv) {
                clear_or_reinit_internal_opts(opt->priv, 1);
+               string_list_init_nodup(&opt->priv->conflicted_submodules);
                trace2_region_leave("merge", "allocate/init", opt->repo);
                return;
        }
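
The conflicted_submodules list above uses a common git pattern: each string_list item carries a heap-allocated struct in its util pointer, and string_list_clear_func() frees them all with a caller-supplied callback. A small self-contained sketch of that pattern (the payload struct and strings are made up, not merge-ort's):

#include "git-compat-util.h"
#include "string-list.h"

struct payload {
        char *abbrev;
        int flag;
};

static void payload_free(void *util, const char *str)
{
        struct payload *p = util;

        free(p->abbrev);
        free(p);
}

static void demo(void)
{
        struct string_list list = STRING_LIST_INIT_NODUP;
        struct payload *p = xcalloc(1, sizeof(*p));

        p->abbrev = xstrdup("abc1234");
        p->flag = 1;
        string_list_append(&list, "path/to/submodule")->util = p;

        /* ... consumers walk the list with for_each_string_list_item() ... */

        string_list_clear_func(&list, payload_free);
}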
index f770b8fe244c8ddd9dc349125a8e8e0ac0325574..06937acbf5497115c9ba4d9a2377634b1885db7e 100644 (file)
@@ -29,8 +29,8 @@
 ################################################################################
 
 debug_print () {
-       # Send message to stderr if global variable GIT_MERGETOOL_VIMDIFF is set
-       # to "true"
+       # Send message to stderr if global variable GIT_MERGETOOL_VIMDIFF_DEBUG
+       # is set.
 
        if test -n "$GIT_MERGETOOL_VIMDIFF_DEBUG"
        then
@@ -66,11 +66,6 @@ gen_cmd_aux () {
        debug_print "LAYOUT    : $LAYOUT"
        debug_print "CMD       : $CMD"
 
-       if test -z "$CMD"
-       then
-               CMD="echo" # vim "nop" operator
-       fi
-
        start=0
        end=${#LAYOUT}
 
@@ -144,11 +139,10 @@ gen_cmd_aux () {
 
        # Step 2:
        #
-       # Search for all valid separators ("+", "/" or ",") which are *not*
+       # Search for all valid separators ("/" or ",") which are *not*
        # inside parenthesis. Save the index at which each of them makes the
        # first appearance.
 
-       index_new_tab=""
        index_horizontal_split=""
        index_vertical_split=""
 
@@ -182,14 +176,7 @@ gen_cmd_aux () {
                then
                        current=$c
 
-                       if test "$current" = "+"
-                       then
-                               if test -z "$index_new_tab"
-                               then
-                                       index_new_tab=$i
-                               fi
-
-                       elif test "$current" = "/"
+                       if test "$current" = "/"
                        then
                                if test -z "$index_horizontal_split"
                                then
@@ -219,14 +206,7 @@ gen_cmd_aux () {
 
        terminate="false"
 
-       if ! test -z "$index_new_tab"
-       then
-               before="-tabnew"
-               after="tabnext"
-               index=$index_new_tab
-               terminate="true"
-
-       elif ! test -z "$index_horizontal_split"
+       if ! test -z "$index_horizontal_split"
        then
                before="leftabove split"
                after="wincmd j"
@@ -333,25 +313,31 @@ gen_cmd () {
 
        # Obtain the first part of vim "-c" option to obtain the desired layout
 
-       CMD=$(gen_cmd_aux "$LAYOUT")
-
-
-       # Adjust the just obtained script depending on whether more than one
-       # windows are visible or not
+       CMD=
+       oldIFS=$IFS
+       IFS=+
+       for tab in $LAYOUT
+       do
+               if test -z "$CMD"
+               then
+                       CMD="echo" # vim "nop" operator
+               else
+                       CMD="$CMD | tabnew"
+               fi
 
-       if echo "$LAYOUT" | grep ",\|/" >/dev/null
-       then
-               CMD="$CMD | tabdo windo diffthis"
-       else
-               CMD="$CMD | bufdo diffthis"
-       fi
+               # If this is a single window diff with all the buffers
+               if ! echo "$tab" | grep ",\|/" >/dev/null
+               then
+                       CMD="$CMD | silent execute 'bufdo diffthis'"
+               fi
 
+               CMD=$(gen_cmd_aux "$tab" "$CMD")
+       done
+       IFS=$oldIFS
 
-       # Add an extra "-c" option to move to the first tab (notice that we
-       # can't simply append the command to the previous "-c" string as
-       # explained here: https://github.com/vim/vim/issues/9076
+       CMD="$CMD | execute 'tabdo windo diffthis'"
 
-       FINAL_CMD="-c \"$CMD\" -c \"tabfirst\""
+       FINAL_CMD="-c \"set hidden diffopt-=hiddenoff | $CMD | tabfirst\""
 }
 
 
@@ -555,22 +541,22 @@ run_unit_tests () {
        TEST_CASE_15="  ((  (LOCAL , BASE , REMOTE) / MERGED))   +(BASE)   , LOCAL+ BASE , REMOTE+ (((LOCAL / BASE / REMOTE)) ,    MERGED   )  "
        TEST_CASE_16="LOCAL,BASE,REMOTE / MERGED + BASE,LOCAL + BASE,REMOTE + (LOCAL / BASE / REMOTE),MERGED"
 
-       EXPECTED_CMD_01="-c \"echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_02="-c \"echo | leftabove vertical split | 1b | wincmd l | 3b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_03="-c \"echo | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 4b | wincmd l | 3b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_04="-c \"echo | 4b | bufdo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_05="-c \"echo | leftabove split | 1b | wincmd j | leftabove split | 4b | wincmd j | 3b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_06="-c \"echo | leftabove vertical split | leftabove split | 1b | wincmd j | 3b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_07="-c \"echo | leftabove vertical split | 4b | wincmd l | leftabove split | 1b | wincmd j | 3b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_08="-c \"echo | leftabove split | leftabove vertical split | 1b | wincmd l | 3b | wincmd j | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_09="-c \"echo | leftabove split | 4b | wincmd j | leftabove vertical split | 1b | wincmd l | 3b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_10="-c \"echo | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_11="-c \"echo | -tabnew | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabnext | -tabnew | leftabove vertical split | 2b | wincmd l | 1b | tabnext | -tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnext | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_12="-c \"echo | leftabove vertical split | leftabove split | leftabove vertical split | 1b | wincmd l | 3b | wincmd j | 2b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_13="-c \"echo | leftabove vertical split | leftabove split | leftabove vertical split | 1b | wincmd l | 3b | wincmd j | 2b | wincmd l | leftabove vertical split | leftabove split | 1b | wincmd j | 3b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_14="-c \"echo | -tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnext | leftabove vertical split | 2b | wincmd l | 1b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_15="-c \"echo | -tabnew | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabnext | -tabnew | leftabove vertical split | 2b | wincmd l | 1b | tabnext | -tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnext | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
-       EXPECTED_CMD_16="-c \"echo | -tabnew | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabnext | -tabnew | leftabove vertical split | 2b | wincmd l | 1b | tabnext | -tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnext | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | tabdo windo diffthis\" -c \"tabfirst\""
+       EXPECTED_CMD_01="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_02="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | 1b | wincmd l | 3b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_03="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 4b | wincmd l | 3b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_04="-c \"set hidden diffopt-=hiddenoff | echo | silent execute 'bufdo diffthis' | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_05="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | 1b | wincmd j | leftabove split | 4b | wincmd j | 3b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_06="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | leftabove split | 1b | wincmd j | 3b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_07="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | 4b | wincmd l | leftabove split | 1b | wincmd j | 3b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_08="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | leftabove vertical split | 1b | wincmd l | 3b | wincmd j | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_09="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | 4b | wincmd j | leftabove vertical split | 1b | wincmd l | 3b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_10="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_11="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabnew | leftabove vertical split | 2b | wincmd l | 1b | tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnew | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_12="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | leftabove split | leftabove vertical split | 1b | wincmd l | 3b | wincmd j | 2b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_13="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | leftabove split | leftabove vertical split | 1b | wincmd l | 3b | wincmd j | 2b | wincmd l | leftabove vertical split | leftabove split | 1b | wincmd j | 3b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_14="-c \"set hidden diffopt-=hiddenoff | echo | leftabove vertical split | 2b | wincmd l | 3b | tabnew | leftabove vertical split | 2b | wincmd l | 1b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_15="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabnew | leftabove vertical split | 2b | wincmd l | 1b | tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnew | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
+       EXPECTED_CMD_16="-c \"set hidden diffopt-=hiddenoff | echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | 2b | wincmd l | 3b | wincmd j | 4b | tabnew | leftabove vertical split | 2b | wincmd l | 1b | tabnew | leftabove vertical split | 2b | wincmd l | 3b | tabnew | leftabove vertical split | leftabove split | 1b | wincmd j | leftabove split | 2b | wincmd j | 3b | wincmd l | 4b | execute 'tabdo windo diffthis' | tabfirst\""
 
        EXPECTED_TARGET_01="MERGED"
        EXPECTED_TARGET_02="LOCAL"
@@ -635,9 +621,7 @@ run_unit_tests () {
        cat >expect <<-\EOF
        -f
        -c
-       echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | quit | wincmd l | 2b | wincmd j | 3b | tabdo windo diffthis
-       -c
-       tabfirst
+       set hidden diffopt-=hiddenoff | echo | leftabove split | leftabove vertical split | 1b | wincmd l | leftabove vertical split | quit | wincmd l | 2b | wincmd j | 3b | execute 'tabdo windo diffthis' | tabfirst
        lo cal
        ' '
        mer ged
diff --git a/notes.c b/notes.c
index 7452e71cc8dd289c7ace9361a6c6e090b8b113f9..7bade6d8f69ca256a51436ec9a04d55d93528c7b 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -1005,6 +1005,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref,
 
        if (!notes_ref)
                notes_ref = default_notes_ref();
+       update_ref_namespace(NAMESPACE_NOTES, xstrdup(notes_ref));
 
        if (!combine_notes)
                combine_notes = combine_notes_concatenate;
index a2219464c2b31043692fb9c0d224fdf0b1c2c253..9caef89f1f0a59be4921ced5acf1377fee1bad5d 100644 (file)
--- a/object.h
+++ b/object.h
@@ -59,7 +59,7 @@ struct object_array {
 
 /*
  * object flag allocation:
- * revision.h:               0---------10         15             23------26
+ * revision.h:               0---------10         15             23------27
  * fetch-pack.c:             01    67
  * negotiator/default.c:       2--5
  * walker.c:                 0-2
index 6b0eb9048ecc3e04f3fc839a3d687866a6f0b910..a41887c94458669bda7285f50711b90ec53855bb 100644 (file)
@@ -2217,7 +2217,17 @@ static int add_promisor_object(const struct object_id *oid,
                               void *set_)
 {
        struct oidset *set = set_;
-       struct object *obj = parse_object(the_repository, oid);
+       struct object *obj;
+       int we_parsed_object;
+
+       obj = lookup_object(the_repository, oid);
+       if (obj && obj->parsed) {
+               we_parsed_object = 0;
+       } else {
+               we_parsed_object = 1;
+               obj = parse_object(the_repository, oid);
+       }
+
        if (!obj)
                return 1;
 
@@ -2231,7 +2241,7 @@ static int add_promisor_object(const struct object_id *oid,
                struct tree *tree = (struct tree *)obj;
                struct tree_desc desc;
                struct name_entry entry;
-               if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+               if (init_tree_desc_gently(&desc, tree->buffer, tree->size, 0))
                        /*
                         * Error messages are given when packs are
                         * verified, so do not print any here.
@@ -2239,7 +2249,8 @@ static int add_promisor_object(const struct object_id *oid,
                        return 0;
                while (tree_entry_gently(&desc, &entry))
                        oidset_insert(set, &entry.oid);
-               free_tree_buffer(tree);
+               if (we_parsed_object)
+                       free_tree_buffer(tree);
        } else if (obj->type == OBJ_COMMIT) {
                struct commit *commit = (struct commit *) obj;
                struct commit_list *parents = commit->parents;
index 84ad9c73cfb50a9fd34eed3b5e68fae522c796fe..46e77a85fee9d86e6e16e9f466ecfc3db893fd16 100644 (file)
@@ -759,3 +759,92 @@ int match_pathspec_attrs(struct index_state *istate,
 
        return 1;
 }
+
+int pathspec_needs_expanded_index(struct index_state *istate,
+                                 const struct pathspec *pathspec)
+{
+       unsigned int i, pos;
+       int res = 0;
+       char *skip_worktree_seen = NULL;
+
+       /*
+        * If index is not sparse, no index expansion is needed.
+        */
+       if (!istate->sparse_index)
+               return 0;
+
+       /*
+        * When using a magic pathspec, assume for the sake of simplicity that
+        * the index needs to be expanded to match all matchable files.
+        */
+       if (pathspec->magic)
+               return 1;
+
+       for (i = 0; i < pathspec->nr; i++) {
+               struct pathspec_item item = pathspec->items[i];
+
+               /*
+                * If the pathspec item has a wildcard, the index should be expanded
+                * if the pathspec has the possibility of matching a subset of entries inside
+                * of a sparse directory (but not the entire directory).
+                *
+                * If the pathspec item is a literal path, the index only needs to be expanded
+                * if a) the pathspec isn't in the sparse checkout cone (to make sure we don't
+                * expand for in-cone files) and b) it doesn't match any sparse directories
+                * (since we can reset whole sparse directories without expanding them).
+                */
+               if (item.nowildcard_len < item.len) {
+                       /*
+                        * Special case: if the pattern is a path inside the cone
+                        * followed by only wildcards, the pattern cannot match
+                        * partial sparse directories, so we know we don't need to
+                        * expand the index.
+                        *
+                        * Examples:
+                        * - in-cone/foo***: doesn't need expanded index
+                        * - not-in-cone/bar*: may need expanded index
+                        * - **.c: may need expanded index
+                        */
+                       if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len &&
+                           path_in_cone_mode_sparse_checkout(item.original, istate))
+                               continue;
+
+                       for (pos = 0; pos < istate->cache_nr; pos++) {
+                               struct cache_entry *ce = istate->cache[pos];
+
+                               if (!S_ISSPARSEDIR(ce->ce_mode))
+                                       continue;
+
+                               /*
+                                * If the pre-wildcard length is longer than the sparse
+                                * directory name and the sparse directory is the first
+                                * component of the pathspec, need to expand the index.
+                                */
+                               if (item.nowildcard_len > ce_namelen(ce) &&
+                                   !strncmp(item.original, ce->name, ce_namelen(ce))) {
+                                       res = 1;
+                                       break;
+                               }
+
+                               /*
+                                * If the pre-wildcard length is shorter than the sparse
+                                * directory and the pathspec does not match the whole
+                                * directory, need to expand the index.
+                                */
+                               if (!strncmp(item.original, ce->name, item.nowildcard_len) &&
+                                   wildmatch(item.original, ce->name, 0)) {
+                                       res = 1;
+                                       break;
+                               }
+                       }
+               } else if (!path_in_cone_mode_sparse_checkout(item.original, istate) &&
+                          !matches_skip_worktree(pathspec, i, &skip_worktree_seen))
+                       res = 1;
+
+               if (res > 0)
+                       break;
+       }
+
+       free(skip_worktree_seen);
+       return res;
+}
index 402ebb808081e386da46786fe1d5a1521e3cc4ac..41f6adfbb421fee745e3717ffd57ff92acfc82b1 100644 (file)
@@ -171,4 +171,16 @@ int match_pathspec_attrs(struct index_state *istate,
                         const char *name, int namelen,
                         const struct pathspec_item *item);
 
+/*
+ * Determine whether a pathspec will match only entire index entries (non-sparse
+ * files and/or entire sparse directories). If the pathspec has the potential to
+ * match partial contents of a sparse directory, return 1 to indicate the index
+ * should be expanded to match the appropriate index entries.
+ *
+ * For the sake of simplicity, always return 1 if using a more complex "magic"
+ * pathspec.
+ */
+int pathspec_needs_expanded_index(struct index_state *istate,
+                                 const struct pathspec *pathspec);
+
 #endif /* PATHSPEC_H */
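
A typical caller pairs this check with git's existing ensure_full_index() helper from sparse-index.h, so the sparse index is only expanded when the pathspec could match inside a sparse directory. A short sketch of that call pattern (assumed usage, not quoted from any particular builtin):

#include "cache.h"
#include "pathspec.h"
#include "sparse-index.h"

static void maybe_expand_index(struct index_state *istate,
                               const struct pathspec *pathspec)
{
        /* Expand only when a sparse directory could be partially matched. */
        if (pathspec_needs_expanded_index(istate, pathspec))
                ensure_full_index(istate);
}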
index 8e43c2def4ca4fa470b7a318ffa8a060b5c39838..ce4e73b6833a48119dfc5f36b64daa8e962dc9c4 100644 (file)
@@ -309,7 +309,8 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out)
        return err;
 }
 
-int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out)
+int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len,
+                                            int fd_out, int *packet_counter)
 {
        int err = 0;
        size_t bytes_written = 0;
@@ -324,6 +325,8 @@ int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_ou
                        break;
                err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write);
                bytes_written += bytes_to_write;
+               if (packet_counter)
+                       (*packet_counter)++;
        }
        return err;
 }
index 1f623de60a856944b8c1b3e844228e36c8c07162..79c538b99e477660fb8819d7fd0175daf4520cc5 100644 (file)
@@ -32,7 +32,13 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f
 int packet_flush_gently(int fd);
 int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
-int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out);
+int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len,
+                                            int fd_out, int *packet_counter);
+static inline int write_packetized_from_buf_no_flush(const char *src_in,
+                                                    size_t len, int fd_out)
+{
+       return write_packetized_from_buf_no_flush_count(src_in, len, fd_out, NULL);
+}
 
 /*
  * Stdio versions of packet_write functions. When mixing these with fd
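
The new _count variant reports how many pkt-lines were emitted, while the inline wrapper keeps existing callers source-compatible. A hedged sketch of using the counter (the function name and framing below are invented):

#include "git-compat-util.h"
#include "pkt-line.h"

/* Write a buffer as pkt-lines, flush, and report the packet count. */
static int send_payload(int fd, const char *buf, size_t len)
{
        int packets = 0;

        if (write_packetized_from_buf_no_flush_count(buf, len, fd, &packets))
                return -1;
        if (packet_flush_gently(fd))
                return -1;
        return packets;
}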
index 4de207752dc217d0b8c43b373f453c8d846d6173..b09128b188431857450336481898f27f8894a241 100644 (file)
@@ -620,6 +620,11 @@ int index_name_pos(struct index_state *istate, const char *name, int namelen)
        return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE);
 }
 
+int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen)
+{
+       return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE);
+}
+
 int index_entry_exists(struct index_state *istate, const char *name, int namelen)
 {
        return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0;
diff --git a/refs.c b/refs.c
index 90bcb2716873592864e2496951f913618521cb45..92819732ab75bb245f836a483fb63fc4ca2b5996 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -20,6 +20,7 @@
 #include "repository.h"
 #include "sigchain.h"
 #include "date.h"
+#include "commit.h"
 
 /*
  * List of all available backends
@@ -56,6 +57,88 @@ static unsigned char refname_disposition[256] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 4, 4
 };
 
+struct ref_namespace_info ref_namespace[] = {
+       [NAMESPACE_HEAD] = {
+               .ref = "HEAD",
+               .decoration = DECORATION_REF_HEAD,
+               .exact = 1,
+       },
+       [NAMESPACE_BRANCHES] = {
+               .ref = "refs/heads/",
+               .decoration = DECORATION_REF_LOCAL,
+       },
+       [NAMESPACE_TAGS] = {
+               .ref = "refs/tags/",
+               .decoration = DECORATION_REF_TAG,
+       },
+       [NAMESPACE_REMOTE_REFS] = {
+               /*
+                * The default refspec for new remotes copies refs from
+                * refs/heads/ on the remote into refs/remotes/<remote>/.
+                * As such, "refs/remotes/" has special handling.
+                */
+               .ref = "refs/remotes/",
+               .decoration = DECORATION_REF_REMOTE,
+       },
+       [NAMESPACE_STASH] = {
+               /*
+                * The single ref "refs/stash" stores the latest stash.
+                * Older stashes can be found in the reflog.
+                */
+               .ref = "refs/stash",
+               .exact = 1,
+               .decoration = DECORATION_REF_STASH,
+       },
+       [NAMESPACE_REPLACE] = {
+               /*
+                * This namespace allows Git to act as if one object ID
+                * points to the content of another. Unlike the other
+                * ref namespaces, this one can be changed by the
+                * GIT_REPLACE_REF_BASE environment variable. This
+                * .ref value will be overwritten in setup_git_env().
+                */
+               .ref = "refs/replace/",
+               .decoration = DECORATION_GRAFTED,
+       },
+       [NAMESPACE_NOTES] = {
+               /*
+                * The refs/notes/commits ref points to the tip of a
+                * parallel commit history that adds metadata to commits
+                * in the normal history. This ref can be overwritten
+                * by the core.notesRef config variable or the
+                * GIT_NOTES_REF environment variable.
+                */
+               .ref = "refs/notes/commits",
+               .exact = 1,
+       },
+       [NAMESPACE_PREFETCH] = {
+               /*
+                * Prefetch refs are written by the background 'fetch'
+                * maintenance task. This allows faster foreground fetches
+                * by advertising these previously-downloaded tips without
+                * updating refs/remotes/ until the user explicitly fetches.
+                */
+               .ref = "refs/prefetch/",
+       },
+       [NAMESPACE_REWRITTEN] = {
+               /*
+                * Rewritten refs are used by the 'label' command in the
+                * sequencer. These are particularly useful during an
+                * interactive rebase that uses the 'merge' command.
+                */
+               .ref = "refs/rewritten/",
+       },
+};
+
+void update_ref_namespace(enum ref_namespace namespace, char *ref)
+{
+       struct ref_namespace_info *info = &ref_namespace[namespace];
+       if (info->ref_updated)
+               free(info->ref);
+       info->ref = ref;
+       info->ref_updated = 1;
+}
+
 /*
  * Try to read one refname component from the front of refname.
  * Return the length of the component found, or -1 if the component is
@@ -455,11 +538,16 @@ void normalize_glob_ref(struct string_list_item *item, const char *prefix,
        if (*pattern == '/')
                BUG("pattern must not start with '/'");
 
-       if (prefix) {
+       if (prefix)
                strbuf_addstr(&normalized_pattern, prefix);
-       }
-       else if (!starts_with(pattern, "refs/"))
+       else if (!starts_with(pattern, "refs/") &&
+                  strcmp(pattern, "HEAD"))
                strbuf_addstr(&normalized_pattern, "refs/");
+       /*
+        * NEEDSWORK: Special case other symrefs such as REBASE_HEAD,
+        * MERGE_HEAD, etc.
+        */
+
        strbuf_addstr(&normalized_pattern, pattern);
        strbuf_strip_suffix(&normalized_pattern, "/");
 
@@ -1524,6 +1612,7 @@ int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
 
 int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data)
 {
+       const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
        return do_for_each_repo_ref(r, git_replace_ref_base, fn,
                                    strlen(git_replace_ref_base),
                                    DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
diff --git a/refs.h b/refs.h
index 47cb9edbaa8913c3af721d347744beefacb1e754..d6575b8c2bdf0d3e9021f7000b42308ff33203af 100644 (file)
--- a/refs.h
+++ b/refs.h
@@ -2,6 +2,7 @@
 #define REFS_H
 
 #include "cache.h"
+#include "commit.h"
 
 struct object_id;
 struct ref_store;
@@ -930,4 +931,49 @@ struct ref_store *get_main_ref_store(struct repository *r);
 struct ref_store *get_submodule_ref_store(const char *submodule);
 struct ref_store *get_worktree_ref_store(const struct worktree *wt);
 
+/*
+ * Some of the names specified by refs have special meaning to Git.
+ * Organize these namespaces in a common 'ref_namespace' array for
+ * reference from multiple places in the codebase.
+ */
+
+struct ref_namespace_info {
+       char *ref;
+       enum decoration_type decoration;
+
+       /*
+        * If 'exact' is true, then we must match the 'ref' exactly.
+        * Otherwise, use a prefix match.
+        *
+        * 'ref_updated' is for internal use. It represents whether the
+        * 'ref' value was replaced from its original literal version.
+        */
+       unsigned exact:1,
+                ref_updated:1;
+};
+
+enum ref_namespace {
+       NAMESPACE_HEAD,
+       NAMESPACE_BRANCHES,
+       NAMESPACE_TAGS,
+       NAMESPACE_REMOTE_REFS,
+       NAMESPACE_STASH,
+       NAMESPACE_REPLACE,
+       NAMESPACE_NOTES,
+       NAMESPACE_PREFETCH,
+       NAMESPACE_REWRITTEN,
+
+       /* Must be last */
+       NAMESPACE__COUNT
+};
+
+/* See refs.c for the contents of this array. */
+extern struct ref_namespace_info ref_namespace[NAMESPACE__COUNT];
+
+/*
+ * Some ref namespaces can be modified by config values or environment
+ * variables. Modify a namespace as specified by its ref_namespace key.
+ */
+void update_ref_namespace(enum ref_namespace namespace, char *ref);
+
 #endif /* REFS_H */
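
A consumer of this table typically walks all entries and honors the exact/prefix distinction, as the log-tree.c hunk earlier in this diff does for decorations. A minimal hedged sketch of such a classifier (illustrative only, not part of the patch):

#include "git-compat-util.h"
#include "refs.h"

/* Returns NAMESPACE__COUNT when the refname matches no known namespace. */
static enum ref_namespace classify_ref(const char *refname)
{
        int i;

        for (i = 0; i < NAMESPACE__COUNT; i++) {
                struct ref_namespace_info *info = &ref_namespace[i];

                if (info->exact ? !strcmp(refname, info->ref) :
                                  starts_with(refname, info->ref))
                        return i;
        }
        return NAMESPACE__COUNT;
}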
index f4eee11cc8b424fd92d43bc30bf9c3a0ae1b3492..ee702e498a07f6e8364e046eeb6e7d9212874c3e 100644 (file)
@@ -1105,7 +1105,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
                           struct commit_list **list, struct prio_queue *queue)
 {
        struct commit_list *parent = commit->parents;
-       unsigned left_flag;
+       unsigned pass_flags;
 
        if (commit->object.flags & ADDED)
                return 0;
@@ -1160,7 +1160,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
        if (revs->no_walk)
                return 0;
 
-       left_flag = (commit->object.flags & SYMMETRIC_LEFT);
+       pass_flags = (commit->object.flags & (SYMMETRIC_LEFT | ANCESTRY_PATH));
 
        for (parent = commit->parents; parent; parent = parent->next) {
                struct commit *p = parent->item;
@@ -1181,7 +1181,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
                        if (!*slot)
                                *slot = *revision_sources_at(revs->sources, commit);
                }
-               p->object.flags |= left_flag;
+               p->object.flags |= pass_flags;
                if (!(p->object.flags & SEEN)) {
                        p->object.flags |= (SEEN | NOT_USER_GIVEN);
                        if (list)
@@ -1304,13 +1304,24 @@ static int still_interesting(struct commit_list *src, timestamp_t date, int slop
 }
 
 /*
- * "rev-list --ancestry-path A..B" computes commits that are ancestors
- * of B but not ancestors of A but further limits the result to those
- * that are descendants of A.  This takes the list of bottom commits and
- * the result of "A..B" without --ancestry-path, and limits the latter
- * further to the ones that can reach one of the commits in "bottom".
+ * "rev-list --ancestry-path=C_0 [--ancestry-path=C_1 ...] A..B"
+ * computes commits that are ancestors of B but not of A, and further
+ * limits the result to those that have any of C in their
+ * ancestry path (i.e. are either ancestors of any of C, descendants
+ * of any of C, or are any of C). If --ancestry-path is specified with
+ * no commit, we use all bottom commits for C.
+ *
+ * Before this function is called, ancestors of C will have already
+ * been marked with ANCESTRY_PATH.
+ *
+ * This takes the list of bottom commits and the result of "A..B"
+ * without --ancestry-path, and limits the latter further to the ones
+ * that have any of C in their ancestry path. Since the ancestors of C
+ * have already been marked (a prerequisite of this function), we just
+ * need to mark the descendants, then exclude any commit that does not
+ * have any of these marks.
  */
-static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *list)
+static void limit_to_ancestry(struct commit_list *bottoms, struct commit_list *list)
 {
        struct commit_list *p;
        struct commit_list *rlist = NULL;
@@ -1323,7 +1334,7 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li
        for (p = list; p; p = p->next)
                commit_list_insert(p->item, &rlist);
 
-       for (p = bottom; p; p = p->next)
+       for (p = bottoms; p; p = p->next)
                p->item->object.flags |= TMP_MARK;
 
        /*
@@ -1356,38 +1367,39 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li
         */
 
        /*
-        * The ones that are not marked with TMP_MARK are uninteresting
+        * The ones that are not marked with either TMP_MARK or
+        * ANCESTRY_PATH are uninteresting
         */
        for (p = list; p; p = p->next) {
                struct commit *c = p->item;
-               if (c->object.flags & TMP_MARK)
+               if (c->object.flags & (TMP_MARK | ANCESTRY_PATH))
                        continue;
                c->object.flags |= UNINTERESTING;
        }
 
-       /* We are done with the TMP_MARK */
+       /* We are done with TMP_MARK and ANCESTRY_PATH */
        for (p = list; p; p = p->next)
-               p->item->object.flags &= ~TMP_MARK;
-       for (p = bottom; p; p = p->next)
-               p->item->object.flags &= ~TMP_MARK;
+               p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH);
+       for (p = bottoms; p; p = p->next)
+               p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH);
        free_commit_list(rlist);
 }
 
 /*
- * Before walking the history, keep the set of "negative" refs the
- * caller has asked to exclude.
+ * Before walking the history, add the set of "negative" refs the
+ * caller has asked to exclude to the bottom list.
  *
  * This is used to compute "rev-list --ancestry-path A..B", as we need
  * to filter the result of "A..B" further to the ones that can actually
  * reach A.
  */
-static struct commit_list *collect_bottom_commits(struct commit_list *list)
+static void collect_bottom_commits(struct commit_list *list,
+                                  struct commit_list **bottom)
 {
-       struct commit_list *elem, *bottom = NULL;
+       struct commit_list *elem;
        for (elem = list; elem; elem = elem->next)
                if (elem->item->object.flags & BOTTOM)
-                       commit_list_insert(elem->item, &bottom);
-       return bottom;
+                       commit_list_insert(elem->item, bottom);
 }
 
 /* Assumes either left_only or right_only is set */
@@ -1414,12 +1426,12 @@ static int limit_list(struct rev_info *revs)
        struct commit_list *original_list = revs->commits;
        struct commit_list *newlist = NULL;
        struct commit_list **p = &newlist;
-       struct commit_list *bottom = NULL;
        struct commit *interesting_cache = NULL;
 
-       if (revs->ancestry_path) {
-               bottom = collect_bottom_commits(original_list);
-               if (!bottom)
+       if (revs->ancestry_path_implicit_bottoms) {
+               collect_bottom_commits(original_list,
+                                      &revs->ancestry_path_bottoms);
+               if (!revs->ancestry_path_bottoms)
                        die("--ancestry-path given but there are no bottom commits");
        }
 
@@ -1464,9 +1476,8 @@ static int limit_list(struct rev_info *revs)
        if (revs->left_only || revs->right_only)
                limit_left_right(newlist, revs);
 
-       if (bottom)
-               limit_to_ancestry(bottom, newlist);
-       free_commit_list(bottom);
+       if (revs->ancestry_path)
+               limit_to_ancestry(revs->ancestry_path_bottoms, newlist);
 
        /*
         * Check if any commits have become TREESAME by some of their parents
@@ -2213,7 +2224,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                               const struct setup_revision_opt* opt)
 {
        const char *arg = argv[0];
-       const char *optarg;
+       const char *optarg = NULL;
        int argcount;
        const unsigned hexsz = the_hash_algo->hexsz;
 
@@ -2284,6 +2295,23 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                revs->ancestry_path = 1;
                revs->simplify_history = 0;
                revs->limited = 1;
+               revs->ancestry_path_implicit_bottoms = 1;
+       } else if (skip_prefix(arg, "--ancestry-path=", &optarg)) {
+               struct commit *c;
+               struct object_id oid;
+               const char *msg = _("could not get commit for ancestry-path argument %s");
+
+               revs->ancestry_path = 1;
+               revs->simplify_history = 0;
+               revs->limited = 1;
+
+               if (repo_get_oid_committish(revs->repo, optarg, &oid))
+                       return error(msg, optarg);
+               get_reference(revs, optarg, &oid, ANCESTRY_PATH);
+               c = lookup_commit_reference(revs->repo, &oid);
+               if (!c)
+                       return error(msg, optarg);
+               commit_list_insert(c, &revs->ancestry_path_bottoms);
        } else if (!strcmp(arg, "-g") || !strcmp(arg, "--walk-reflogs")) {
                init_reflog_walk(&revs->reflog_info);
        } else if (!strcmp(arg, "--default")) {
@@ -2993,6 +3021,7 @@ static void release_revisions_topo_walk_info(struct topo_walk_info *info);
 void release_revisions(struct rev_info *revs)
 {
        free_commit_list(revs->commits);
+       free_commit_list(revs->ancestry_path_bottoms);
        object_array_clear(&revs->pending);
        object_array_clear(&revs->boundary_commits);
        release_revisions_cmdline(&revs->cmdline);
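
The new --ancestry-path=<commit> form described in the revision.c comment above can be illustrated on a small made-up history (this example is not taken from the patch or its tests):

    A---B---C---D
         \     /
          E---F

Here "git rev-list A..D" lists B, C, E, F and D. Adding "--ancestry-path=E" drops C, which is neither an ancestor nor a descendant of E, while B (an ancestor of E), F and D (descendants of E), and E itself remain.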
index bb91e7ed9148644355e03e6b3190cebf767289a1..61a9b1316b8a69898a70631f1f127156b13d1849 100644 (file)
@@ -48,6 +48,7 @@
  */
 #define NOT_USER_GIVEN (1u<<25)
 #define TRACK_LINEAR   (1u<<26)
+#define ANCESTRY_PATH  (1u<<27)
 #define ALL_REV_FLAGS  (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR | PULL_MERGE)
 
 #define DECORATE_SHORT_REFS    1
@@ -164,6 +165,13 @@ struct rev_info {
                        cherry_mark:1,
                        bisect:1,
                        ancestry_path:1,
+
+                       /* True if --ancestry-path was specified without an
+                        * argument. The bottom revisions are implicitly
+                        * the arguments in this case.
+                        */
+                       ancestry_path_implicit_bottoms:1,
+
                        first_parent_only:1,
                        exclude_first_parent_only:1,
                        line_level_traverse:1,
@@ -306,6 +314,7 @@ struct rev_info {
        struct saved_parents *saved_parents_slab;
 
        struct commit_list *previous_parents;
+       struct commit_list *ancestry_path_bottoms;
        const char *break_bar;
 
        struct revision_sources *sources;
index 14f17830f51254511e2c184293a6816a132d287f..5ec3a46dccf959bd54af42fbeaaf4027dc64996a 100644 (file)
@@ -10,6 +10,7 @@
 #include "config.h"
 #include "packfile.h"
 #include "hook.h"
+#include "compat/nonblock.h"
 
 void child_process_init(struct child_process *child)
 {
@@ -1364,12 +1365,25 @@ static int pump_io_round(struct io_pump *slots, int nr, struct pollfd *pfd)
                        continue;
 
                if (io->type == POLLOUT) {
-                       ssize_t len = xwrite(io->fd,
-                                            io->u.out.buf, io->u.out.len);
+                       ssize_t len;
+
+                       /*
+                        * Don't use xwrite() here. It loops forever on EAGAIN,
+                        * and we're in our own poll() loop here.
+                        *
+                        * Note that we lose xwrite()'s handling of MAX_IO_SIZE
+                        * and EINTR, so we have to implement those ourselves.
+                        */
+                       len = write(io->fd, io->u.out.buf,
+                                   io->u.out.len <= MAX_IO_SIZE ?
+                                   io->u.out.len : MAX_IO_SIZE);
                        if (len < 0) {
-                               io->error = errno;
-                               close(io->fd);
-                               io->fd = -1;
+                               if (errno != EINTR && errno != EAGAIN &&
+                                   errno != ENOSPC) {
+                                       io->error = errno;
+                                       close(io->fd);
+                                       io->fd = -1;
+                               }
                        } else {
                                io->u.out.buf += len;
                                io->u.out.len -= len;
@@ -1438,6 +1452,15 @@ int pipe_command(struct child_process *cmd,
                return -1;
 
        if (in) {
+               if (enable_pipe_nonblock(cmd->in) < 0) {
+                       error_errno("unable to make pipe non-blocking");
+                       close(cmd->in);
+                       if (out)
+                               close(cmd->out);
+                       if (err)
+                               close(cmd->err);
+                       return -1;
+               }
                io[nr].fd = cmd->in;
                io[nr].type = POLLOUT;
                io[nr].u.out.buf = in;
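
pipe_command() above now depends on enable_pipe_nonblock() from the new compat/nonblock.[ch] files, which appear in the file list but not in this excerpt. On POSIX systems such a helper is typically a thin fcntl(2) wrapper; the sketch below is an assumption about its shape, not the actual compat/nonblock.c (which also needs a Windows variant):

#include <fcntl.h>

/* Hypothetical POSIX sketch of enable_pipe_nonblock(); not the real code. */
int enable_pipe_nonblock(int fd)
{
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
                return -1;
        return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}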
diff --git a/t/helper/test-rot13-filter.c b/t/helper/test-rot13-filter.c
new file mode 100644 (file)
index 0000000..f8d564c
--- /dev/null
@@ -0,0 +1,382 @@
+/*
+ * Example implementation for the Git filter protocol version 2
+ * See Documentation/gitattributes.txt, section "Filter Protocol"
+ *
+ * Usage: test-tool rot13-filter [--always-delay] --log=<path> <capabilities>
+ *
+ * Log path defines a debug log file that the script writes to. The
+ * subsequent arguments define a list of supported protocol capabilities
+ * ("clean", "smudge", etc).
+ *
+ * When --always-delay is given, all pathnames with the "can-delay" flag
+ * that don't appear on the list below are delayed with a count of 1
+ * (see more below).
+ *
+ * This implementation supports special test cases:
+ * (1) If data with the pathname "clean-write-fail.r" is processed with
+ *     a "clean" operation then the write operation will die.
+ * (2) If data with the pathname "smudge-write-fail.r" is processed with
+ *     a "smudge" operation then the write operation will die.
+ * (3) If data with the pathname "error.r" is processed with any
+ *     operation then the filter signals that it cannot or does not want
+ *     to process the file.
+ * (4) If data with the pathname "abort.r" is processed with any
+ *     operation then the filter signals that it cannot or does not want
+ *     to process the file and any file after that is processed with the
+ *     same command.
+ * (5) If data with a pathname that is a key in the delay hash is
+ *     requested (e.g. "test-delay10.a") then the filter responds with
+ *     a "delay" status and sets the "requested" field in the delay hash.
+ *     The filter will signal the availability of this object after
+ *     "count" (field in delay hash) "list_available_blobs" commands.
+ * (6) If data with the pathname "missing-delay.a" is processed then the
+ *     filter will drop the path from the "list_available_blobs" response.
+ * (7) If data with the pathname "invalid-delay.a" is processed then the
+ *     filter will add the path "unfiltered", which was not delayed before,
+ *     to the "list_available_blobs" response.
+ */
+
+#include "test-tool.h"
+#include "pkt-line.h"
+#include "string-list.h"
+#include "strmap.h"
+#include "parse-options.h"
+
+static FILE *logfile;
+static int always_delay, has_clean_cap, has_smudge_cap;
+static struct strmap delay = STRMAP_INIT;
+
+static inline const char *str_or_null(const char *str)
+{
+       return str ? str : "(null)";
+}
+
+static char *rot13(char *str)
+{
+       char *c;
+       for (c = str; *c; c++)
+               if (isalpha(*c))
+                       *c += tolower(*c) < 'n' ? 13 : -13;
+       return str;
+}
+
+static char *get_value(char *buf, const char *key)
+{
+       const char *orig_buf = buf;
+       if (!buf ||
+           !skip_prefix((const char *)buf, key, (const char **)&buf) ||
+           !skip_prefix((const char *)buf, "=", (const char **)&buf) ||
+           !*buf)
+               die("expected key '%s', got '%s'", key, str_or_null(orig_buf));
+       return buf;
+}
+
+/*
+ * Read a text packet, expecting that it is in the form "key=value" for
+ * the given key. An EOF does not trigger any error and is reported
+ * back to the caller with NULL. Die if the "key" part of "key=value" does
+ * not match the given key, or the value part is empty.
+ */
+static char *packet_key_val_read(const char *key)
+{
+       char *buf;
+       if (packet_read_line_gently(0, NULL, &buf) < 0)
+               return NULL;
+       return xstrdup(get_value(buf, key));
+}
+
+static inline void assert_remote_capability(struct strset *caps, const char *cap)
+{
+       if (!strset_contains(caps, cap))
+               die("required '%s' capability not available from remote", cap);
+}
+
+static void read_capabilities(struct strset *remote_caps)
+{
+       for (;;) {
+               char *buf = packet_read_line(0, NULL);
+               if (!buf)
+                       break;
+               strset_add(remote_caps, get_value(buf, "capability"));
+       }
+
+       assert_remote_capability(remote_caps, "clean");
+       assert_remote_capability(remote_caps, "smudge");
+       assert_remote_capability(remote_caps, "delay");
+}
+
+static void check_and_write_capabilities(struct strset *remote_caps,
+                                        const char **caps, int nr_caps)
+{
+       int i;
+       for (i = 0; i < nr_caps; i++) {
+               if (!strset_contains(remote_caps, caps[i]))
+                       die("our capability '%s' is not available from remote",
+                           caps[i]);
+               packet_write_fmt(1, "capability=%s\n", caps[i]);
+       }
+       packet_flush(1);
+}
+
+struct delay_entry {
+       int requested, count;
+       char *output;
+};
+
+static void free_delay_entries(void)
+{
+       struct hashmap_iter iter;
+       struct strmap_entry *ent;
+
+       strmap_for_each_entry(&delay, &iter, ent) {
+               struct delay_entry *delay_entry = ent->value;
+               free(delay_entry->output);
+               free(delay_entry);
+       }
+       strmap_clear(&delay, 0);
+}
+
+static void add_delay_entry(char *pathname, int count, int requested)
+{
+       struct delay_entry *entry = xcalloc(1, sizeof(*entry));
+       entry->count = count;
+       entry->requested = requested;
+       if (strmap_put(&delay, pathname, entry))
+               BUG("adding the same path twice to delay hash?");
+}
+
+static void reply_list_available_blobs_cmd(void)
+{
+       struct hashmap_iter iter;
+       struct strmap_entry *ent;
+       struct string_list_item *str_item;
+       struct string_list paths = STRING_LIST_INIT_NODUP;
+
+       /* flush */
+       if (packet_read_line(0, NULL))
+               die("bad list_available_blobs end");
+
+       strmap_for_each_entry(&delay, &iter, ent) {
+               struct delay_entry *delay_entry = ent->value;
+               if (!delay_entry->requested)
+                       continue;
+               delay_entry->count--;
+               if (!strcmp(ent->key, "invalid-delay.a")) {
+                       /* Send Git a pathname that was not delayed earlier */
+                       packet_write_fmt(1, "pathname=unfiltered");
+               }
+               if (!strcmp(ent->key, "missing-delay.a")) {
+                       /* Do not signal Git that this file is available */
+               } else if (!delay_entry->count) {
+                       string_list_append(&paths, ent->key);
+                       packet_write_fmt(1, "pathname=%s", ent->key);
+               }
+       }
+
+       /* Print paths in sorted order. */
+       string_list_sort(&paths);
+       for_each_string_list_item(str_item, &paths)
+               fprintf(logfile, " %s", str_item->string);
+       string_list_clear(&paths, 0);
+
+       packet_flush(1);
+
+       fprintf(logfile, " [OK]\n");
+       packet_write_fmt(1, "status=success");
+       packet_flush(1);
+}
+
+static void command_loop(void)
+{
+       for (;;) {
+               char *buf, *output;
+               char *pathname;
+               struct delay_entry *entry;
+               struct strbuf input = STRBUF_INIT;
+               char *command = packet_key_val_read("command");
+
+               if (!command) {
+                       fprintf(logfile, "STOP\n");
+                       break;
+               }
+               fprintf(logfile, "IN: %s", command);
+
+               if (!strcmp(command, "list_available_blobs")) {
+                       reply_list_available_blobs_cmd();
+                       free(command);
+                       continue;
+               }
+
+               pathname = packet_key_val_read("pathname");
+               if (!pathname)
+                       die("unexpected EOF while expecting pathname");
+               fprintf(logfile, " %s", pathname);
+
+               /* Read until flush */
+               while ((buf = packet_read_line(0, NULL))) {
+                       if (!strcmp(buf, "can-delay=1")) {
+                               entry = strmap_get(&delay, pathname);
+                               if (entry && !entry->requested)
+                                       entry->requested = 1;
+                               else if (!entry && always_delay)
+                                       add_delay_entry(pathname, 1, 1);
+                       } else if (starts_with(buf, "ref=") ||
+                                  starts_with(buf, "treeish=") ||
+                                  starts_with(buf, "blob=")) {
+                               fprintf(logfile, " %s", buf);
+                       } else {
+                               /*
+                                * In general, filters need to be graceful about
+                                * new metadata, since it's documented that we
+                                * can pass any key-value pairs, but for tests,
+                                * let's be a little stricter.
+                                */
+                               die("Unknown message '%s'", buf);
+                       }
+               }
+
+               read_packetized_to_strbuf(0, &input, 0);
+               fprintf(logfile, " %"PRIuMAX" [OK] -- ", (uintmax_t)input.len);
+
+               entry = strmap_get(&delay, pathname);
+               if (entry && entry->output) {
+                       output = entry->output;
+               } else if (!strcmp(pathname, "error.r") || !strcmp(pathname, "abort.r")) {
+                       output = "";
+               } else if (!strcmp(command, "clean") && has_clean_cap) {
+                       output = rot13(input.buf);
+               } else if (!strcmp(command, "smudge") && has_smudge_cap) {
+                       output = rot13(input.buf);
+               } else {
+                       die("bad command '%s'", command);
+               }
+
+               if (!strcmp(pathname, "error.r")) {
+                       fprintf(logfile, "[ERROR]\n");
+                       packet_write_fmt(1, "status=error");
+                       packet_flush(1);
+               } else if (!strcmp(pathname, "abort.r")) {
+                       fprintf(logfile, "[ABORT]\n");
+                       packet_write_fmt(1, "status=abort");
+                       packet_flush(1);
+               } else if (!strcmp(command, "smudge") &&
+                          (entry = strmap_get(&delay, pathname)) &&
+                          entry->requested == 1) {
+                       fprintf(logfile, "[DELAYED]\n");
+                       packet_write_fmt(1, "status=delayed");
+                       packet_flush(1);
+                       entry->requested = 2;
+                       if (entry->output != output) {
+                               free(entry->output);
+                               entry->output = xstrdup(output);
+                       }
+               } else {
+                       int i, nr_packets = 0;
+                       size_t output_len;
+                       const char *p;
+                       packet_write_fmt(1, "status=success");
+                       packet_flush(1);
+
+                       if (skip_prefix(pathname, command, &p) &&
+                           !strcmp(p, "-write-fail.r")) {
+                               fprintf(logfile, "[WRITE FAIL]\n");
+                               die("%s write error", command);
+                       }
+
+                       output_len = strlen(output);
+                       fprintf(logfile, "OUT: %"PRIuMAX" ", (uintmax_t)output_len);
+
+                       if (write_packetized_from_buf_no_flush_count(output,
+                               output_len, 1, &nr_packets))
+                               die("failed to write buffer to stdout");
+                       packet_flush(1);
+
+                       for (i = 0; i < nr_packets; i++)
+                               fprintf(logfile, ".");
+                       fprintf(logfile, " [OK]\n");
+
+                       packet_flush(1);
+               }
+               free(pathname);
+               strbuf_release(&input);
+               free(command);
+       }
+}
+
+static void packet_initialize(void)
+{
+       char *pkt_buf = packet_read_line(0, NULL);
+
+       if (!pkt_buf || strcmp(pkt_buf, "git-filter-client"))
+               die("bad initialize: '%s'", str_or_null(pkt_buf));
+
+       pkt_buf = packet_read_line(0, NULL);
+       if (!pkt_buf || strcmp(pkt_buf, "version=2"))
+               die("bad version: '%s'", str_or_null(pkt_buf));
+
+       pkt_buf = packet_read_line(0, NULL);
+       if (pkt_buf)
+               die("bad version end: '%s'", pkt_buf);
+
+       packet_write_fmt(1, "git-filter-server");
+       packet_write_fmt(1, "version=2");
+       packet_flush(1);
+}
+
+static const char *rot13_usage[] = {
+       "test-tool rot13-filter [--always-delay] --log=<path> <capabilities>",
+       NULL
+};
+
+int cmd__rot13_filter(int argc, const char **argv)
+{
+       int i, nr_caps;
+       struct strset remote_caps = STRSET_INIT;
+       const char *log_path = NULL;
+
+       struct option options[] = {
+               OPT_BOOL(0, "always-delay", &always_delay,
+                        "delay all paths with the can-delay flag"),
+               OPT_STRING(0, "log", &log_path, "path",
+                          "path to the debug log file"),
+               OPT_END()
+       };
+       nr_caps = parse_options(argc, argv, NULL, options, rot13_usage,
+                               PARSE_OPT_STOP_AT_NON_OPTION);
+
+       if (!log_path || !nr_caps)
+               usage_with_options(rot13_usage, options);
+
+       logfile = fopen(log_path, "a");
+       if (!logfile)
+               die_errno("failed to open log file");
+
+       for (i = 0; i < nr_caps; i++) {
+               if (!strcmp(argv[i], "smudge"))
+                       has_smudge_cap = 1;
+               if (!strcmp(argv[i], "clean"))
+                       has_clean_cap = 1;
+       }
+
+       add_delay_entry("test-delay10.a", 1, 0);
+       add_delay_entry("test-delay11.a", 1, 0);
+       add_delay_entry("test-delay20.a", 2, 0);
+       add_delay_entry("test-delay10.b", 1, 0);
+       add_delay_entry("missing-delay.a", 1, 0);
+       add_delay_entry("invalid-delay.a", 1, 0);
+
+       fprintf(logfile, "START\n");
+       packet_initialize();
+
+       read_capabilities(&remote_caps);
+       check_and_write_capabilities(&remote_caps, argv, nr_caps);
+       fprintf(logfile, "init handshake complete\n");
+       strset_clear(&remote_caps);
+
+       command_loop();
+
+       if (fclose(logfile))
+               die_errno("error closing logfile");
+       free_delay_entries();
+       return 0;
+}
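
The helper above exchanges pkt-lines with Git through packet_read_line(), packet_write_fmt() and packet_flush(). As a reminder of the framing those calls imply, here is a minimal, hypothetical sketch (the demo_* names are illustrative and not part of pkt-line.h): each packet carries a 4-digit hexadecimal length that counts the prefix itself, and a bare "0000" acts as a flush packet.

#include <stdio.h>
#include <string.h>

/* Write one text pkt-line: "command=clean\n" becomes "0012command=clean\n". */
static void demo_write_pkt(FILE *out, const char *payload)
{
	fprintf(out, "%04x%s", (unsigned int)(strlen(payload) + 4), payload);
}

/* A flush packet is just the length "0000" with no payload. */
static void demo_write_flush(FILE *out)
{
	fputs("0000", out);
}
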
index 318fdbab0c315cd0823b76de2864d9689dc5c377..d6a560f83254f42edc430faf704d4b3f54d222ac 100644 (file)
@@ -65,6 +65,7 @@ static struct test_cmd cmds[] = {
        { "read-midx", cmd__read_midx },
        { "ref-store", cmd__ref_store },
        { "reftable", cmd__reftable },
+       { "rot13-filter", cmd__rot13_filter },
        { "dump-reftable", cmd__dump_reftable },
        { "regex", cmd__regex },
        { "repository", cmd__repository },
index bb79927163196cd4815d32fb45ee82a6e1f0bdfc..21a91b10195743081d7e376bcf1c30f0f17b6a61 100644 (file)
@@ -54,6 +54,7 @@ int cmd__read_cache(int argc, const char **argv);
 int cmd__read_graph(int argc, const char **argv);
 int cmd__read_midx(int argc, const char **argv);
 int cmd__ref_store(int argc, const char **argv);
+int cmd__rot13_filter(int argc, const char **argv);
 int cmd__reftable(int argc, const char **argv);
 int cmd__regex(int argc, const char **argv);
 int cmd__repository(int argc, const char **argv);
index c181110a43931fb2a8131270a4d8bdcb0a2e7a4b..fce8151d41cbbd9eadd098b26c864d61b5b74bcc 100755 (executable)
@@ -123,5 +123,6 @@ test_perf_on_all git blame $SPARSE_CONE/f3/a
 test_perf_on_all git read-tree -mu HEAD
 test_perf_on_all git checkout-index -f --all
 test_perf_on_all git update-index --add --remove $SPARSE_CONE/a
+test_perf_on_all "git rm -f $SPARSE_CONE/a && git checkout HEAD -- $SPARSE_CONE/a"
 
 test_done
index 1c840348bd1eec848850c9679fb5e0368c5348da..abecd75e4e430b6a1182690ae54a0e5d4e76bc32 100755 (executable)
@@ -17,9 +17,6 @@ tr \
   'nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM'
 EOF
 
-write_script rot13-filter.pl "$PERL_PATH" \
-       <"$TEST_DIRECTORY"/t0021/rot13-filter.pl
-
 generate_random_characters () {
        LEN=$1
        NAME=$2
@@ -365,8 +362,8 @@ test_expect_success 'diff does not reuse worktree files that need cleaning' '
        test_line_count = 0 count
 '
 
-test_expect_success PERL 'required process filter should filter data' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter should filter data' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
        rm -rf repo &&
        mkdir repo &&
@@ -450,8 +447,8 @@ test_expect_success PERL 'required process filter should filter data' '
        )
 '
 
-test_expect_success PERL 'required process filter should filter data for various subcommands' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter should filter data for various subcommands' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
        (
                cd repo &&
@@ -561,9 +558,9 @@ test_expect_success PERL 'required process filter should filter data for various
        )
 '
 
-test_expect_success PERL 'required process filter takes precedence' '
+test_expect_success 'required process filter takes precedence' '
        test_config_global filter.protocol.clean false &&
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" &&
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" &&
        test_config_global filter.protocol.required true &&
        rm -rf repo &&
        mkdir repo &&
@@ -587,8 +584,8 @@ test_expect_success PERL 'required process filter takes precedence' '
        )
 '
 
-test_expect_success PERL 'required process filter should be used only for "clean" operation only' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" &&
+test_expect_success 'required process filter should be used only for "clean" operation' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -622,8 +619,8 @@ test_expect_success PERL 'required process filter should be used only for "clean
        )
 '
 
-test_expect_success PERL 'required process filter should process multiple packets' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter should process multiple packets' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
 
        rm -rf repo &&
@@ -687,8 +684,8 @@ test_expect_success PERL 'required process filter should process multiple packet
        )
 '
 
-test_expect_success PERL 'required process filter with clean error should fail' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'required process filter with clean error should fail' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        test_config_global filter.protocol.required true &&
        rm -rf repo &&
        mkdir repo &&
@@ -706,8 +703,8 @@ test_expect_success PERL 'required process filter with clean error should fail'
        )
 '
 
-test_expect_success PERL 'process filter should restart after unexpected write failure' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'process filter should restart after unexpected write failure' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -735,7 +732,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f
                rm -f debug.log &&
                git checkout --quiet --no-progress . 2>git-stderr.log &&
 
-               grep "smudge write error at" git-stderr.log &&
+               grep "smudge write error" git-stderr.log &&
                test_i18ngrep "error: external filter" git-stderr.log &&
 
                cat >expected.log <<-EOF &&
@@ -761,8 +758,8 @@ test_expect_success PERL 'process filter should restart after unexpected write f
        )
 '
 
-test_expect_success PERL 'process filter should not be restarted if it signals an error' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'process filter should not be restarted if it signals an error' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -804,8 +801,8 @@ test_expect_success PERL 'process filter should not be restarted if it signals a
        )
 '
 
-test_expect_success PERL 'process filter abort stops processing of all further files' '
-       test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
+test_expect_success 'process filter abort stops processing of all further files' '
+       test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" &&
        rm -rf repo &&
        mkdir repo &&
        (
@@ -861,10 +858,10 @@ test_expect_success PERL 'invalid process filter must fail (and not hang!)' '
        )
 '
 
-test_expect_success PERL 'delayed checkout in process filter' '
-       test_config_global filter.a.process "rot13-filter.pl a.log clean smudge delay" &&
+test_expect_success 'delayed checkout in process filter' '
+       test_config_global filter.a.process "test-tool rot13-filter --log=a.log clean smudge delay" &&
        test_config_global filter.a.required true &&
-       test_config_global filter.b.process "rot13-filter.pl b.log clean smudge delay" &&
+       test_config_global filter.b.process "test-tool rot13-filter --log=b.log clean smudge delay" &&
        test_config_global filter.b.required true &&
 
        rm -rf repo &&
@@ -940,8 +937,8 @@ test_expect_success PERL 'delayed checkout in process filter' '
        )
 '
 
-test_expect_success PERL 'missing file in delayed checkout' '
-       test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" &&
+test_expect_success 'missing file in delayed checkout' '
+       test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" &&
        test_config_global filter.bug.required true &&
 
        rm -rf repo &&
@@ -960,8 +957,8 @@ test_expect_success PERL 'missing file in delayed checkout' '
        grep "error: .missing-delay\.a. was not filtered properly" git-stderr.log
 '
 
-test_expect_success PERL 'invalid file in delayed checkout' '
-       test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" &&
+test_expect_success 'invalid file in delayed checkout' '
+       test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" &&
        test_config_global filter.bug.required true &&
 
        rm -rf repo &&
@@ -990,10 +987,10 @@ do
                mode_prereq='UTF8_NFD_TO_NFC' ;;
        esac
 
-       test_expect_success PERL,SYMLINKS,$mode_prereq \
+       test_expect_success SYMLINKS,$mode_prereq \
        "delayed checkout with $mode-collision don't write to the wrong place" '
                test_config_global filter.delay.process \
-                       "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" &&
+                       "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" &&
                test_config_global filter.delay.required true &&
 
                git init $mode-collision &&
@@ -1026,12 +1023,12 @@ do
        '
 done
 
-test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \
+test_expect_success SYMLINKS,CASE_INSENSITIVE_FS \
 "delayed checkout with submodule collision don't write to the wrong place" '
        git init collision-with-submodule &&
        (
                cd collision-with-submodule &&
-               git config filter.delay.process "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" &&
+               git config filter.delay.process "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" &&
                git config filter.delay.required true &&
 
                # We need Git to treat the submodule "a" and the
@@ -1062,11 +1059,11 @@ test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \
        )
 '
 
-test_expect_success PERL 'setup for progress tests' '
+test_expect_success 'setup for progress tests' '
        git init progress &&
        (
                cd progress &&
-               git config filter.delay.process "rot13-filter.pl delay-progress.log clean smudge delay" &&
+               git config filter.delay.process "test-tool rot13-filter --log=delay-progress.log clean smudge delay" &&
                git config filter.delay.required true &&
 
                echo "*.a filter=delay" >.gitattributes &&
@@ -1132,12 +1129,12 @@ do
        '
 done
 
-test_expect_success PERL 'delayed checkout correctly reports the number of updated entries' '
+test_expect_success 'delayed checkout correctly reports the number of updated entries' '
        rm -rf repo &&
        git init repo &&
        (
                cd repo &&
-               git config filter.delay.process "../rot13-filter.pl delayed.log clean smudge delay" &&
+               git config filter.delay.process "test-tool rot13-filter --log=delayed.log clean smudge delay" &&
                git config filter.delay.required true &&
 
                echo "*.a filter=delay" >.gitattributes &&
diff --git a/t/t0021/rot13-filter.pl b/t/t0021/rot13-filter.pl
deleted file mode 100644 (file)
index 7bb9376..0000000
+++ /dev/null
@@ -1,247 +0,0 @@
-#
-# Example implementation for the Git filter protocol version 2
-# See Documentation/gitattributes.txt, section "Filter Protocol"
-#
-# Usage: rot13-filter.pl [--always-delay] <log path> <capabilities>
-#
-# Log path defines a debug log file that the script writes to. The
-# subsequent arguments define a list of supported protocol capabilities
-# ("clean", "smudge", etc).
-#
-# When --always-delay is given all pathnames with the "can-delay" flag
-# that don't appear on the list bellow are delayed with a count of 1
-# (see more below).
-#
-# This implementation supports special test cases:
-# (1) If data with the pathname "clean-write-fail.r" is processed with
-#     a "clean" operation then the write operation will die.
-# (2) If data with the pathname "smudge-write-fail.r" is processed with
-#     a "smudge" operation then the write operation will die.
-# (3) If data with the pathname "error.r" is processed with any
-#     operation then the filter signals that it cannot or does not want
-#     to process the file.
-# (4) If data with the pathname "abort.r" is processed with any
-#     operation then the filter signals that it cannot or does not want
-#     to process the file and any file after that is processed with the
-#     same command.
-# (5) If data with a pathname that is a key in the DELAY hash is
-#     requested (e.g. "test-delay10.a") then the filter responds with
-#     a "delay" status and sets the "requested" field in the DELAY hash.
-#     The filter will signal the availability of this object after
-#     "count" (field in DELAY hash) "list_available_blobs" commands.
-# (6) If data with the pathname "missing-delay.a" is processed that the
-#     filter will drop the path from the "list_available_blobs" response.
-# (7) If data with the pathname "invalid-delay.a" is processed that the
-#     filter will add the path "unfiltered" which was not delayed before
-#     to the "list_available_blobs" response.
-#
-
-use 5.008;
-sub gitperllib {
-       # Git assumes that all path lists are Unix-y colon-separated ones. But
-       # when the Git for Windows executes the test suite, its MSYS2 Bash
-       # calls git.exe, and colon-separated path lists are converted into
-       # Windows-y semicolon-separated lists of *Windows* paths (which
-       # naturally contain a colon after the drive letter, so splitting by
-       # colons simply does not cut it).
-       #
-       # Detect semicolon-separated path list and handle them appropriately.
-
-       if ($ENV{GITPERLLIB} =~ /;/) {
-               return split(/;/, $ENV{GITPERLLIB});
-       }
-       return split(/:/, $ENV{GITPERLLIB});
-}
-use lib (gitperllib());
-use strict;
-use warnings;
-use IO::File;
-use Git::Packet;
-
-my $MAX_PACKET_CONTENT_SIZE = 65516;
-
-my $always_delay = 0;
-if ( $ARGV[0] eq '--always-delay' ) {
-       $always_delay = 1;
-       shift @ARGV;
-}
-
-my $log_file                = shift @ARGV;
-my @capabilities            = @ARGV;
-
-open my $debug, ">>", $log_file or die "cannot open log file: $!";
-
-my %DELAY = (
-       'test-delay10.a' => { "requested" => 0, "count" => 1 },
-       'test-delay11.a' => { "requested" => 0, "count" => 1 },
-       'test-delay20.a' => { "requested" => 0, "count" => 2 },
-       'test-delay10.b' => { "requested" => 0, "count" => 1 },
-       'missing-delay.a' => { "requested" => 0, "count" => 1 },
-       'invalid-delay.a' => { "requested" => 0, "count" => 1 },
-);
-
-sub rot13 {
-       my $str = shift;
-       $str =~ y/A-Za-z/N-ZA-Mn-za-m/;
-       return $str;
-}
-
-print $debug "START\n";
-$debug->flush();
-
-packet_initialize("git-filter", 2);
-
-my %remote_caps = packet_read_and_check_capabilities("clean", "smudge", "delay");
-packet_check_and_write_capabilities(\%remote_caps, @capabilities);
-
-print $debug "init handshake complete\n";
-$debug->flush();
-
-while (1) {
-       my ( $res, $command ) = packet_key_val_read("command");
-       if ( $res == -1 ) {
-               print $debug "STOP\n";
-               exit();
-       }
-       print $debug "IN: $command";
-       $debug->flush();
-
-       if ( $command eq "list_available_blobs" ) {
-               # Flush
-               packet_compare_lists([1, ""], packet_bin_read()) ||
-                       die "bad list_available_blobs end";
-
-               foreach my $pathname ( sort keys %DELAY ) {
-                       if ( $DELAY{$pathname}{"requested"} >= 1 ) {
-                               $DELAY{$pathname}{"count"} = $DELAY{$pathname}{"count"} - 1;
-                               if ( $pathname eq "invalid-delay.a" ) {
-                                       # Send Git a pathname that was not delayed earlier
-                                       packet_txt_write("pathname=unfiltered");
-                               }
-                               if ( $pathname eq "missing-delay.a" ) {
-                                       # Do not signal Git that this file is available
-                               } elsif ( $DELAY{$pathname}{"count"} == 0 ) {
-                                       print $debug " $pathname";
-                                       packet_txt_write("pathname=$pathname");
-                               }
-                       }
-               }
-
-               packet_flush();
-
-               print $debug " [OK]\n";
-               $debug->flush();
-               packet_txt_write("status=success");
-               packet_flush();
-       } else {
-               my ( $res, $pathname ) = packet_key_val_read("pathname");
-               if ( $res == -1 ) {
-                       die "unexpected EOF while expecting pathname";
-               }
-               print $debug " $pathname";
-               $debug->flush();
-
-               # Read until flush
-               my ( $done, $buffer ) = packet_txt_read();
-               while ( $buffer ne '' ) {
-                       if ( $buffer eq "can-delay=1" ) {
-                               if ( exists $DELAY{$pathname} and $DELAY{$pathname}{"requested"} == 0 ) {
-                                       $DELAY{$pathname}{"requested"} = 1;
-                               } elsif ( !exists $DELAY{$pathname} and $always_delay ) {
-                                       $DELAY{$pathname} = { "requested" => 1, "count" => 1 };
-                               }
-                       } elsif ($buffer =~ /^(ref|treeish|blob)=/) {
-                               print $debug " $buffer";
-                       } else {
-                               # In general, filters need to be graceful about
-                               # new metadata, since it's documented that we
-                               # can pass any key-value pairs, but for tests,
-                               # let's be a little stricter.
-                               die "Unknown message '$buffer'";
-                       }
-
-                       ( $done, $buffer ) = packet_txt_read();
-               }
-               if ( $done == -1 ) {
-                       die "unexpected EOF after pathname '$pathname'";
-               }
-
-               my $input = "";
-               {
-                       binmode(STDIN);
-                       my $buffer;
-                       my $done = 0;
-                       while ( !$done ) {
-                               ( $done, $buffer ) = packet_bin_read();
-                               $input .= $buffer;
-                       }
-                       if ( $done == -1 ) {
-                               die "unexpected EOF while reading input for '$pathname'";
-                       }                       
-                       print $debug " " . length($input) . " [OK] -- ";
-                       $debug->flush();
-               }
-
-               my $output;
-               if ( exists $DELAY{$pathname} and exists $DELAY{$pathname}{"output"} ) {
-                       $output = $DELAY{$pathname}{"output"}
-               } elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
-                       $output = "";
-               } elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
-                       $output = rot13($input);
-               } elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
-                       $output = rot13($input);
-               } else {
-                       die "bad command '$command'";
-               }
-
-               if ( $pathname eq "error.r" ) {
-                       print $debug "[ERROR]\n";
-                       $debug->flush();
-                       packet_txt_write("status=error");
-                       packet_flush();
-               } elsif ( $pathname eq "abort.r" ) {
-                       print $debug "[ABORT]\n";
-                       $debug->flush();
-                       packet_txt_write("status=abort");
-                       packet_flush();
-               } elsif ( $command eq "smudge" and
-                       exists $DELAY{$pathname} and
-                       $DELAY{$pathname}{"requested"} == 1 ) {
-                       print $debug "[DELAYED]\n";
-                       $debug->flush();
-                       packet_txt_write("status=delayed");
-                       packet_flush();
-                       $DELAY{$pathname}{"requested"} = 2;
-                       $DELAY{$pathname}{"output"} = $output;
-               } else {
-                       packet_txt_write("status=success");
-                       packet_flush();
-
-                       if ( $pathname eq "${command}-write-fail.r" ) {
-                               print $debug "[WRITE FAIL]\n";
-                               $debug->flush();
-                               die "${command} write error";
-                       }
-
-                       print $debug "OUT: " . length($output) . " ";
-                       $debug->flush();
-
-                       while ( length($output) > 0 ) {
-                               my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE );
-                               packet_bin_write($packet);
-                               # dots represent the number of packets
-                               print $debug ".";
-                               if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
-                                       $output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
-                               } else {
-                                       $output = "";
-                               }
-                       }
-                       packet_flush();
-                       print $debug " [OK]\n";
-                       $debug->flush();
-                       packet_flush();
-               }
-       }
-}
index 08f5fe9caef0c8901e49401abf2762753d8a0b7c..b6d2f591acdd999483b2d28af8bbbe3d30008ea7 100755 (executable)
@@ -78,4 +78,52 @@ test_expect_success 'indicates populated hooks' '
        test_cmp expect actual
 '
 
+test_expect_success UNZIP '--diagnose creates diagnostics zip archive' '
+       test_when_finished rm -rf report &&
+
+       git bugreport --diagnose -o report -s test >out &&
+
+       zip_path=report/git-diagnostics-test.zip &&
+       grep "Available space" out &&
+       test_path_is_file "$zip_path" &&
+
+       # Check zipped archive content
+       "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out &&
+       test_file_not_empty out &&
+
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out &&
+       grep "^Total: [0-9][0-9]*" out &&
+
+       # Should not include .git directory contents by default
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--diagnose=stats excludes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git bugreport --diagnose=stats -o report -s test >out &&
+
+       # Includes pack quantity/size info
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       # Does not include .git directory contents
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--diagnose=all includes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git bugreport --diagnose=all -o report -s test >out &&
+
+       # Includes .git directory contents
+       "$GIT_UNZIP" -l "$zip_path" | grep ".git/" &&
+
+       "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out &&
+       test_file_not_empty out
+'
+
 test_done
diff --git a/t/t0092-diagnose.sh b/t/t0092-diagnose.sh
new file mode 100755 (executable)
index 0000000..fca9b58
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+test_description='git diagnose'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success UNZIP 'creates diagnostics zip archive' '
+       test_when_finished rm -rf report &&
+
+       git diagnose -o report -s test >out &&
+       grep "Available space" out &&
+
+       zip_path=report/git-diagnostics-test.zip &&
+       test_path_is_file "$zip_path" &&
+
+       # Check zipped archive content
+       "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out &&
+       test_file_not_empty out &&
+
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out &&
+       grep "^Total: [0-9][0-9]*" out &&
+
+       # Should not include .git directory contents by default
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--mode=stats excludes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git diagnose -o report -s test --mode=stats >out &&
+
+       # Includes pack quantity/size info
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       # Does not include .git directory contents
+       ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/"
+'
+
+test_expect_success UNZIP '--mode=all includes .git dir contents' '
+       test_when_finished rm -rf report &&
+
+       git diagnose -o report -s test --mode=all >out &&
+
+       # Includes pack quantity/size info
+       "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out &&
+       grep ".git/objects" out &&
+
+       # Includes .git directory contents
+       "$GIT_UNZIP" -l "$zip_path" | grep ".git/" &&
+
+       "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out &&
+       test_file_not_empty out
+'
+
+test_done
index 763c6cc684687a3aee6b915dc676f4425065eeeb..a6a14c8a21f73b48510b6a40cc1896145cbafad8 100755 (executable)
@@ -372,6 +372,14 @@ test_expect_success 'deep changes during checkout' '
        test_all_match git checkout base
 '
 
+test_expect_success 'checkout with modified sparse directory' '
+       init_repos &&
+
+       test_all_match git checkout rename-in-to-out -- . &&
+       test_sparse_match git sparse-checkout reapply &&
+       test_all_match git checkout base
+'
+
 test_expect_success 'add outside sparse cone' '
        init_repos &&
 
@@ -687,6 +695,23 @@ test_expect_success 'reset with wildcard pathspec' '
        test_all_match git ls-files -s -- folder1
 '
 
+test_expect_success 'reset hard with removed sparse dir' '
+       init_repos &&
+
+       run_on_all git rm -r --sparse folder1 &&
+       test_all_match git status --porcelain=v2 &&
+
+       test_all_match git reset --hard &&
+       test_all_match git status --porcelain=v2 &&
+
+       cat >expect <<-\EOF &&
+       folder1/
+       EOF
+
+       git -C sparse-index ls-files --sparse folder1 >out &&
+       test_cmp expect out
+'
+
 test_expect_success 'update-index modify outside sparse definition' '
        init_repos &&
 
@@ -912,7 +937,7 @@ test_expect_success 'read-tree --prefix' '
        test_all_match git read-tree --prefix=deep/deeper1/deepest -u deepest &&
        test_all_match git status --porcelain=v2 &&
 
-       test_all_match git rm -rf --sparse folder1/ &&
+       run_on_all git rm -rf --sparse folder1/ &&
        test_all_match git read-tree --prefix=folder1/ -u update-folder1 &&
        test_all_match git status --porcelain=v2 &&
 
@@ -1340,10 +1365,14 @@ ensure_not_expanded () {
                shift &&
                test_must_fail env \
                        GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
-                       git -C sparse-index "$@" || return 1
+                       git -C sparse-index "$@" \
+                       >sparse-index-out \
+                       2>sparse-index-error || return 1
        else
                GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
-                       git -C sparse-index "$@" || return 1
+                       git -C sparse-index "$@" \
+                       >sparse-index-out \
+                       2>sparse-index-error || return 1
        fi &&
        test_region ! index ensure_full_index trace2.txt
 }
@@ -1853,4 +1882,94 @@ test_expect_success 'mv directory from out-of-cone to in-cone' '
        grep -e "H deep/0/1" actual
 '
 
+test_expect_success 'rm pathspec inside sparse definition' '
+       init_repos &&
+
+       test_all_match git rm deep/a &&
+       test_all_match git status --porcelain=v2 &&
+
+       # test wildcard
+       run_on_all git reset --hard &&
+       test_all_match git rm deep/* &&
+       test_all_match git status --porcelain=v2 &&
+
+       # test recursive rm
+       run_on_all git reset --hard &&
+       test_all_match git rm -r deep &&
+       test_all_match git status --porcelain=v2
+'
+
+test_expect_success 'rm pathspec outside sparse definition' '
+       init_repos &&
+
+       for file in folder1/a folder1/0/1
+       do
+               test_sparse_match test_must_fail git rm $file &&
+               test_sparse_match test_must_fail git rm --cached $file &&
+               test_sparse_match git rm --sparse $file &&
+               test_sparse_match git status --porcelain=v2
+       done &&
+
+       cat >folder1-full <<-EOF &&
+       rm ${SQ}folder1/0/0/0${SQ}
+       rm ${SQ}folder1/0/1${SQ}
+       rm ${SQ}folder1/a${SQ}
+       EOF
+
+       cat >folder1-sparse <<-EOF &&
+       rm ${SQ}folder1/${SQ}
+       EOF
+
+       # test wildcard
+       run_on_sparse git reset --hard &&
+       run_on_sparse git sparse-checkout reapply &&
+       test_sparse_match test_must_fail git rm folder1/* &&
+       run_on_sparse git rm --sparse folder1/* &&
+       test_cmp folder1-full sparse-checkout-out &&
+       test_cmp folder1-sparse sparse-index-out &&
+       test_sparse_match git status --porcelain=v2 &&
+
+       # test recursive rm
+       run_on_sparse git reset --hard &&
+       run_on_sparse git sparse-checkout reapply &&
+       test_sparse_match test_must_fail git rm --sparse folder1 &&
+       run_on_sparse git rm --sparse -r folder1 &&
+       test_cmp folder1-full sparse-checkout-out &&
+       test_cmp folder1-sparse sparse-index-out &&
+       test_sparse_match git status --porcelain=v2
+'
+
+test_expect_success 'rm pathspec expands index when necessary' '
+       init_repos &&
+
+       # in-cone pathspec (do not expand)
+       ensure_not_expanded rm "deep/deep*" &&
+       test_must_be_empty sparse-index-err &&
+
+       # out-of-cone pathspec (expand)
+       ! ensure_not_expanded rm --sparse "folder1/a*" &&
+       test_must_be_empty sparse-index-err &&
+
+       # pathspec that should expand index
+       ! ensure_not_expanded rm "*/a" &&
+       test_must_be_empty sparse-index-err &&
+
+       ! ensure_not_expanded rm "**a" &&
+       test_must_be_empty sparse-index-err
+'
+
+test_expect_success 'sparse index is not expanded: rm' '
+       init_repos &&
+
+       ensure_not_expanded rm deep/a &&
+
+       # test in-cone wildcard
+       git -C sparse-index reset --hard &&
+       ensure_not_expanded rm deep/* &&
+
+       # test recursive rm
+       git -C sparse-index reset --hard &&
+       ensure_not_expanded rm -r deep
+'
+
 test_done
index ab7f31f1dcd5b5b7224ded4099f8316171bdea7d..53c2aa10b72745f96c6619fc5f8e5c9f50a48d54 100755 (executable)
@@ -364,6 +364,20 @@ test_expect_success 'tree entry with type mismatch' '
        test_i18ngrep ! "dangling blob" out
 '
 
+test_expect_success 'tree entry with bogus mode' '
+       test_when_finished "remove_object \$blob" &&
+       test_when_finished "remove_object \$tree" &&
+       blob=$(echo blob | git hash-object -w --stdin) &&
+       blob_oct=$(echo $blob | hex2oct) &&
+       tree=$(printf "100000 foo\0${blob_oct}" |
+              git hash-object -t tree --stdin -w --literally) &&
+       git fsck 2>err &&
+       cat >expect <<-EOF &&
+       warning in tree $tree: badFilemode: contains bad file modes
+       EOF
+       test_cmp expect err
+'
+
 test_expect_success 'tag pointing to nonexistent' '
        badoid=$(test_oid deadbeef) &&
        cat >invalid-tag <<-EOF &&
index c683e60007219807b18bfb42bac83c297cdc8234..00ce3033d3489285b8580d6e23f3eeb24074862f 100755 (executable)
@@ -230,12 +230,9 @@ test_expect_success SYMLINKS 'parallel checkout checks for symlinks in leading d
 # check the final report including sequential, parallel, and delayed entries
 # all at the same time. So we must have finer control of the parallel checkout
 # variables.
-test_expect_success PERL '"git checkout ." report should not include failed entries' '
-       write_script rot13-filter.pl "$PERL_PATH" \
-               <"$TEST_DIRECTORY"/t0021/rot13-filter.pl &&
-
+test_expect_success '"git checkout ." report should not include failed entries' '
        test_config_global filter.delay.process \
-               "\"$(pwd)/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" &&
+               "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" &&
        test_config_global filter.delay.required true &&
        test_config_global filter.cat.clean cat  &&
        test_config_global filter.cat.smudge cat  &&
index 252545796182e3e4c469284f1a99a8691a0812a8..f3511cd43a9ddc929c886ef7c44f4e4704b6c367 100755 (executable)
@@ -138,12 +138,9 @@ test_expect_success 'parallel-checkout and external filter' '
 # The delayed queue is independent from the parallel queue, and they should be
 # able to work together in the same checkout process.
 #
-test_expect_success PERL 'parallel-checkout and delayed checkout' '
-       write_script rot13-filter.pl "$PERL_PATH" \
-               <"$TEST_DIRECTORY"/t0021/rot13-filter.pl &&
-
+test_expect_success 'parallel-checkout and delayed checkout' '
        test_config_global filter.delay.process \
-               "\"$(pwd)/rot13-filter.pl\" --always-delay \"$(pwd)/delayed.log\" clean smudge delay" &&
+               "test-tool rot13-filter --always-delay --log=\"$(pwd)/delayed.log\" clean smudge delay" &&
        test_config_global filter.delay.required true &&
 
        echo "abcd" >original &&
index b354fb39de839aba1506693ee4a0cd7d4967d656..3b7df9bed5ad3e740090e2dc9b3fecb40479a65f 100755 (executable)
@@ -766,6 +766,19 @@ test_expect_success 'detect bogus diffFilter output' '
        force_color test_must_fail git add -p <y
 '
 
+test_expect_success 'handle very large filtered diff' '
+       git reset --hard &&
+       # The specific number here is not important, but it must
+       # be large enough that the output of "git diff --color"
+       # fills up the pipe buffer. 10,000 results in ~200k of
+       # colored output.
+       test_seq 10000 >test &&
+       test_config interactive.diffFilter cat &&
+       printf y >y &&
+       force_color git add -p >output 2>&1 <y &&
+       git diff-files --exit-code -- test
+'
+
 test_expect_success 'diff.algorithm is passed to `git diff-files`' '
        git reset --hard &&
 
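
This test exercises the situation the pipe_command() change earlier in this patch is meant to survive: the parent has to keep draining the filter's output while it is still feeding it the large colored diff, otherwise both sides block on full pipe buffers. Purely as an illustration (not Git's actual implementation, and assuming both descriptors were already made non-blocking), a poll()-based pump avoiding that deadlock might look like:

#include <errno.h>
#include <poll.h>
#include <unistd.h>

/*
 * Hypothetical sketch: write input to the child and drain its output in
 * the same loop so that neither full pipe buffer can deadlock the parent.
 */
static int demo_pump(int to_child, const char *buf, size_t len, int from_child)
{
	while (len) {
		struct pollfd pfd[2] = {
			{ .fd = to_child, .events = POLLOUT },
			{ .fd = from_child, .events = POLLIN },
		};
		char sink[4096];

		if (poll(pfd, 2, -1) < 0) {
			if (errno == EINTR)
				continue;
			return -1;
		}
		if (pfd[0].revents & POLLOUT) {
			ssize_t n = write(to_child, buf, len);

			if (n < 0 && errno != EAGAIN && errno != EINTR)
				return -1;
			if (n > 0) {
				buf += n;
				len -= (size_t)n;
			}
		}
		/* Child output is read and discarded here for brevity. */
		if ((pfd[1].revents & POLLIN) &&
		    read(from_child, sink, sizeof(sink)) < 0 &&
		    errno != EAGAIN && errno != EINTR)
			return -1;
	}
	return 0;
}
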
index 056e922164d04976570b4e5fc25975f1cbfc8b7a..dfcf3a0aaae3e2a72f6f41023b01c639a99c2f44 100755 (executable)
@@ -352,6 +352,8 @@ log -GF -p --pickaxe-all master
 log -IA -IB -I1 -I2 -p master
 log --decorate --all
 log --decorate=full --all
+log --decorate --clear-decorations --all
+log --decorate=full --clear-decorations --all
 
 rev-list --parents HEAD
 rev-list --children HEAD
index 3f9b872eceb734cb1e7adbd53a4de0580b1cd524..6b0b334a5d6ca160fc60ecf3f8e607a13633f562 100644 (file)
@@ -20,7 +20,7 @@ Date:   Mon Jun 26 00:06:00 2006 +0000
 
     Rearranged lines in dir/sub
 
-commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0
 Author: A U Thor <author@example.com>
 Date:   Mon Jun 26 00:06:00 2006 +0000
 
diff --git a/t/t4013/diff.log_--decorate=full_--clear-decorations_--all b/t/t4013/diff.log_--decorate=full_--clear-decorations_--all
new file mode 100644 (file)
index 0000000..1c030a6
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate=full --clear-decorations --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
diff --git a/t/t4013/diff.log_--decorate=full_--decorate-all_--all b/t/t4013/diff.log_--decorate=full_--decorate-all_--all
new file mode 100644 (file)
index 0000000..d6e7928
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate=full --decorate-all --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
index f5e20e1e14aaef179ac9570b0e16bc47077ff79d..c7df1f581410d1155d87122ce91f6890390cf90a 100644 (file)
@@ -20,7 +20,7 @@ Date:   Mon Jun 26 00:06:00 2006 +0000
 
     Rearranged lines in dir/sub
 
-commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0
 Author: A U Thor <author@example.com>
 Date:   Mon Jun 26 00:06:00 2006 +0000
 
diff --git a/t/t4013/diff.log_--decorate_--clear-decorations_--all b/t/t4013/diff.log_--decorate_--clear-decorations_--all
new file mode 100644 (file)
index 0000000..88be82c
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate --clear-decorations --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
diff --git a/t/t4013/diff.log_--decorate_--decorate-all_--all b/t/t4013/diff.log_--decorate_--decorate-all_--all
new file mode 100644 (file)
index 0000000..5d22618
--- /dev/null
@@ -0,0 +1,61 @@
+$ git log --decorate --decorate-all --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode
+
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    update mode (file2)
+
+Notes:
+    note
+
+commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Rearranged lines in dir/sub
+
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:06:00 2006 +0000
+
+    Notes added by 'git notes add'
+
+commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master)
+Merge: 9a6d494 c7a2ab9
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:04:00 2006 +0000
+
+    Merge branch 'side'
+
+commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:03:00 2006 +0000
+
+    Side
+
+commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:02:00 2006 +0000
+
+    Third
+
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:01:00 2006 +0000
+
+    Second
+    
+    This is the second commit.
+
+commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial)
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+$
index f0aaa1fa02a56447d91e0815f71fed0a92989148..cc15cb4ff62ab4c939b369826e5795e46f218d84 100755 (executable)
@@ -704,9 +704,12 @@ test_expect_success 'set up more tangled history' '
        git checkout -b tangle HEAD~6 &&
        test_commit tangle-a tangle-a a &&
        git merge main~3 &&
+       git update-ref refs/prefetch/merge HEAD &&
        git merge side~1 &&
+       git update-ref refs/rewritten/merge HEAD &&
        git checkout main &&
        git merge tangle &&
+       git update-ref refs/hidden/tangle HEAD &&
        git checkout -b reach &&
        test_commit reach &&
        git checkout main &&
@@ -974,9 +977,9 @@ test_expect_success 'decorate-refs-exclude and simplify-by-decoration' '
        Merge-tag-reach (HEAD -> main)
        reach (tag: reach, reach)
        seventh (tag: seventh)
-       Merge-branch-tangle
-       Merge-branch-side-early-part-into-tangle (tangle)
-       tangle-a (tag: tangle-a)
+       Merge-branch-tangle (refs/hidden/tangle)
+       Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, tangle)
+       Merge-branch-main-early-part-into-tangle (refs/prefetch/merge)
        EOF
        git log -n6 --decorate=short --pretty="tformat:%f%d" \
                --decorate-refs-exclude="*octopus*" \
@@ -1025,6 +1028,115 @@ test_expect_success 'decorate-refs and simplify-by-decoration without output' '
        test_cmp expect actual
 '
 
+test_expect_success 'decorate-refs-exclude HEAD' '
+       git log --decorate=full --oneline \
+               --decorate-refs-exclude="HEAD" >actual &&
+       ! grep HEAD actual
+'
+
+test_expect_success 'decorate-refs focus from default' '
+       git log --decorate=full --oneline \
+               --decorate-refs="refs/heads" >actual &&
+       ! grep HEAD actual
+'
+
+test_expect_success '--clear-decorations overrides defaults' '
+       cat >expect.default <<-\EOF &&
+       Merge-tag-reach (HEAD -> refs/heads/main)
+       Merge-tags-octopus-a-and-octopus-b
+       seventh (tag: refs/tags/seventh)
+       octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b)
+       octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a)
+       reach (tag: refs/tags/reach, refs/heads/reach)
+       Merge-branch-tangle
+       Merge-branch-side-early-part-into-tangle (refs/heads/tangle)
+       Merge-branch-main-early-part-into-tangle
+       tangle-a (tag: refs/tags/tangle-a)
+       Merge-branch-side
+       side-2 (tag: refs/tags/side-2, refs/heads/side)
+       side-1 (tag: refs/tags/side-1)
+       Second
+       sixth
+       fifth
+       fourth
+       third
+       second
+       initial
+       EOF
+       git log --decorate=full --pretty="tformat:%f%d" >actual &&
+       test_cmp expect.default actual &&
+
+       cat >expect.all <<-\EOF &&
+       Merge-tag-reach (HEAD -> refs/heads/main)
+       Merge-tags-octopus-a-and-octopus-b
+       seventh (tag: refs/tags/seventh)
+       octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b)
+       octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a)
+       reach (tag: refs/tags/reach, refs/heads/reach)
+       Merge-branch-tangle (refs/hidden/tangle)
+       Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle)
+       Merge-branch-main-early-part-into-tangle (refs/prefetch/merge)
+       tangle-a (tag: refs/tags/tangle-a)
+       Merge-branch-side
+       side-2 (tag: refs/tags/side-2, refs/heads/side)
+       side-1 (tag: refs/tags/side-1)
+       Second
+       sixth
+       fifth
+       fourth
+       third
+       second
+       initial
+       EOF
+       git log --decorate=full --pretty="tformat:%f%d" \
+               --clear-decorations >actual &&
+       test_cmp expect.all actual &&
+       git -c log.initialDecorationSet=all log \
+               --decorate=full --pretty="tformat:%f%d" >actual &&
+       test_cmp expect.all actual
+'
+
+test_expect_success '--clear-decorations clears previous exclusions' '
+       cat >expect.all <<-\EOF &&
+       Merge-tag-reach (HEAD -> refs/heads/main)
+       reach (tag: refs/tags/reach, refs/heads/reach)
+       Merge-tags-octopus-a-and-octopus-b
+       octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b)
+       octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a)
+       seventh (tag: refs/tags/seventh)
+       Merge-branch-tangle (refs/hidden/tangle)
+       Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle)
+       Merge-branch-main-early-part-into-tangle (refs/prefetch/merge)
+       tangle-a (tag: refs/tags/tangle-a)
+       side-2 (tag: refs/tags/side-2, refs/heads/side)
+       side-1 (tag: refs/tags/side-1)
+       initial
+       EOF
+
+       git log --decorate=full --pretty="tformat:%f%d" \
+               --simplify-by-decoration \
+               --decorate-refs-exclude="heads/octopus*" \
+               --decorate-refs="heads" \
+               --clear-decorations >actual &&
+       test_cmp expect.all actual &&
+
+       cat >expect.filtered <<-\EOF &&
+       Merge-tags-octopus-a-and-octopus-b
+       octopus-b (refs/heads/octopus-b)
+       octopus-a (refs/heads/octopus-a)
+       initial
+       EOF
+
+       git log --decorate=full --pretty="tformat:%f%d" \
+               --simplify-by-decoration \
+               --decorate-refs-exclude="heads/octopus" \
+               --decorate-refs="heads" \
+               --clear-decorations \
+               --decorate-refs-exclude="tags/" \
+               --decorate-refs="heads/octopus*" >actual &&
+       test_cmp expect.filtered actual
+'
+
 test_expect_success 'log.decorate config parsing' '
        git log --oneline --decorate=full >expect.full &&
        git log --oneline --decorate=short >expect.short &&
@@ -2192,6 +2304,20 @@ test_expect_success 'log --decorate includes all levels of tag annotated tags' '
        test_cmp expect actual
 '
 
+test_expect_success 'log --decorate does not include things outside filter' '
+       reflist="refs/prefetch refs/rebase-merge refs/bundle" &&
+
+       for ref in $reflist
+       do
+               git update-ref $ref/fake HEAD || return 1
+       done &&
+
+       git log --decorate=full --oneline >actual &&
+
+       # None of the refs are visible:
+       ! grep /fake actual
+'
+
 test_expect_success 'log --end-of-options' '
        git update-ref refs/heads/--source HEAD &&
        git log --end-of-options --source >actual &&
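As an aside for readers of the new t4202 tests above: the behaviour they pin down can be tried directly. A minimal sketch, not part of this patch; any repository that carries refs such as refs/prefetch/* will do:

    # By default, refs outside the usual namespaces do not decorate the log:
    git log --oneline --decorate

    # --clear-decorations drops the built-in include/exclude patterns, so
    # hidden refs (refs/prefetch/*, refs/rebase-merge/*, ...) show up too:
    git log --oneline --decorate --clear-decorations

    # The same default can be selected via configuration:
    git -c log.initialDecorationSet=all log --oneline --decorate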
index 36ac6aff1e40dd8ebc1bc894bf63d2228bcd32a3..ded33a82e2c94cd4f44c1df4c9fe93ce9bf5f148 100755 (executable)
@@ -3,7 +3,7 @@
 # Copyright (c) 2010 Nazri Ramliy
 #
 
-test_description='Test for "git log --decorate" colors'
+test_description='test "git log --decorate" colors'
 
 GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
@@ -17,6 +17,7 @@ test_expect_success setup '
        git config color.decorate.remoteBranch red &&
        git config color.decorate.tag "reverse bold yellow" &&
        git config color.decorate.stash magenta &&
+       git config color.decorate.grafted black &&
        git config color.decorate.HEAD cyan &&
 
        c_reset="<RESET>" &&
@@ -27,6 +28,7 @@ test_expect_success setup '
        c_tag="<BOLD;REVERSE;YELLOW>" &&
        c_stash="<MAGENTA>" &&
        c_HEAD="<CYAN>" &&
+       c_grafted="<BLACK>" &&
 
        test_commit A &&
        git clone . other &&
@@ -42,25 +44,79 @@ test_expect_success setup '
        git stash save Changes to A.t
 '
 
-cat >expected <<EOF
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD ->\
- ${c_reset}${c_branch}main${c_reset}${c_commit},\
- ${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit},\
- ${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit},\
- ${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset}\
- On main: Changes to A.t
-${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
-EOF
+cmp_filtered_decorations () {
+       sed "s/$OID_REGEX/COMMIT_ID/" actual | test_decode_color >filtered &&
+       test_cmp expect filtered
+}
 
 # We want log to show all, but the second parent to refs/stash is irrelevant
 # to this test since it does not contain any decoration, hence --first-parent
-test_expect_success 'Commit Decorations Colored Correctly' '
-       git log --first-parent --abbrev=10 --all --decorate --oneline --color=always |
-       sed "s/[0-9a-f]\{10,10\}/COMMIT_ID/" |
-       test_decode_color >out &&
-       test_cmp expected out
+test_expect_success 'commit decorations colored correctly' '
+       cat >expect <<-EOF &&
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \
+${c_reset}${c_branch}main${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B
+${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit}, \
+${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset} \
+On main: Changes to A.t
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
+       EOF
+
+       git log --first-parent --no-abbrev --decorate --oneline --color=always --all >actual &&
+       cmp_filtered_decorations
+'
+
+test_expect_success 'test coloring with replace-objects' '
+       test_when_finished rm -rf .git/refs/replace* &&
+       test_commit C &&
+       test_commit D &&
+
+       git replace HEAD~1 HEAD~2 &&
+
+       cat >expect <<-EOF &&
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \
+${c_reset}${c_branch}main${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: D${c_reset}${c_commit})${c_reset} D
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: C${c_reset}${c_commit}, \
+${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} B
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
+       EOF
+
+       git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations &&
+       git replace -d HEAD~1 &&
+
+       GIT_REPLACE_REF_BASE=refs/replace2/ git replace HEAD~1 HEAD~2 &&
+       GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \
+               --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations
+'
+
+test_expect_success 'test coloring with grafted commit' '
+       test_when_finished rm -rf .git/refs/replace* &&
+
+       git replace --graft HEAD HEAD~2 &&
+
+       cat >expect <<-EOF &&
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \
+${c_reset}${c_branch}main${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: D${c_reset}${c_commit}, \
+${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} D
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \
+${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B
+       ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A
+       EOF
+
+       git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations &&
+       git replace -d HEAD &&
+
+       GIT_REPLACE_REF_BASE=refs/replace2/ git replace --graft HEAD HEAD~2 &&
+       GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \
+               --no-abbrev --decorate --oneline --color=always HEAD >actual &&
+       cmp_filtered_decorations
 '
 
 test_done
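A quick note on the color setup above: the new color.decorate.grafted slot colors the "replaced" decoration that appears for replace and graft refs. A minimal sketch, with an illustrative color choice:

    git config color.decorate.grafted "bold red"
    git replace --graft HEAD HEAD~2
    git log --decorate --oneline --color=always -1
    git replace -d HEAD    # drop the graft again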
index ee6d2dde9f35677fb9c83a228839bb7ad405a4fa..d18f2823d86e8b94c5331b1087f962469fc91f2c 100755 (executable)
@@ -407,6 +407,7 @@ test_expect_success 'in_vain not triggered before first ACK' '
 '
 
 test_expect_success 'in_vain resetted upon ACK' '
+       test_when_finished rm -f log trace2 &&
        rm -rf myserver myclient &&
        git init myserver &&
 
@@ -432,7 +433,8 @@ test_expect_success 'in_vain resetted upon ACK' '
        # first. The 256th commit is common between the client and the server,
        # and should reset in_vain. This allows negotiation to continue until
        # the client reports that first_anotherbranch_commit is common.
-       git -C myclient fetch --progress origin main 2>log &&
+       GIT_TRACE2_EVENT="$(pwd)/trace2" git -C myclient fetch --progress origin main 2>log &&
+       grep \"key\":\"total_rounds\",\"value\":\"6\" trace2 &&
        test_i18ngrep "Total 3 " log
 '
 
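The added trace2 check relies on the negotiation-round counter that fetch emits as a trace2 event. A minimal sketch of inspecting it outside the test suite (the trace file path is illustrative):

    GIT_TRACE2_EVENT=/tmp/fetch.events git fetch origin
    grep '"total_rounds"' /tmp/fetch.events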
index b0b795aca97f8614dcae4a4a52f9b811386d7deb..ac4099ca8931930989eece26fa735e2f6c00bbc3 100755 (executable)
@@ -352,4 +352,21 @@ test_expect_success \
        grep "Cannot demote unterminatedheader" act
 '
 
+test_expect_success 'badFilemode is not a strict error' '
+       git init --bare badmode.git &&
+       tree=$(
+               cd badmode.git &&
+               blob=$(echo blob | git hash-object -w --stdin | hex2oct) &&
+               printf "123456 foo\0${blob}" |
+               git hash-object -t tree --stdin -w --literally
+       ) &&
+
+       rm -rf dst.git &&
+       git init --bare dst.git &&
+       git -C dst.git config transfer.fsckObjects true &&
+
+       git -C badmode.git push ../dst.git $tree:refs/tags/tree 2>err &&
+       grep "$tree: badFilemode" err
+'
+
 test_done
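The new test documents that a bad file mode in a tree is no longer a fatal error under transfer.fsckObjects. A server that still wants to reject such trees can presumably raise the severity for that message id; a hedged sketch:

    # receive.fsck.<msgid> overrides the default severity for incoming objects
    git -C dst.git config receive.fsck.badFilemode error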
index f3356f9ea8cc1d893f756bf9af9806d210904124..3211002d466867fb2abb99eeff897029a7fdfa71 100755 (executable)
@@ -200,7 +200,10 @@ test_expect_success 'push with negotiation' '
        test_commit -C testrepo unrelated_commit &&
        git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
        test_when_finished "rm event" &&
-       GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main &&
+       GIT_TRACE2_EVENT="$(pwd)/event" \
+               git -c protocol.version=2 -c push.negotiate=1 \
+               push testrepo refs/heads/main:refs/remotes/origin/main &&
+       grep \"key\":\"total_rounds\",\"value\":\"1\" event &&
        grep_wrote 2 event # 1 commit, 1 tree
 '
 
@@ -224,7 +227,10 @@ test_expect_success 'push with negotiation does not attempt to fetch submodules'
        git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
        test_commit -C testrepo unrelated_commit &&
        git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
-       git -c submodule.recurse=true -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
+       GIT_TRACE2_EVENT="$(pwd)/event" git -c submodule.recurse=true \
+               -c protocol.version=2 -c push.negotiate=1 \
+               push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
+       grep \"key\":\"total_rounds\",\"value\":\"1\" event &&
        ! grep "Fetching submodule" err
 '
 
index cf3be0584f40d13ba187cd8c04efc358910714a7..2e57de9c12a39a63b870079c17b014ad24fb93c3 100755 (executable)
@@ -743,7 +743,11 @@ test_expect_success 'batch missing blob request during checkout' '
 
        # Ensure that there is only one negotiation by checking that there is
        # only "done" line sent. ("done" marks the end of negotiation.)
-       GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
+       GIT_TRACE_PACKET="$(pwd)/trace" \
+               GIT_TRACE2_EVENT="$(pwd)/trace2_event" \
+               git -C client -c trace2.eventNesting=5 checkout HEAD^ &&
+       grep \"key\":\"total_rounds\",\"value\":\"1\" trace2_event >trace_lines &&
+       test_line_count = 1 trace_lines &&
        grep "fetch> done" trace >done_lines &&
        test_line_count = 1 done_lines
 '
index 4a3778d04a82df6322048e34864578860b96f859..9aeacc2f6a5267cfdf8a05f2c924f4ecd04fe319 100755 (executable)
@@ -49,6 +49,13 @@ test_expect_success 'do partial clone 1' '
        test "$(git -C pc1 config --local remote.origin.partialclonefilter)" = "blob:none"
 '
 
+test_expect_success 'rev-list --missing=allow-promisor on partial clone' '
+       git -C pc1 rev-list --objects --missing=allow-promisor HEAD >actual &&
+       git -C pc1 rev-list --objects --missing=print HEAD >expect.raw &&
+       grep -v "^?" expect.raw >expect &&
+       test_cmp expect actual
+'
+
 test_expect_success 'verify that .promisor file contains refs fetched' '
        ls pc1/.git/objects/pack/pack-*.promisor >promisorlist &&
        test_line_count = 1 promisorlist &&
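The new test contrasts two --missing modes in a blobless partial clone; a minimal sketch of the difference:

    # Missing promisor objects are silently tolerated ...
    git rev-list --objects --missing=allow-promisor HEAD >allowed

    # ... whereas --missing=print lists them with a leading "?":
    git rev-list --objects --missing=print HEAD | grep '^?'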
index 9d6cd7d98649c0fe0f40c474d5ed88528ac9ba8c..df74f80061c564b7f69961f0f3d665b1afca460f 100755 (executable)
@@ -229,14 +229,16 @@ test_expect_success 'setup repos for fetching with ref-in-want tests' '
 '
 
 test_expect_success 'fetching with exact OID' '
-       test_when_finished "rm -f log" &&
+       test_when_finished "rm -f log trace2" &&
 
        rm -rf local &&
        cp -r "$LOCAL_PRISTINE" local &&
        oid=$(git -C "$REPO" rev-parse d) &&
-       GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin \
+       GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE2_EVENT="$(pwd)/trace2" \
+               git -C local fetch origin \
                "$oid":refs/heads/actual &&
 
+       grep \"key\":\"total_rounds\",\"value\":\"2\" trace2 &&
        git -C "$REPO" rev-parse "d" >expected &&
        git -C local rev-parse refs/heads/actual >actual &&
        test_cmp expected actual &&
index af57a04b7ffa01644768ee507714ad5f5d5b79c6..738da23628b12b4660105195e2bbcedf68416f33 100755 (executable)
@@ -8,8 +8,13 @@ test_description='--ancestry-path'
 #   /                     \
 #  A-------K---------------L--M
 #
-#  D..M                 == E F G H I J K L M
-#  --ancestry-path D..M == E F H I J L M
+#  D..M                                     == E F G H I J K L M
+#  --ancestry-path                     D..M == E F   H I J   L M
+#  --ancestry-path=F                   D..M == E F       J   L M
+#  --ancestry-path=G                   D..M ==     G H I J   L M
+#  --ancestry-path=H                   D..M == E   G H I J   L M
+#  --ancestry-path=K                   D..M ==             K L M
+#  --ancestry-path=K --ancestry-path=F D..M == E F       J K L M
 #
 #  D..M -- M.t                 == M
 #  --ancestry-path D..M -- M.t == M
@@ -50,73 +55,41 @@ test_expect_success setup '
        test_commit M
 '
 
-test_expect_success 'rev-list D..M' '
-       test_write_lines E F G H I J K L M >expect &&
-       git rev-list --format=%s D..M |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'rev-list --ancestry-path D..M' '
-       test_write_lines E F H I J L M >expect &&
-       git rev-list --ancestry-path --format=%s D..M |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'rev-list D..M -- M.t' '
-       echo M >expect &&
-       git rev-list --format=%s D..M -- M.t |
-       sed -e "/^commit /d" >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'rev-list --ancestry-path D..M -- M.t' '
-       echo M >expect &&
-       git rev-list --ancestry-path --format=%s D..M -- M.t |
-       sed -e "/^commit /d" >actual &&
-       test_cmp expect actual
-'
+test_ancestry () {
+       args=$1
+       expected=$2
+       test_expect_success "log $args" "
+               test_write_lines $expected >expect &&
+               git log --format=%s $args >raw &&
+
+               if test -n \"$expected\"
+               then
+                       sort raw >actual &&
+                       test_cmp expect actual
+               else
+                       test_must_be_empty raw
+               fi
+       "
+}
 
-test_expect_success 'rev-list F...I' '
-       test_write_lines F G H I >expect &&
-       git rev-list --format=%s F...I |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
+test_ancestry "D..M" "E F G H I J K L M"
 
-test_expect_success 'rev-list --ancestry-path F...I' '
-       test_write_lines F H I >expect &&
-       git rev-list --ancestry-path --format=%s F...I |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
+test_ancestry "--ancestry-path D..M" "E F H I J L M"
+test_ancestry "--ancestry-path=F D..M" "E F J L M"
+test_ancestry "--ancestry-path=G D..M" "G H I J L M"
+test_ancestry "--ancestry-path=H D..M" "E G H I J L M"
+test_ancestry "--ancestry-path=K D..M" "K L M"
+test_ancestry "--ancestry-path=F --ancestry-path=K D..M" "E F J K L M"
 
-# G.t is dropped in an "-s ours" merge
-test_expect_success 'rev-list G..M -- G.t' '
-       git rev-list --format=%s G..M -- G.t |
-       sed -e "/^commit /d" >actual &&
-       test_must_be_empty actual
-'
+test_ancestry "D..M -- M.t" "M"
+test_ancestry "--ancestry-path D..M -- M.t" "M"
 
-test_expect_success 'rev-list --ancestry-path G..M -- G.t' '
-       echo L >expect &&
-       git rev-list --ancestry-path --format=%s G..M -- G.t |
-       sed -e "/^commit /d" >actual &&
-       test_cmp expect actual
-'
+test_ancestry "F...I" "F G H I"
+test_ancestry "--ancestry-path F...I" "F H I"
 
-test_expect_success 'rev-list --ancestry-path --simplify-merges G^..M -- G.t' '
-       test_write_lines G L >expect &&
-       git rev-list --ancestry-path --simplify-merges --format=%s G^..M -- G.t |
-       sed -e "/^commit /d" |
-       sort >actual &&
-       test_cmp expect actual
-'
+test_ancestry "G..M -- G.t" ""
+test_ancestry "--ancestry-path G..M -- G.t" "L"
+test_ancestry "--ancestry-path --simplify-merges G^..M -- G.t" "G L"
 
 #   b---bc
 #  / \ /
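The rewritten tests above exercise the new --ancestry-path=<commit> form; a minimal sketch against the same D..M history (commit names as in the diagram):

    # Classic form: every commit on any ancestry path from D to M
    git log --oneline --ancestry-path D..M

    # New form: keep only the paths that pass through commit G
    git log --oneline --ancestry-path=G D..M

    # The option may be repeated; the restrictions are combined (union)
    git log --oneline --ancestry-path=F --ancestry-path=K D..M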
index b4aef32b713ca00f1d8ad4a098f02ca9dc9be126..d59111dedec8020a138296928649eaff4de45694 100755 (executable)
@@ -48,4 +48,26 @@ check_du HEAD
 check_du --objects HEAD
 check_du --objects HEAD^..HEAD
 
+# As mentioned above, do not hardcode the expected size; use the output
+# from git cat-file instead.
+test_expect_success 'rev-list --disk-usage=human' '
+       git rev-list --objects HEAD --disk-usage=human >actual &&
+       disk_usage_slow --objects HEAD >actual_size &&
+       grep "$(cat actual_size) bytes" actual
+'
+
+test_expect_success 'rev-list --disk-usage=human with bitmaps' '
+       git rev-list --objects HEAD --use-bitmap-index --disk-usage=human >actual &&
+       disk_usage_slow --objects HEAD >actual_size &&
+       grep "$(cat actual_size) bytes" actual
+'
+
+test_expect_success 'rev-list --disk-usage used improperly' '
+       test_must_fail git rev-list --objects HEAD --disk-usage=typo 2>err &&
+       cat >expect <<-\EOF &&
+       fatal: invalid value for '\''--disk-usage=<format>'\'': '\''typo'\'', the only allowed format is '\''human'\''
+       EOF
+       test_cmp err expect
+'
+
 test_done
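A minimal usage sketch of the new human-readable format (the printed value is illustrative):

    # Exact byte count, as before:
    git rev-list --objects --disk-usage HEAD

    # New: human-readable units, e.g. "3.40 MiB":
    git rev-list --objects --disk-usage=human HEAD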
index c253bf759ab12629ad4bba872af8f9bd51f78e1b..d5f3e0fed65b2167bd6b2ddb6a4b7ae4144f1604 100755 (executable)
@@ -103,8 +103,25 @@ test_expect_success 'setup for merge search' '
         echo "file-c" > file-c &&
         git add file-c &&
         git commit -m "sub-c") &&
-       git commit -a -m "c" &&
+       git commit -a -m "c")
+'
 
+test_expect_success 'merging should conflict for non fast-forward' '
+       test_when_finished "git -C merge-search reset --hard" &&
+       (cd merge-search &&
+        git checkout -b test-nonforward-a b &&
+         if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+         then
+               test_must_fail git merge c >actual &&
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" &&
+               grep "$sub_expect" actual
+         else
+               test_must_fail git merge c 2> actual
+         fi)
+'
+
+test_expect_success 'finish setup for merge-search' '
+       (cd merge-search &&
        git checkout -b d a &&
        (cd sub &&
         git checkout -b sub-d sub-b &&
@@ -129,14 +146,16 @@ test_expect_success 'merge with one side as a fast-forward of the other' '
         test_cmp expect actual)
 '
 
-test_expect_success 'merging should conflict for non fast-forward' '
+test_expect_success 'merging should conflict for non fast-forward (resolution exists)' '
        (cd merge-search &&
-        git checkout -b test-nonforward b &&
+        git checkout -b test-nonforward-b b &&
         (cd sub &&
          git rev-parse --short sub-d > ../expect) &&
          if test "$GIT_TEST_MERGE_ALGORITHM" = ort
          then
-               test_must_fail git merge c >actual
+               test_must_fail git merge c >actual &&
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" &&
+               grep "$sub_expect" actual
          else
                test_must_fail git merge c 2> actual
          fi &&
@@ -161,7 +180,9 @@ test_expect_success 'merging should fail for ambiguous common parent' '
         ) &&
         if test "$GIT_TEST_MERGE_ALGORITHM" = ort
         then
-               test_must_fail git merge c >actual
+               test_must_fail git merge c >actual &&
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" &&
+               grep "$sub_expect" actual
         else
                test_must_fail git merge c 2> actual
         fi &&
@@ -205,7 +226,12 @@ test_expect_success 'merging should fail for changes that are backwards' '
        git commit -a -m "f" &&
 
        git checkout -b test-backward e &&
-       test_must_fail git merge f)
+       test_must_fail git merge f >actual &&
+       if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+       then
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-d)" &&
+               grep "$sub_expect" actual
+       fi)
 '
 
 
@@ -476,4 +502,44 @@ test_expect_failure 'directory/submodule conflict; merge --abort works afterward
        )
 '
 
+# Setup:
+#   - Submodule has 2 commits: a and b
+#   - Superproject branch 'a' adds and commits submodule pointing to 'commit a'
+#   - Superproject branch 'b' adds and commits submodule pointing to 'commit b'
+# If these two branches are now merged, there is no merge base
+test_expect_success 'setup for null merge base' '
+       mkdir no-merge-base &&
+       (cd no-merge-base &&
+       git init &&
+       mkdir sub &&
+       (cd sub &&
+        git init &&
+        echo "file-a" > file-a &&
+        git add file-a &&
+        git commit -m "commit a") &&
+       git commit --allow-empty -m init &&
+       git branch init &&
+       git checkout -b a init &&
+       git add sub &&
+       git commit -m "a" &&
+       git switch main &&
+       (cd sub &&
+        echo "file-b" > file-b &&
+        git add file-b &&
+        git commit -m "commit b"))
+'
+
+test_expect_success 'merging should fail with no merge base' '
+       (cd no-merge-base &&
+       git checkout -b b init &&
+       git add sub &&
+       git commit -m "b" &&
+       test_must_fail git merge a >actual &&
+       if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+       then
+               sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short HEAD^1)" &&
+               grep "$sub_expect" actual
+       fi)
+'
+
 test_done
index 8e32f190077474274dc5046df5a64f837ee696f3..ebeca12a71115f60d10fa3ffa4725b1c7080f6f0 100755 (executable)
@@ -104,7 +104,7 @@ test_expect_success 'rebasing submodule that should conflict' '
        test_tick &&
        git commit -m fourth &&
 
-       test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 &&
+       test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 >actual_output &&
        git ls-files -s submodule >actual &&
        (
                cd submodule &&
@@ -112,7 +112,12 @@ test_expect_success 'rebasing submodule that should conflict' '
                echo "160000 $(git rev-parse HEAD^^) 2  submodule" &&
                echo "160000 $(git rev-parse HEAD) 3    submodule"
        ) >expect &&
-       test_cmp expect actual
+       test_cmp expect actual &&
+       if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+       then
+               sub_expect="go to submodule (submodule), and either merge commit $(git -C submodule rev-parse --short HEAD^0)" &&
+               grep "$sub_expect" actual_output
+       fi
 '
 
 test_done
index 74aa6384755ec6d53cf061a48dbe2cdfc93725f9..62ed694a404294f2ba61f07d0158b49c76ce384b 100755 (executable)
@@ -162,7 +162,6 @@ test_expect_success 'prefetch multiple remotes' '
        test_cmp_rev refs/remotes/remote1/one refs/prefetch/remotes/remote1/one &&
        test_cmp_rev refs/remotes/remote2/two refs/prefetch/remotes/remote2/two &&
 
-       test_cmp_config refs/prefetch/ log.excludedecoration &&
        git log --oneline --decorate --all >log &&
        ! grep "prefetch" log &&
 
@@ -173,26 +172,6 @@ test_expect_success 'prefetch multiple remotes' '
        test_subcommand git fetch remote2 $fetchargs <skip-remote1.txt
 '
 
-test_expect_success 'prefetch and existing log.excludeDecoration values' '
-       git config --unset-all log.excludeDecoration &&
-       git config log.excludeDecoration refs/remotes/remote1/ &&
-       git maintenance run --task=prefetch &&
-
-       git config --get-all log.excludeDecoration >out &&
-       grep refs/remotes/remote1/ out &&
-       grep refs/prefetch/ out &&
-
-       git log --oneline --decorate --all >log &&
-       ! grep "prefetch" log &&
-       ! grep "remote1" log &&
-       grep "remote2" log &&
-
-       # a second run does not change the config
-       git maintenance run --task=prefetch &&
-       git log --oneline --decorate --all >log2 &&
-       test_cmp log log2
-'
-
 test_expect_success 'loose-objects task' '
        # Repack everything so we know the state of the object dir
        git repack -adk &&
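The deleted test reflects that 'git maintenance run --task=prefetch' no longer appends refs/prefetch/ to log.excludeDecoration: prefetch refs are now hidden from decorations by default. A hedged sketch of verifying this in a repository with remotes configured:

    git maintenance run --task=prefetch
    git config --get-all log.excludeDecoration || echo "log.excludeDecoration not set"
    git for-each-ref refs/prefetch/                 # the refs exist ...
    git log --oneline --decorate --all | grep prefetch || echo "... but do not decorate"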
index 506234b4b8138894d6516fc80e9fc42753f78f00..74f4d710e8f0f28410fae49a6b9c31bf1045e0ca 100644 (file)
@@ -47,17 +47,20 @@ static int decode_tree_entry(struct tree_desc *desc, const char *buf, unsigned l
 
        /* Initialize the descriptor entry */
        desc->entry.path = path;
-       desc->entry.mode = canon_mode(mode);
+       desc->entry.mode = (desc->flags & TREE_DESC_RAW_MODES) ? mode : canon_mode(mode);
        desc->entry.pathlen = len - 1;
        oidread(&desc->entry.oid, (const unsigned char *)path + len);
 
        return 0;
 }
 
-static int init_tree_desc_internal(struct tree_desc *desc, const void *buffer, unsigned long size, struct strbuf *err)
+static int init_tree_desc_internal(struct tree_desc *desc, const void *buffer,
+                                  unsigned long size, struct strbuf *err,
+                                  enum tree_desc_flags flags)
 {
        desc->buffer = buffer;
        desc->size = size;
+       desc->flags = flags;
        if (size)
                return decode_tree_entry(desc, buffer, size, err);
        return 0;
@@ -66,15 +69,16 @@ static int init_tree_desc_internal(struct tree_desc *desc, const void *buffer, u
 void init_tree_desc(struct tree_desc *desc, const void *buffer, unsigned long size)
 {
        struct strbuf err = STRBUF_INIT;
-       if (init_tree_desc_internal(desc, buffer, size, &err))
+       if (init_tree_desc_internal(desc, buffer, size, &err, 0))
                die("%s", err.buf);
        strbuf_release(&err);
 }
 
-int init_tree_desc_gently(struct tree_desc *desc, const void *buffer, unsigned long size)
+int init_tree_desc_gently(struct tree_desc *desc, const void *buffer, unsigned long size,
+                         enum tree_desc_flags flags)
 {
        struct strbuf err = STRBUF_INIT;
-       int result = init_tree_desc_internal(desc, buffer, size, &err);
+       int result = init_tree_desc_internal(desc, buffer, size, &err, flags);
        if (result)
                error("%s", err.buf);
        strbuf_release(&err);
index a5058469e9b3a83fdd58a04f2af72690742d08bd..6305d531503f25cd0b2632914d9aa7ddea55ff8e 100644 (file)
@@ -34,6 +34,11 @@ struct tree_desc {
 
        /* counts the number of bytes left in the `buffer`. */
        unsigned int size;
+
+       /* option flags passed via init_tree_desc_gently() */
+       enum tree_desc_flags {
+               TREE_DESC_RAW_MODES = (1 << 0),
+       } flags;
 };
 
 /**
@@ -79,7 +84,8 @@ int update_tree_entry_gently(struct tree_desc *);
  */
 void init_tree_desc(struct tree_desc *desc, const void *buf, unsigned long size);
 
-int init_tree_desc_gently(struct tree_desc *desc, const void *buf, unsigned long size);
+int init_tree_desc_gently(struct tree_desc *desc, const void *buf, unsigned long size,
+                         enum tree_desc_flags flags);
 
 /*
  * Visit the next entry in a tree. Returns 1 when there are more entries
index 8a454e03bff796452b6ffad3327dde6c592847f3..90b92114be8558b63371531aecfa246829317840 100644 (file)
@@ -1069,6 +1069,67 @@ static struct cache_entry *create_ce_entry(const struct traverse_info *info,
        return ce;
 }
 
+/*
+ * Determine whether the path specified by 'p' should be unpacked as a new
+ * sparse directory in a sparse index. A new sparse directory 'A/':
+ * - must be outside the sparse cone.
+ * - must not already be in the index (i.e., no index entry with name 'A/'
+ *   exists).
+ * - must not have any child entries in the index (i.e., no index entry
+ *   'A/<something>' exists).
+ * If 'p' meets the above requirements, return 1; otherwise, return 0.
+ */
+static int entry_is_new_sparse_dir(const struct traverse_info *info,
+                                  const struct name_entry *p)
+{
+       int res, pos;
+       struct strbuf dirpath = STRBUF_INIT;
+       struct unpack_trees_options *o = info->data;
+
+       if (!S_ISDIR(p->mode))
+               return 0;
+
+       /*
+        * If the path is inside the sparse cone, it can't be a sparse directory.
+        */
+       strbuf_add(&dirpath, info->traverse_path, info->pathlen);
+       strbuf_add(&dirpath, p->path, p->pathlen);
+       strbuf_addch(&dirpath, '/');
+       if (path_in_cone_mode_sparse_checkout(dirpath.buf, o->src_index)) {
+               res = 0;
+               goto cleanup;
+       }
+
+       pos = index_name_pos_sparse(o->src_index, dirpath.buf, dirpath.len);
+       if (pos >= 0) {
+               /* Path is already in the index, not a new sparse dir */
+               res = 0;
+               goto cleanup;
+       }
+
+       /* Where would this sparse dir be inserted into the index? */
+       pos = -pos - 1;
+       if (pos >= o->src_index->cache_nr) {
+               /*
+                * Sparse dir would be inserted at the end of the index, so we
+                * know it has no child entries.
+                */
+               res = 1;
+               goto cleanup;
+       }
+
+       /*
+        * If the dir has child entries in the index, the first would be at the
+        * position the sparse directory would be inserted. If the entry at this
+        * position is inside the dir, it is not a new sparse dir.
+        */
+       res = strncmp(o->src_index->cache[pos]->name, dirpath.buf, dirpath.len);
+
+cleanup:
+       strbuf_release(&dirpath);
+       return res;
+}
+
 /*
  * Note that traverse_by_cache_tree() duplicates some logic in this function
  * without actually calling it. If you change the logic here you may need to
@@ -1078,21 +1139,44 @@ static int unpack_single_entry(int n, unsigned long mask,
                               unsigned long dirmask,
                               struct cache_entry **src,
                               const struct name_entry *names,
-                              const struct traverse_info *info)
+                              const struct traverse_info *info,
+                              int *is_new_sparse_dir)
 {
        int i;
        struct unpack_trees_options *o = info->data;
        unsigned long conflicts = info->df_conflicts | dirmask;
+       const struct name_entry *p = names;
 
-       if (mask == dirmask && !src[0])
-               return 0;
+       *is_new_sparse_dir = 0;
+       if (mask == dirmask && !src[0]) {
+               /*
+                * If we're not in a sparse index, we can't unpack a directory
+                * without recursing into it, so we return.
+                */
+               if (!o->src_index->sparse_index)
+                       return 0;
+
+               /* Find first entry with a real name (we could use "mask" too) */
+               while (!p->mode)
+                       p++;
+
+               /*
+                * If the directory is completely missing from the index but
+                * would otherwise be a sparse directory, we should unpack it.
+                * If not, we'll return and continue recursively traversing the
+                * tree.
+                */
+               *is_new_sparse_dir = entry_is_new_sparse_dir(info, p);
+               if (!*is_new_sparse_dir)
+                       return 0;
+       }
 
        /*
-        * When we have a sparse directory entry for src[0],
-        * then this isn't necessarily a directory-file conflict.
+        * When we are unpacking a sparse directory, then this isn't necessarily
+        * a directory-file conflict.
         */
-       if (mask == dirmask && src[0] &&
-           S_ISSPARSEDIR(src[0]->ce_mode))
+       if (mask == dirmask &&
+           (*is_new_sparse_dir || (src[0] && S_ISSPARSEDIR(src[0]->ce_mode))))
                conflicts = 0;
 
        /*
@@ -1352,7 +1436,7 @@ static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirma
 {
        struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
        struct unpack_trees_options *o = info->data;
-       int ret;
+       int ret, is_new_sparse_dir;
 
        assert(o->merge);
 
@@ -1376,7 +1460,7 @@ static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirma
         * "index" tree (i.e., names[0]) and adjust 'names', 'n', 'mask', and
         * 'dirmask' accordingly.
         */
-       ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info);
+       ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info, &is_new_sparse_dir);
 
        if (src[0])
                discard_cache_entry(src[0]);
@@ -1394,6 +1478,7 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
        struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
        struct unpack_trees_options *o = info->data;
        const struct name_entry *p = names;
+       int is_new_sparse_dir;
 
        /* Find first entry with a real name (we could use "mask" too) */
        while (!p->mode)
@@ -1440,7 +1525,7 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
                }
        }
 
-       if (unpack_single_entry(n, mask, dirmask, src, names, info) < 0)
+       if (unpack_single_entry(n, mask, dirmask, src, names, info, &is_new_sparse_dir))
                return -1;
 
        if (o->merge && src[0]) {
@@ -1478,6 +1563,7 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
                }
 
                if (!is_sparse_directory_entry(src[0], names, info) &&
+                   !is_new_sparse_dir &&
                    traverse_trees_recursive(n, dirmask, mask & ~dirmask,
                                                    names, info) < 0) {
                        return -1;
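To illustrate the case the new entry_is_new_sparse_dir() logic handles: with a sparse index, checking out a commit that introduces a directory entirely outside the sparse cone can now be recorded as a single sparse directory entry instead of expanding the whole index. A hedged shell sketch (URL, cone path, and branch name are illustrative):

    git clone --sparse --filter=blob:none https://example.com/repo.git repo
    cd repo
    git sparse-checkout set --cone deep/dir
    git config index.sparse true
    # Switch to a commit that adds a brand-new top-level directory
    # outside the cone; it should stay collapsed in the sparse index:
    git checkout topic-with-new-dir
    git ls-files --sparse | grep '/$'    # sparse directory entries end in "/"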
index cfe79bd081ff75837b626f1db7e3960e6dc4e5e4..299d6489a6b0a148b57fa6c8a11f9245e1dfd0dd 100644 (file)
--- a/wrapper.c
+++ b/wrapper.c
@@ -161,28 +161,6 @@ void xsetenv(const char *name, const char *value, int overwrite)
                die_errno(_("could not setenv '%s'"), name ? name : "(null)");
 }
 
-/*
- * Limit size of IO chunks, because huge chunks only cause pain.  OS X
- * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
- * the absence of bugs, large chunks can result in bad latencies when
- * you decide to kill the process.
- *
- * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX
- * that is smaller than that, clip it to SSIZE_MAX, as a call to
- * read(2) or write(2) larger than that is allowed to fail.  As the last
- * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value"
- * to override this, if the definition of SSIZE_MAX given by the platform
- * is broken.
- */
-#ifndef MAX_IO_SIZE
-# define MAX_IO_SIZE_DEFAULT (8*1024*1024)
-# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT)
-#  define MAX_IO_SIZE SSIZE_MAX
-# else
-#  define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT
-# endif
-#endif
-
 /**
  * xopen() is the same as open(), but it die()s if the open() fails.
  */