diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index f6f9e483b27e183e29e9c409669c6b23d5590cfc..60a2913a01e9d00bad828fa8eca57fdb18eff787 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -12,6 +12,7 @@
 #include "dir.h"
 #include "builtin.h"
 #include "tree.h"
+#include "cache-tree.h"
 #include "parse-options.h"
 #include "resolve-undo.h"
 #include "string-list.h"
@@ -420,6 +421,53 @@ static int get_common_prefix_len(const char *common_prefix)
        return common_prefix_len;
 }
 
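+/*
+ * Turn a tree entry into a stage #1 cache entry ("base" is the leading
+ * directory path, "pathname" the entry name) and add it to the index.
+ * Directories are not added themselves; read_tree() is asked to
+ * recurse into them instead.
+ */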
+static int read_one_entry_opt(struct index_state *istate,
+                             const struct object_id *oid,
+                             struct strbuf *base,
+                             const char *pathname,
+                             unsigned mode, int opt)
+{
+       int len;
+       struct cache_entry *ce;
+
+       if (S_ISDIR(mode))
+               return READ_TREE_RECURSIVE;
+
+       len = strlen(pathname);
+       ce = make_empty_cache_entry(istate, base->len + len);
+
+       ce->ce_mode = create_ce_mode(mode);
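+       /* all entries read from the tree go to stage #1 */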
+       ce->ce_flags = create_ce_flags(1);
+       ce->ce_namelen = base->len + len;
+       memcpy(ce->name, base->buf, base->len);
+       memcpy(ce->name + base->len, pathname, len+1);
+       oidcpy(&ce->oid, oid);
+       return add_index_entry(istate, ce, opt);
+}
+
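+/*
+ * read_tree() callback for the slow path: insert each entry through
+ * add_index_entry() so it lands at its sorted position even when
+ * stage #1 entries already exist in the index.
+ */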
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
+                         const char *pathname, unsigned mode,
+                         void *context)
+{
+       struct index_state *istate = context;
+       return read_one_entry_opt(istate, oid, base, pathname,
+                                 mode,
+                                 ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+}
+
+/*
+ * This is used when the caller knows there are no existing entries at
+ * the stage that would conflict with the entry being added.
+ */
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
+                               const char *pathname, unsigned mode,
+                               void *context)
+{
+       struct index_state *istate = context;
+       return read_one_entry_opt(istate, oid, base, pathname,
+                                 mode, ADD_CACHE_JUST_APPEND);
+}
+
 /*
  * Read the tree specified with --with-tree option
  * (typically, HEAD) into stage #1 and then
@@ -436,6 +484,8 @@ void overlay_tree_on_index(struct index_state *istate,
        struct pathspec pathspec;
        struct cache_entry *last_stage0 = NULL;
        int i;
+       read_tree_fn_t fn = NULL;
+       int err;
 
        if (get_oid(tree_name, &oid))
                die("tree-ish %s not found.", tree_name);
@@ -458,9 +508,32 @@ void overlay_tree_on_index(struct index_state *istate,
                               PATHSPEC_PREFER_CWD, prefix, matchbuf);
        } else
                memset(&pathspec, 0, sizeof(pathspec));
-       if (read_tree(the_repository, tree, 1, &pathspec, istate))
+
+       /*
+        * See if the index already has a stage #1 entry.  If so,
+        * do it the original slow way; otherwise, blindly append
+        * the entries and sort at the end.
+        */
+       for (i = 0; !fn && i < istate->cache_nr; i++) {
+               const struct cache_entry *ce = istate->cache[i];
+               if (ce_stage(ce) == 1)
+                       fn = read_one_entry;
+       }
+
+       if (!fn)
+               fn = read_one_entry_quick;
+       err = read_tree(the_repository, tree, &pathspec, fn, istate);
+       if (err)
                die("unable to read tree entries %s", tree_name);
 
+       /*
+        * Sort the blindly appended cache entries -- and nuke the cache
+        * tree, which no longer matches the index contents.
+        */
+       if (fn == read_one_entry_quick) {
+               cache_tree_free(&istate->cache_tree);
+               QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
+       }
+
        for (i = 0; i < istate->cache_nr; i++) {
                struct cache_entry *ce = istate->cache[i];
                switch (ce_stage(ce)) {