Merge branch 'md/exclude-promisor-objects-fix'
author    Junio C Hamano <gitster@pobox.com>
          Tue, 6 Nov 2018 06:50:21 +0000 (15:50 +0900)
committer Junio C Hamano <gitster@pobox.com>
          Tue, 6 Nov 2018 06:50:21 +0000 (15:50 +0900)
Operations on promisor objects make sense in the context of only a
small subset of the commands that internally use the revisions
machinery, but the "--exclude-promisor-objects" option was accepted
by commands like "log", to which it does not apply, and led to
nonsense results.  This has been corrected.

* md/exclude-promisor-objects-fix:
  exclude-promisor-objects: declare when option is allowed
  Documentation/git-log.txt: do not show --exclude-promisor-objects
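
The fix described above ("exclude-promisor-objects: declare when option is
allowed") makes the option an explicit per-command opt-in, so that only
commands where promisor filtering makes sense accept it.  Below is a minimal
C sketch of that gating pattern; the struct and field names are invented for
illustration and are not the actual revision.c interfaces.

#include <stdio.h>
#include <string.h>

/*
 * Illustration of the opt-in gating described above.  The names are
 * made up for this sketch; they are not the real rev_info fields.
 */
struct rev_opts {
        int allow_exclude_promisor_objects; /* set only by commands where it makes sense */
        int exclude_promisor_objects;
};

static int handle_revision_opt(struct rev_opts *opts, const char *arg)
{
        if (!strcmp(arg, "--exclude-promisor-objects")) {
                if (!opts->allow_exclude_promisor_objects) {
                        fprintf(stderr, "fatal: unknown option: %s\n", arg);
                        return -1;      /* e.g. "git log" never opts in */
                }
                opts->exclude_promisor_objects = 1;
        }
        return 0;
}

int main(void)
{
        struct rev_opts log_opts = { 0, 0 };      /* a command that does not opt in */
        struct rev_opts rev_list_opts = { 1, 0 }; /* a command that does */

        handle_revision_opt(&log_opts, "--exclude-promisor-objects");      /* rejected */
        handle_revision_opt(&rev_list_opts, "--exclude-promisor-objects"); /* accepted */
        return 0;
}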

Documentation/rev-list-options.txt
builtin/pack-objects.c
builtin/prune.c
builtin/rev-list.c
revision.c
revision.h
t/t4202-log.sh

diff --combined Documentation/rev-list-options.txt
index 5f1672913b8ab19f6b2cf20d5fa79cfe6b636356,21978ebbacf838328e6e686c333f97a2bddf1d5a..bab5f50b1724913c7607180897b7f92d1517dda9
@@@ -731,11 -731,6 +731,11 @@@ the requested refs
  +
  The form '--filter=sparse:path=<path>' similarly uses a sparse-checkout
  specification contained in <path>.
 ++
 +The form '--filter=tree:<depth>' omits all blobs and trees whose depth
 +from the root tree is >= <depth> (minimum depth if an object is located
 +at multiple depths in the commits traversed). Currently, only <depth>=0
 +is supported, which omits all blobs and trees.
  
  --no-filter::
        Turn off any previous `--filter=` argument.
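
The '--filter=tree:<depth>' paragraph above keys the decision to the minimum
depth at which an object is seen during the traversal.  A tiny stand-alone
illustration of that rule (not the filter implementation; the helper name is
invented here):

#include <stdio.h>

/* Keep an object unless its minimum observed depth reaches the limit. */
static int tree_filter_keeps(int min_depth_seen, int depth_limit)
{
        return min_depth_seen < depth_limit;
}

int main(void)
{
        /* A blob reachable at depths 1 and 3 has minimum depth 1. */
        printf("tree:2 keeps it: %d\n", tree_filter_keeps(1, 2)); /* 1 */
        /* With tree:0 every blob and tree is omitted. */
        printf("tree:0 keeps it: %d\n", tree_filter_keeps(1, 0)); /* 0 */
        return 0;
}
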
@@@ -761,7 -756,6 +761,6 @@@ Unexpected missing objects will raise a
  +
  The form '--missing=print' is like 'allow-any', but will also print a
  list of the missing objects.  Object IDs are prefixed with a ``?'' character.
- endif::git-rev-list[]
  
  --exclude-promisor-objects::
        (For internal use only.)  Prefilter object traversal at
        promisor boundary.  This is used with partial clone.  This is
        stronger than `--missing=allow-promisor` because it limits the
        traversal, rather than just silencing errors about missing
        objects.
+ endif::git-rev-list[]
  
  --no-walk[=(sorted|unsorted)]::
        Only show the given commits, but do not traverse their ancestors.
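
The --exclude-promisor-objects entry above distinguishes limiting the
traversal itself from merely silencing errors, which is what
'--missing=allow-promisor' does.  A compact, purely illustrative contrast of
the two behaviours (invented names, not git code):

#include <stdio.h>

enum missing_action { MISSING_ERROR, MISSING_ALLOW_PROMISOR };

/* -1: error, 0: skipped or silenced, 1: object is processed. */
static int handle_object(int is_promisor, int present,
                         int exclude_promisor_objects,
                         enum missing_action action)
{
        if (exclude_promisor_objects && is_promisor)
                return 0;       /* never enqueued: the traversal itself is limited */
        if (!present) {
                if (action == MISSING_ALLOW_PROMISOR && is_promisor)
                        return 0;       /* walked anyway, error merely silenced */
                return -1;
        }
        return 1;
}

int main(void)
{
        /* A present promisor object: excluded vs. still traversed. */
        printf("%d %d\n",
               handle_object(1, 1, 1, MISSING_ERROR),          /* 0 */
               handle_object(1, 1, 0, MISSING_ALLOW_PROMISOR)); /* 1 */
        return 0;
}
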
diff --combined builtin/pack-objects.c
index e50c6cd1ff25ce4d65e0ebd854d052c3d1160a2a,8ac8ca1d2609422d603b2a1f8e65027e4e8c0697..c99ee79c31a84a3f67735cb44e85c156f63ad966
@@@ -1,6 -1,5 +1,6 @@@
  #include "builtin.h"
  #include "cache.h"
 +#include "repository.h"
  #include "config.h"
  #include "attr.h"
  #include "object.h"
  #include "streaming.h"
  #include "thread-utils.h"
  #include "pack-bitmap.h"
 +#include "delta-islands.h"
  #include "reachable.h"
  #include "sha1-array.h"
  #include "argv-array.h"
  #include "list.h"
  #include "packfile.h"
 +#include "object-store.h"
 +#include "dir.h"
 +#include "midx.h"
 +
 +#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
 +#define SIZE(obj) oe_size(&to_pack, obj)
 +#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
 +#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
 +#define DELTA(obj) oe_delta(&to_pack, obj)
 +#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
 +#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
 +#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
 +#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
 +#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
 +#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
 +#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
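
These macros wrap the oe_* accessors that replace direct pointer fields on
struct object_entry.  Later hunks in this diff ('entry->delta_sibling_idx =
DELTA(e)->delta_child_idx', '&to_pack.objects[entry->delta_idx - 1]') show the
delta links stored as 1-based indices into the to_pack array, with 0 meaning
"none".  A self-contained sketch of that encoding, with invented names:

#include <stdint.h>
#include <stdio.h>

struct entry {
        uint32_t delta_idx;     /* 1-based index of the delta base; 0 == no base */
};

struct pack {
        struct entry *objects;
        uint32_t nr_objects;
};

static struct entry *entry_delta(struct pack *p, const struct entry *e)
{
        return e->delta_idx ? &p->objects[e->delta_idx - 1] : NULL;
}

static void entry_set_delta(struct pack *p, struct entry *e, struct entry *base)
{
        e->delta_idx = base ? (uint32_t)(base - p->objects) + 1 : 0;
}

int main(void)
{
        struct entry objects[2] = { { 0 }, { 0 } };
        struct pack p = { objects, 2 };

        entry_set_delta(&p, &objects[1], &objects[0]);
        printf("base of #1 is #%td\n", entry_delta(&p, &objects[1]) - objects);
        return 0;
}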
  
  static const char *pack_usage[] = {
        N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
@@@ -61,9 -43,7 +61,9 @@@
  static struct packing_data to_pack;
  
  static struct pack_idx_entry **written_list;
 -static uint32_t nr_result, nr_written;
 +static uint32_t nr_result, nr_written, nr_seen;
 +static struct bitmap_index *bitmap_git;
 +static uint32_t write_layer;
  
  static int non_empty;
  static int reuse_delta = 1, reuse_object = 1;
@@@ -73,8 -53,7 +73,8 @@@ static int pack_loose_unreachable
  static int local;
  static int have_non_local_packs;
  static int incremental;
 -static int ignore_packed_keep;
 +static int ignore_packed_keep_on_disk;
 +static int ignore_packed_keep_in_core;
  static int allow_ofs_delta;
  static struct pack_idx_option pack_idx_opts;
  static const char *base_name;
@@@ -84,7 -63,6 +84,7 @@@ static unsigned long pack_size_limit
  static int depth = 50;
  static int delta_search_threads;
  static int pack_to_stdout;
 +static int thin;
  static int num_preferred_base;
  static struct progress *progress_state;
  
@@@ -99,10 -77,8 +99,10 @@@ static uint16_t write_bitmap_options
  
  static int exclude_promisor_objects;
  
 +static int use_delta_islands;
 +
  static unsigned long delta_cache_size = 0;
 -static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
 +static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
  static unsigned long cache_max_small_delta_size = 1000;
  
  static unsigned long window_memory_limit = 0;
@@@ -146,23 -122,18 +146,23 @@@ static void *get_delta(struct object_en
        void *buf, *base_buf, *delta_buf;
        enum object_type type;
  
 -      buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
 +      buf = read_object_file(&entry->idx.oid, &type, &size);
        if (!buf)
 -              die("unable to read %s", oid_to_hex(&entry->idx.oid));
 -      base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
 -                                &base_size);
 +              die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
 +      base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
 +                                  &base_size);
        if (!base_buf)
                die("unable to read %s",
 -                  oid_to_hex(&entry->delta->idx.oid));
 +                  oid_to_hex(&DELTA(entry)->idx.oid));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
 -      if (!delta_buf || delta_size != entry->delta_size)
 -              die("delta size changed");
 +      /*
 +       * We successfully computed this delta once but dropped it for
 +       * memory reasons. Something is very wrong if this time we
 +       * recompute and create a different delta.
 +       */
 +      if (!delta_buf || delta_size != DELTA_SIZE(entry))
 +              BUG("delta size changed");
        free(buf);
        free(base_buf);
        return delta_buf;
@@@ -292,15 -263,15 +292,15 @@@ static unsigned long write_no_reuse_obj
        enum object_type type;
        void *buf;
        struct git_istream *st = NULL;
 +      const unsigned hashsz = the_hash_algo->rawsz;
  
        if (!usable_delta) {
 -              if (entry->type == OBJ_BLOB &&
 -                  entry->size > big_file_threshold &&
 -                  (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
 +              if (oe_type(entry) == OBJ_BLOB &&
 +                  oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
 +                  (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
 -                      buf = read_sha1_file(entry->idx.oid.hash, &type,
 -                                           &size);
 +                      buf = read_object_file(&entry->idx.oid, &type, &size);
                        if (!buf)
                                die(_("unable to read %s"),
                                    oid_to_hex(&entry->idx.oid));
                FREE_AND_NULL(entry->delta_data);
                entry->z_delta_size = 0;
        } else if (entry->delta_data) {
 -              size = entry->delta_size;
 +              size = DELTA_SIZE(entry);
                buf = entry->delta_data;
                entry->delta_data = NULL;
 -              type = (allow_ofs_delta && entry->delta->idx.offset) ?
 +              type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        } else {
                buf = get_delta(entry);
 -              size = entry->delta_size;
 -              type = (allow_ofs_delta && entry->delta->idx.offset) ?
 +              size = DELTA_SIZE(entry);
 +              type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        }
  
                 * encoding of the relative offset for the delta
                 * base from this object's position in the pack.
                 */
 -              off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 +              off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        dheader[--pos] = 128 | (--ofs & 127);
 -              if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
 +              if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
        } else if (type == OBJ_REF_DELTA) {
                /*
                 * Deltas with a base reference contain
 -               * an additional 20 bytes for the base sha1.
 +               * additional bytes for the base object ID.
                 */
 -              if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 +              if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                hashwrite(f, header, hdrlen);
 -              hashwrite(f, entry->delta->idx.oid.hash, 20);
 -              hdrlen += 20;
 +              hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
 +              hdrlen += hashsz;
        } else {
 -              if (limit && hdrlen + datalen + 20 >= limit) {
 +              if (limit && hdrlen + datalen + hashsz >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
  static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
  {
 -      struct packed_git *p = entry->in_pack;
 +      struct packed_git *p = IN_PACK(entry);
        struct pack_window *w_curs = NULL;
        struct revindex_entry *revidx;
        off_t offset;
 -      enum object_type type = entry->type;
 +      enum object_type type = oe_type(entry);
        off_t datalen;
        unsigned char header[MAX_PACK_OBJECT_HEADER],
                      dheader[MAX_PACK_OBJECT_HEADER];
        unsigned hdrlen;
 +      const unsigned hashsz = the_hash_algo->rawsz;
 +      unsigned long entry_size = SIZE(entry);
  
 -      if (entry->delta)
 -              type = (allow_ofs_delta && entry->delta->idx.offset) ?
 +      if (DELTA(entry))
 +              type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        hdrlen = encode_in_pack_object_header(header, sizeof(header),
 -                                            type, entry->size);
 +                                            type, entry_size);
  
        offset = entry->in_pack_offset;
        revidx = find_pack_revindex(p, offset);
        datalen = revidx[1].offset - offset;
        if (!pack_to_stdout && p->index_version > 1 &&
            check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
 -              error("bad packed object CRC for %s",
 +              error(_("bad packed object CRC for %s"),
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
        datalen -= entry->in_pack_header_size;
  
        if (!pack_to_stdout && p->index_version == 1 &&
 -          check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
 -              error("corrupt packed object for %s",
 +          check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
 +              error(_("corrupt packed object for %s"),
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
        }
  
        if (type == OBJ_OFS_DELTA) {
 -              off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 +              off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        dheader[--pos] = 128 | (--ofs & 127);
 -              if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
 +              if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                hdrlen += sizeof(dheader) - pos;
                reused_delta++;
        } else if (type == OBJ_REF_DELTA) {
 -              if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 +              if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                hashwrite(f, header, hdrlen);
 -              hashwrite(f, entry->delta->idx.oid.hash, 20);
 -              hdrlen += 20;
 +              hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
 +              hdrlen += hashsz;
                reused_delta++;
        } else {
 -              if (limit && hdrlen + datalen + 20 >= limit) {
 +              if (limit && hdrlen + datalen + hashsz >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
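
Both the non-reuse and reuse writers above emit an OBJ_OFS_DELTA base offset
with the same variable-length scheme ('dheader[pos] = ofs & 127; while (ofs
>>= 7) dheader[--pos] = 128 | (--ofs & 127);'), while the hard-coded 20s
around it become the_hash_algo->rawsz.  A stand-alone round-trip sketch of
that offset encoding and the matching decoder (buffer handling invented for
the example):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode ofs into the *last* bytes of out[cap]; returns the encoded length. */
static unsigned encode_ofs(uint64_t ofs, unsigned char *out, unsigned cap)
{
        unsigned pos = cap - 1;

        out[pos] = ofs & 127;
        while (ofs >>= 7)
                out[--pos] = 128 | (--ofs & 127);
        return cap - pos;
}

/* Decode starting at the first encoded byte (most significant first). */
static uint64_t decode_ofs(const unsigned char *in)
{
        uint64_t ofs = *in & 127;

        while (*in++ & 128)
                ofs = ((ofs + 1) << 7) | (*in & 127);
        return ofs;
}

int main(void)
{
        unsigned char buf[16];
        uint64_t ofs = 123456789;
        unsigned len = encode_ofs(ofs, buf, sizeof(buf));

        assert(decode_ofs(buf + sizeof(buf) - len) == ofs);
        printf("round-tripped %u byte(s)\n", len);
        return 0;
}
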
@@@ -496,29 -465,28 +496,29 @@@ static off_t write_object(struct hashfi
        else
                limit = pack_size_limit - write_offset;
  
 -      if (!entry->delta)
 +      if (!DELTA(entry))
                usable_delta = 0;       /* no delta */
        else if (!pack_size_limit)
               usable_delta = 1;        /* unlimited packfile */
 -      else if (entry->delta->idx.offset == (off_t)-1)
 +      else if (DELTA(entry)->idx.offset == (off_t)-1)
                usable_delta = 0;       /* base was written to another pack */
 -      else if (entry->delta->idx.offset)
 +      else if (DELTA(entry)->idx.offset)
                usable_delta = 1;       /* base already exists in this pack */
        else
                usable_delta = 0;       /* base could end up in another pack */
  
        if (!reuse_object)
                to_reuse = 0;   /* explicit */
 -      else if (!entry->in_pack)
 +      else if (!IN_PACK(entry))
                to_reuse = 0;   /* can't reuse what we don't have */
 -      else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
 +      else if (oe_type(entry) == OBJ_REF_DELTA ||
 +               oe_type(entry) == OBJ_OFS_DELTA)
                                /* check_object() decided it for us ... */
                to_reuse = usable_delta;
                                /* ... but pack split may override that */
 -      else if (entry->type != entry->in_pack_type)
 +      else if (oe_type(entry) != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
 -      else if (entry->delta)
 +      else if (DELTA(entry))
                to_reuse = 0;   /* we want to pack afresh */
        else
                to_reuse = 1;   /* we have it in-pack undeltified,
@@@ -561,7 -529,7 +561,7 @@@ static enum write_one_status write_one(
         */
        recursing = (e->idx.offset == 1);
        if (recursing) {
 -              warning("recursive delta detected for object %s",
 +              warning(_("recursive delta detected for object %s"),
                        oid_to_hex(&e->idx.oid));
                return WRITE_ONE_RECURSIVE;
        } else if (e->idx.offset || e->preferred_base) {
        }
  
        /* if we are deltified, write out base object first. */
 -      if (e->delta) {
 +      if (DELTA(e)) {
                e->idx.offset = 1; /* now recurse */
 -              switch (write_one(f, e->delta, offset)) {
 +              switch (write_one(f, DELTA(e), offset)) {
                case WRITE_ONE_RECURSIVE:
                        /* we cannot depend on this one */
 -                      e->delta = NULL;
 +                      SET_DELTA(e, NULL);
                        break;
                default:
                        break;
  
        /* make sure off_t is sufficiently large not to wrap */
        if (signed_add_overflows(*offset, size))
 -              die("pack too large for current definition of off_t");
 +              die(_("pack too large for current definition of off_t"));
        *offset += size;
        return WRITE_ONE_WRITTEN;
  }
@@@ -620,7 -588,7 +620,7 @@@ static inline void add_to_write_order(s
                               unsigned int *endp,
                               struct object_entry *e)
  {
 -      if (e->filled)
 +      if (e->filled || oe_layer(&to_pack, e) != write_layer)
                return;
        wo[(*endp)++] = e;
        e->filled = 1;
@@@ -637,34 -605,34 +637,34 @@@ static void add_descendants_to_write_or
                        /* add this node... */
                        add_to_write_order(wo, endp, e);
                        /* all its siblings... */
 -                      for (s = e->delta_sibling; s; s = s->delta_sibling) {
 +                      for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
                                add_to_write_order(wo, endp, s);
                        }
                }
                /* drop down a level to add left subtree nodes if possible */
 -              if (e->delta_child) {
 +              if (DELTA_CHILD(e)) {
                        add_to_order = 1;
 -                      e = e->delta_child;
 +                      e = DELTA_CHILD(e);
                } else {
                        add_to_order = 0;
                        /* our sibling might have some children, it is next */
 -                      if (e->delta_sibling) {
 -                              e = e->delta_sibling;
 +                      if (DELTA_SIBLING(e)) {
 +                              e = DELTA_SIBLING(e);
                                continue;
                        }
                        /* go back to our parent node */
 -                      e = e->delta;
 -                      while (e && !e->delta_sibling) {
 +                      e = DELTA(e);
 +                      while (e && !DELTA_SIBLING(e)) {
                                /* we're on the right side of a subtree, keep
                                 * going up until we can go right again */
 -                              e = e->delta;
 +                              e = DELTA(e);
                        }
                        if (!e) {
                                /* done- we hit our original root node */
                                return;
                        }
                        /* pass it off to sibling at this level */
 -                      e = e->delta_sibling;
 +                      e = DELTA_SIBLING(e);
                }
        };
  }
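
add_descendants_to_write_order() above walks a delta family through the
DELTA_CHILD/DELTA_SIBLING links, i.e. a first-child/next-sibling tree;
compute_write_order(), later in this diff, builds those links by pushing each
entry as its base's new first child.  A pointer-based sketch of that
representation, separate from the index-based fields used here:

#include <stdio.h>
#include <stddef.h>

struct node {
        const char *name;
        struct node *delta;             /* delta base ("parent") */
        struct node *delta_child;       /* first object deltified against us */
        struct node *delta_sibling;     /* next object sharing the same base */
};

/* Same linking shape as compute_write_order(): newest child goes first. */
static void link_child(struct node *base, struct node *child)
{
        child->delta = base;
        child->delta_sibling = base->delta_child;
        base->delta_child = child;
}

/* Recursive walk for brevity; the function above does it iteratively. */
static void visit(const struct node *n)
{
        printf("%s\n", n->name);
        for (const struct node *c = n->delta_child; c; c = c->delta_sibling)
                visit(c);
}

int main(void)
{
        struct node base = { "base", NULL, NULL, NULL };
        struct node a = { "a", NULL, NULL, NULL };
        struct node b = { "b", NULL, NULL, NULL };

        link_child(&base, &a);
        link_child(&base, &b);  /* b becomes the first child, a its sibling */
        visit(&base);           /* prints: base, b, a */
        return 0;
}
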
@@@ -675,20 -643,53 +675,20 @@@ static void add_family_to_write_order(s
  {
        struct object_entry *root;
  
 -      for (root = e; root->delta; root = root->delta)
 +      for (root = e; DELTA(root); root = DELTA(root))
                ; /* nothing */
        add_descendants_to_write_order(wo, endp, root);
  }
  
 -static struct object_entry **compute_write_order(void)
 +static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
  {
 -      unsigned int i, wo_end, last_untagged;
 -
 -      struct object_entry **wo;
 +      unsigned int i, last_untagged;
        struct object_entry *objects = to_pack.objects;
  
        for (i = 0; i < to_pack.nr_objects; i++) {
 -              objects[i].tagged = 0;
 -              objects[i].filled = 0;
 -              objects[i].delta_child = NULL;
 -              objects[i].delta_sibling = NULL;
 -      }
 -
 -      /*
 -       * Fully connect delta_child/delta_sibling network.
 -       * Make sure delta_sibling is sorted in the original
 -       * recency order.
 -       */
 -      for (i = to_pack.nr_objects; i > 0;) {
 -              struct object_entry *e = &objects[--i];
 -              if (!e->delta)
 -                      continue;
 -              /* Mark me as the first child */
 -              e->delta_sibling = e->delta->delta_child;
 -              e->delta->delta_child = e;
 -      }
 -
 -      /*
 -       * Mark objects that are at the tip of tags.
 -       */
 -      for_each_tag_ref(mark_tagged, NULL);
 -
 -      /*
 -       * Give the objects in the original recency order until
 -       * we see a tagged tip.
 -       */
 -      ALLOC_ARRAY(wo, to_pack.nr_objects);
 -      for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
                        break;
 -              add_to_write_order(wo, &wo_end, &objects[i]);
 +              add_to_write_order(wo, wo_end, &objects[i]);
        }
        last_untagged = i;
  
         */
        for (; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
 -                      add_to_write_order(wo, &wo_end, &objects[i]);
 +                      add_to_write_order(wo, wo_end, &objects[i]);
        }
  
        /*
         * And then all remaining commits and tags.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
 -              if (objects[i].type != OBJ_COMMIT &&
 -                  objects[i].type != OBJ_TAG)
 +              if (oe_type(&objects[i]) != OBJ_COMMIT &&
 +                  oe_type(&objects[i]) != OBJ_TAG)
                        continue;
 -              add_to_write_order(wo, &wo_end, &objects[i]);
 +              add_to_write_order(wo, wo_end, &objects[i]);
        }
  
        /*
         * And then all the trees.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
 -              if (objects[i].type != OBJ_TREE)
 +              if (oe_type(&objects[i]) != OBJ_TREE)
                        continue;
 -              add_to_write_order(wo, &wo_end, &objects[i]);
 +              add_to_write_order(wo, wo_end, &objects[i]);
        }
  
        /*
         * Finally all the rest in really tight order
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
 -              if (!objects[i].filled)
 -                      add_family_to_write_order(wo, &wo_end, &objects[i]);
 +              if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
 +                      add_family_to_write_order(wo, wo_end, &objects[i]);
 +      }
 +}
 +
 +static struct object_entry **compute_write_order(void)
 +{
 +      uint32_t max_layers = 1;
 +      unsigned int i, wo_end;
 +
 +      struct object_entry **wo;
 +      struct object_entry *objects = to_pack.objects;
 +
 +      for (i = 0; i < to_pack.nr_objects; i++) {
 +              objects[i].tagged = 0;
 +              objects[i].filled = 0;
 +              SET_DELTA_CHILD(&objects[i], NULL);
 +              SET_DELTA_SIBLING(&objects[i], NULL);
 +      }
 +
 +      /*
 +       * Fully connect delta_child/delta_sibling network.
 +       * Make sure delta_sibling is sorted in the original
 +       * recency order.
 +       */
 +      for (i = to_pack.nr_objects; i > 0;) {
 +              struct object_entry *e = &objects[--i];
 +              if (!DELTA(e))
 +                      continue;
 +              /* Mark me as the first child */
 +              e->delta_sibling_idx = DELTA(e)->delta_child_idx;
 +              SET_DELTA_CHILD(DELTA(e), e);
        }
  
 +      /*
 +       * Mark objects that are at the tip of tags.
 +       */
 +      for_each_tag_ref(mark_tagged, NULL);
 +
 +      if (use_delta_islands)
 +              max_layers = compute_pack_layers(&to_pack);
 +
 +      ALLOC_ARRAY(wo, to_pack.nr_objects);
 +      wo_end = 0;
 +
 +      for (; write_layer < max_layers; ++write_layer)
 +              compute_layer_order(wo, &wo_end);
 +
        if (wo_end != to_pack.nr_objects)
 -              die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
 +              die(_("ordered %u objects, expected %"PRIu32),
 +                  wo_end, to_pack.nr_objects);
  
        return wo;
  }
@@@ -785,18 -741,18 +785,18 @@@ static off_t write_reused_pack(struct h
        int fd;
  
        if (!is_pack_valid(reuse_packfile))
 -              die("packfile is invalid: %s", reuse_packfile->pack_name);
 +              die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
  
        fd = git_open(reuse_packfile->pack_name);
        if (fd < 0)
 -              die_errno("unable to open packfile for reuse: %s",
 +              die_errno(_("unable to open packfile for reuse: %s"),
                          reuse_packfile->pack_name);
  
        if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
 -              die_errno("unable to seek in reused packfile");
 +              die_errno(_("unable to seek in reused packfile"));
  
        if (reuse_packfile_offset < 0)
 -              reuse_packfile_offset = reuse_packfile->pack_size - 20;
 +              reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
  
        total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
  
                int read_pack = xread(fd, buffer, sizeof(buffer));
  
                if (read_pack <= 0)
 -                      die_errno("unable to read from reused packfile");
 +                      die_errno(_("unable to read from reused packfile"));
  
                if (read_pack > to_write)
                        read_pack = to_write;
@@@ -881,11 -837,11 +881,11 @@@ static void write_pack_file(void
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
 -                      hashclose(f, oid.hash, CSUM_CLOSE);
 +                      finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
 -                      hashclose(f, oid.hash, CSUM_FSYNC);
 +                      finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
                } else {
 -                      int fd = hashclose(f, oid.hash, 0);
 +                      int fd = finalize_hashfile(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
                         * to preserve this property.
                         */
                        if (stat(pack_tmp_name, &st) < 0) {
 -                              warning_errno("failed to stat %s", pack_tmp_name);
 +                              warning_errno(_("failed to stat %s"), pack_tmp_name);
                        } else if (!last_mtime) {
                                last_mtime = st.st_mtime;
                        } else {
                                utb.actime = st.st_atime;
                                utb.modtime = --last_mtime;
                                if (utime(pack_tmp_name, &utb) < 0)
 -                                      warning_errno("failed utime() on %s", pack_tmp_name);
 +                                      warning_errno(_("failed utime() on %s"), pack_tmp_name);
                        }
  
                        strbuf_addf(&tmpname, "%s-", base_name);
  
                        if (write_bitmap_index) {
                                bitmap_writer_set_checksum(oid.hash);
 -                              bitmap_writer_build_type_index(written_list, nr_written);
 +                              bitmap_writer_build_type_index(
 +                                      &to_pack, written_list, nr_written);
                        }
  
                        finish_tmp_packfile(&tmpname, pack_tmp_name,
        free(write_order);
        stop_progress(&progress_state);
        if (written != nr_result)
 -              die("wrote %"PRIu32" objects while expecting %"PRIu32,
 -                      written, nr_result);
 +              die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
 +                  written, nr_result);
  }
  
  static int no_try_delta(const char *path)
  
        if (!check)
                check = attr_check_initl("delta", NULL);
 -      if (git_check_attr(path, check))
 -              return 0;
 +      git_check_attr(&the_index, path, check);
        if (ATTR_FALSE(check->items[0].value))
                return 1;
        return 0;
@@@ -1026,16 -982,13 +1026,16 @@@ static int want_found_object(int exclud
         * Otherwise, we signal "-1" at the end to tell the caller that we do
         * not know either way, and it needs to check more packs.
         */
 -      if (!ignore_packed_keep &&
 +      if (!ignore_packed_keep_on_disk &&
 +          !ignore_packed_keep_in_core &&
            (!local || !have_non_local_packs))
                return 1;
  
        if (local && !p->pack_local)
                return 0;
 -      if (ignore_packed_keep && p->pack_local && p->pack_keep)
 +      if (p->pack_local &&
 +          ((ignore_packed_keep_on_disk && p->pack_keep) ||
 +           (ignore_packed_keep_in_core && p->pack_keep_in_core)))
                return 0;
  
        /* we don't know yet; keep looking for more packs */
@@@ -1058,9 -1011,8 +1058,9 @@@ static int want_object_in_pack(const st
  {
        int want;
        struct list_head *pos;
 +      struct multi_pack_index *m;
  
 -      if (!exclude && local && has_loose_object_nonlocal(oid->hash))
 +      if (!exclude && local && has_loose_object_nonlocal(oid))
                return 0;
  
        /*
                        return want;
        }
  
 -      list_for_each(pos, &packed_git_mru) {
 +      for (m = get_multi_pack_index(the_repository); m; m = m->next) {
 +              struct pack_entry e;
 +              if (fill_midx_entry(oid, &e, m)) {
 +                      struct packed_git *p = e.p;
 +                      off_t offset;
 +
 +                      if (p == *found_pack)
 +                              offset = *found_offset;
 +                      else
 +                              offset = find_pack_entry_one(oid->hash, p);
 +
 +                      if (offset) {
 +                              if (!*found_pack) {
 +                                      if (!is_pack_valid(p))
 +                                              continue;
 +                                      *found_offset = offset;
 +                                      *found_pack = p;
 +                              }
 +                              want = want_found_object(exclude, p);
 +                              if (want != -1)
 +                                      return want;
 +                      }
 +              }
 +      }
 +
 +      list_for_each(pos, get_packed_git_mru(the_repository)) {
                struct packed_git *p = list_entry(pos, struct packed_git, mru);
                off_t offset;
  
                        }
                        want = want_found_object(exclude, p);
                        if (!exclude && want > 0)
 -                              list_move(&p->mru, &packed_git_mru);
 +                              list_move(&p->mru,
 +                                        get_packed_git_mru(the_repository));
                        if (want != -1)
                                return want;
                }
@@@ -1140,13 -1066,14 +1140,13 @@@ static void create_object_entry(const s
  
        entry = packlist_alloc(&to_pack, oid->hash, index_pos);
        entry->hash = hash;
 -      if (type)
 -              entry->type = type;
 +      oe_set_type(entry, type);
        if (exclude)
                entry->preferred_base = 1;
        else
                nr_result++;
        if (found_pack) {
 -              entry->in_pack = found_pack;
 +              oe_set_in_pack(&to_pack, entry, found_pack);
                entry->in_pack_offset = found_offset;
        }
  
@@@ -1164,8 -1091,6 +1164,8 @@@ static int add_object_entry(const struc
        off_t found_offset = 0;
        uint32_t index_pos;
  
 +      display_progress(progress_state, ++nr_seen);
 +
        if (have_duplicate_entry(oid, exclude, &index_pos))
                return 0;
  
        create_object_entry(oid, type, pack_name_hash(name),
                            exclude, name && no_try_delta(name),
                            index_pos, found_pack, found_offset);
 -
 -      display_progress(progress_state, nr_result);
        return 1;
  }
  
@@@ -1191,8 -1118,6 +1191,8 @@@ static int add_object_entry_from_bitmap
  {
        uint32_t index_pos;
  
 +      display_progress(progress_state, ++nr_seen);
 +
        if (have_duplicate_entry(oid, 0, &index_pos))
                return 0;
  
                return 0;
  
        create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
 -
 -      display_progress(progress_state, nr_result);
        return 1;
  }
  
@@@ -1247,7 -1174,7 +1247,7 @@@ static struct pbase_tree_cache *pbase_t
         */
        for (neigh = 0; neigh < 8; neigh++) {
                ent = pbase_tree_cache[my_ix];
 -              if (ent && !oidcmp(&ent->oid, oid)) {
 +              if (ent && oideq(&ent->oid, oid)) {
                        ent->ref++;
                        return ent;
                }
        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
 -      data = read_sha1_file(oid->hash, &type, &size);
 +      data = read_object_file(oid, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
@@@ -1424,12 -1351,12 +1424,12 @@@ static void add_preferred_base(struct o
        if (window <= num_preferred_base++)
                return;
  
 -      data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
 +      data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
        if (!data)
                return;
  
        for (it = pbase_tree; it; it = it->next) {
 -              if (!oidcmp(&it->pcache.oid, &tree_oid)) {
 +              if (oideq(&it->pcache.oid, &tree_oid)) {
                        free(data);
                        return;
                }
@@@ -1469,63 -1396,10 +1469,63 @@@ static void cleanup_preferred_base(void
        done_pbase_paths_num = done_pbase_paths_alloc = 0;
  }
  
 +/*
 + * Return 1 iff the object specified by "delta" can be sent
 + * literally as a delta against the base in "base_sha1". If
 + * so, then *base_out will point to the entry in our packing
 + * list, or NULL if we must use the external-base list.
 + *
 + * Depth value does not matter - find_deltas() will
 + * never consider reused delta as the base object to
 + * deltify other objects against, in order to avoid
 + * circular deltas.
 + */
 +static int can_reuse_delta(const unsigned char *base_sha1,
 +                         struct object_entry *delta,
 +                         struct object_entry **base_out)
 +{
 +      struct object_entry *base;
 +
 +      if (!base_sha1)
 +              return 0;
 +
 +      /*
 +       * First see if we're already sending the base (or it's explicitly in
 +       * our "excluded" list).
 +       */
 +      base = packlist_find(&to_pack, base_sha1, NULL);
 +      if (base) {
 +              if (!in_same_island(&delta->idx.oid, &base->idx.oid))
 +                      return 0;
 +              *base_out = base;
 +              return 1;
 +      }
 +
 +      /*
 +       * Otherwise, reachability bitmaps may tell us if the receiver has it,
 +       * even if it was buried too deep in history to make it into the
 +       * packing list.
 +       */
 +      if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
 +              if (use_delta_islands) {
 +                      struct object_id base_oid;
 +                      hashcpy(base_oid.hash, base_sha1);
 +                      if (!in_same_island(&delta->idx.oid, &base_oid))
 +                              return 0;
 +              }
 +              *base_out = NULL;
 +              return 1;
 +      }
 +
 +      return 0;
 +}
 +
  static void check_object(struct object_entry *entry)
  {
 -      if (entry->in_pack) {
 -              struct packed_git *p = entry->in_pack;
 +      unsigned long canonical_size;
 +
 +      if (IN_PACK(entry)) {
 +              struct packed_git *p = IN_PACK(entry);
                struct pack_window *w_curs = NULL;
                const unsigned char *base_ref = NULL;
                struct object_entry *base_entry;
                unsigned long avail;
                off_t ofs;
                unsigned char *buf, c;
 +              enum object_type type;
 +              unsigned long in_pack_size;
  
                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
  
                 * since non-delta representations could still be reused.
                 */
                used = unpack_object_header_buffer(buf, avail,
 -                                                 &entry->in_pack_type,
 -                                                 &entry->size);
 +                                                 &type,
 +                                                 &in_pack_size);
                if (used == 0)
                        goto give_up;
  
 +              if (type < 0)
 +                      BUG("invalid type %d", type);
 +              entry->in_pack_type = type;
 +
                /*
                 * Determine if this is a delta and if so whether we can
                 * reuse it or not.  Otherwise let's find out as cheaply as
                switch (entry->in_pack_type) {
                default:
                        /* Not a delta hence we've already got all we need. */
 -                      entry->type = entry->in_pack_type;
 +                      oe_set_type(entry, entry->in_pack_type);
 +                      SET_SIZE(entry, in_pack_size);
                        entry->in_pack_header_size = used;
 -                      if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
 +                      if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
                                goto give_up;
                        unuse_pack(&w_curs);
                        return;
                        if (reuse_delta && !entry->preferred_base)
                                base_ref = use_pack(p, &w_curs,
                                                entry->in_pack_offset + used, NULL);
 -                      entry->in_pack_header_size = used + 20;
 +                      entry->in_pack_header_size = used + the_hash_algo->rawsz;
                        break;
                case OBJ_OFS_DELTA:
                        buf = use_pack(p, &w_curs,
                        while (c & 128) {
                                ofs += 1;
                                if (!ofs || MSB(ofs, 7)) {
 -                                      error("delta base offset overflow in pack for %s",
 +                                      error(_("delta base offset overflow in pack for %s"),
                                              oid_to_hex(&entry->idx.oid));
                                        goto give_up;
                                }
                        }
                        ofs = entry->in_pack_offset - ofs;
                        if (ofs <= 0 || ofs >= entry->in_pack_offset) {
 -                              error("delta base offset out of bound for %s",
 +                              error(_("delta base offset out of bound for %s"),
                                      oid_to_hex(&entry->idx.oid));
                                goto give_up;
                        }
                        break;
                }
  
 -              if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
 -                      /*
 -                       * If base_ref was set above that means we wish to
 -                       * reuse delta data, and we even found that base
 -                       * in the list of objects we want to pack. Goodie!
 -                       *
 -                       * Depth value does not matter - find_deltas() will
 -                       * never consider reused delta as the base object to
 -                       * deltify other objects against, in order to avoid
 -                       * circular deltas.
 -                       */
 -                      entry->type = entry->in_pack_type;
 -                      entry->delta = base_entry;
 -                      entry->delta_size = entry->size;
 -                      entry->delta_sibling = base_entry->delta_child;
 -                      base_entry->delta_child = entry;
 +              if (can_reuse_delta(base_ref, entry, &base_entry)) {
 +                      oe_set_type(entry, entry->in_pack_type);
 +                      SET_SIZE(entry, in_pack_size); /* delta size */
 +                      SET_DELTA_SIZE(entry, in_pack_size);
 +
 +                      if (base_entry) {
 +                              SET_DELTA(entry, base_entry);
 +                              entry->delta_sibling_idx = base_entry->delta_child_idx;
 +                              SET_DELTA_CHILD(base_entry, entry);
 +                      } else {
 +                              SET_DELTA_EXT(entry, base_ref);
 +                      }
 +
                        unuse_pack(&w_curs);
                        return;
                }
  
 -              if (entry->type) {
 +              if (oe_type(entry)) {
 +                      off_t delta_pos;
 +
                        /*
                         * This must be a delta and we already know what the
                         * final object type is.  Let's extract the actual
                         * object size from the delta header.
                         */
 -                      entry->size = get_size_from_delta(p, &w_curs,
 -                                      entry->in_pack_offset + entry->in_pack_header_size);
 -                      if (entry->size == 0)
 +                      delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
 +                      canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
 +                      if (canonical_size == 0)
                                goto give_up;
 +                      SET_SIZE(entry, canonical_size);
                        unuse_pack(&w_curs);
                        return;
                }
                unuse_pack(&w_curs);
        }
  
 -      entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
 -      /*
 -       * The error condition is checked in prepare_pack().  This is
 -       * to permit a missing preferred base object to be ignored
 -       * as a preferred base.  Doing so can result in a larger
 -       * pack file, but the transfer will still take place.
 -       */
 +      oe_set_type(entry,
 +                  oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
 +      if (entry->type_valid) {
 +              SET_SIZE(entry, canonical_size);
 +      } else {
 +              /*
 +               * Bad object type is checked in prepare_pack().  This is
 +               * to permit a missing preferred base object to be ignored
 +               * as a preferred base.  Doing so can result in a larger
 +               * pack file, but the transfer will still take place.
 +               */
 +      }
  }
  
  static int pack_offset_sort(const void *_a, const void *_b)
  {
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;
 +      const struct packed_git *a_in_pack = IN_PACK(a);
 +      const struct packed_git *b_in_pack = IN_PACK(b);
  
        /* avoid filesystem thrashing with loose objects */
 -      if (!a->in_pack && !b->in_pack)
 +      if (!a_in_pack && !b_in_pack)
                return oidcmp(&a->idx.oid, &b->idx.oid);
  
 -      if (a->in_pack < b->in_pack)
 +      if (a_in_pack < b_in_pack)
                return -1;
 -      if (a->in_pack > b->in_pack)
 +      if (a_in_pack > b_in_pack)
                return 1;
        return a->in_pack_offset < b->in_pack_offset ? -1 :
                        (a->in_pack_offset > b->in_pack_offset);
   */
  static void drop_reused_delta(struct object_entry *entry)
  {
 -      struct object_entry **p = &entry->delta->delta_child;
 +      unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
        struct object_info oi = OBJECT_INFO_INIT;
 +      enum object_type type;
 +      unsigned long size;
  
 -      while (*p) {
 -              if (*p == entry)
 -                      *p = (*p)->delta_sibling;
 +      while (*idx) {
 +              struct object_entry *oe = &to_pack.objects[*idx - 1];
 +
 +              if (oe == entry)
 +                      *idx = oe->delta_sibling_idx;
                else
 -                      p = &(*p)->delta_sibling;
 +                      idx = &oe->delta_sibling_idx;
        }
 -      entry->delta = NULL;
 +      SET_DELTA(entry, NULL);
        entry->depth = 0;
  
 -      oi.sizep = &entry->size;
 -      oi.typep = &entry->type;
 -      if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
 +      oi.sizep = &size;
 +      oi.typep = &type;
 +      if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
                /*
                 * We failed to get the info from this pack for some reason;
                 * fall back to sha1_object_info, which may find another copy.
 -               * And if that fails, the error will be recorded in entry->type
 +               * And if that fails, the error will be recorded in oe_type(entry)
                 * and dealt with in prepare_pack().
                 */
 -              entry->type = sha1_object_info(entry->idx.oid.hash,
 -                                             &entry->size);
 +              oe_set_type(entry,
 +                          oid_object_info(the_repository, &entry->idx.oid, &size));
 +      } else {
 +              oe_set_type(entry, type);
        }
 +      SET_SIZE(entry, size);
  }
  
  /*
@@@ -1751,7 -1604,7 +1751,7 @@@ static void break_delta_chains(struct o
  
        for (cur = entry, total_depth = 0;
             cur;
 -           cur = cur->delta, total_depth++) {
 +           cur = DELTA(cur), total_depth++) {
                if (cur->dfs_state == DFS_DONE) {
                        /*
                         * We've already seen this object and know it isn't
                 * is a bug.
                 */
                if (cur->dfs_state != DFS_NONE)
 -                      die("BUG: confusing delta dfs state in first pass: %d",
 +                      BUG("confusing delta dfs state in first pass: %d",
                            cur->dfs_state);
  
                /*
                 * it's not a delta, we're done traversing, but we'll mark it
                 * done to save time on future traversals.
                 */
 -              if (!cur->delta) {
 +              if (!DELTA(cur)) {
                        cur->dfs_state = DFS_DONE;
                        break;
                }
                 * We keep all commits in the chain that we examined.
                 */
                cur->dfs_state = DFS_ACTIVE;
 -              if (cur->delta->dfs_state == DFS_ACTIVE) {
 +              if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
                        drop_reused_delta(cur);
                        cur->dfs_state = DFS_DONE;
                        break;
         * an extra "next" pointer to keep going after we reset cur->delta.
         */
        for (cur = entry; cur; cur = next) {
 -              next = cur->delta;
 +              next = DELTA(cur);
  
                /*
                 * We should have a chain of zero or more ACTIVE states down to
                if (cur->dfs_state == DFS_DONE)
                        break;
                else if (cur->dfs_state != DFS_ACTIVE)
 -                      die("BUG: confusing delta dfs state in second pass: %d",
 +                      BUG("confusing delta dfs state in second pass: %d",
                            cur->dfs_state);
  
                /*
@@@ -1859,10 -1712,6 +1859,10 @@@ static void get_object_details(void
        uint32_t i;
        struct object_entry **sorted_by_offset;
  
 +      if (progress)
 +              progress_state = start_progress(_("Counting objects"),
 +                                              to_pack.nr_objects);
 +
        sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
        for (i = 0; i < to_pack.nr_objects; i++)
                sorted_by_offset[i] = to_pack.objects + i;
        for (i = 0; i < to_pack.nr_objects; i++) {
                struct object_entry *entry = sorted_by_offset[i];
                check_object(entry);
 -              if (big_file_threshold < entry->size)
 +              if (entry->type_valid &&
 +                  oe_size_greater_than(&to_pack, entry, big_file_threshold))
                        entry->no_try_delta = 1;
 +              display_progress(progress_state, i + 1);
        }
 +      stop_progress(&progress_state);
  
        /*
         * This must happen in a second pass, since we rely on the delta
@@@ -1901,14 -1747,10 +1901,14 @@@ static int type_size_sort(const void *_
  {
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;
 +      enum object_type a_type = oe_type(a);
 +      enum object_type b_type = oe_type(b);
 +      unsigned long a_size = SIZE(a);
 +      unsigned long b_size = SIZE(b);
  
 -      if (a->type > b->type)
 +      if (a_type > b_type)
                return -1;
 -      if (a->type < b->type)
 +      if (a_type < b_type)
                return 1;
        if (a->hash > b->hash)
                return -1;
                return -1;
        if (a->preferred_base < b->preferred_base)
                return 1;
 -      if (a->size > b->size)
 +      if (use_delta_islands) {
 +              int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
 +              if (island_cmp)
 +                      return island_cmp;
 +      }
 +      if (a_size > b_size)
                return -1;
 -      if (a->size < b->size)
 +      if (a_size < b_size)
                return 1;
        return a < b ? -1 : (a > b);  /* newest first */
  }
@@@ -1955,30 -1792,18 +1955,30 @@@ static int delta_cacheable(unsigned lon
  
  #ifndef NO_PTHREADS
  
 +/* Protect access to object database */
  static pthread_mutex_t read_mutex;
  #define read_lock()           pthread_mutex_lock(&read_mutex)
  #define read_unlock()         pthread_mutex_unlock(&read_mutex)
  
 +/* Protect delta_cache_size */
  static pthread_mutex_t cache_mutex;
  #define cache_lock()          pthread_mutex_lock(&cache_mutex)
  #define cache_unlock()                pthread_mutex_unlock(&cache_mutex)
  
 +/*
 + * Protect object list partitioning (e.g. struct thread_param) and
 + * progress_state
 + */
  static pthread_mutex_t progress_mutex;
  #define progress_lock()               pthread_mutex_lock(&progress_mutex)
  #define progress_unlock()     pthread_mutex_unlock(&progress_mutex)
  
 +/*
 + * Access to struct object_entry is unprotected since each thread owns
 + * a portion of the main object list. Just don't access object entries
 + * ahead in the list because they can be stolen and would need
 + * progress_mutex for protection.
 + */
  #else
  
  #define read_lock()           (void)0
  
  #endif
  
 +/*
 + * Return the size of the object without doing any delta
 + * reconstruction (so non-deltas are true object sizes, but deltas
 + * return the size of the delta data).
 + */
 +unsigned long oe_get_size_slow(struct packing_data *pack,
 +                             const struct object_entry *e)
 +{
 +      struct packed_git *p;
 +      struct pack_window *w_curs;
 +      unsigned char *buf;
 +      enum object_type type;
 +      unsigned long used, avail, size;
 +
 +      if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
 +              read_lock();
 +              if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
 +                      die(_("unable to get size of %s"),
 +                          oid_to_hex(&e->idx.oid));
 +              read_unlock();
 +              return size;
 +      }
 +
 +      p = oe_in_pack(pack, e);
 +      if (!p)
 +              BUG("when e->type is a delta, it must belong to a pack");
 +
 +      read_lock();
 +      w_curs = NULL;
 +      buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
 +      used = unpack_object_header_buffer(buf, avail, &type, &size);
 +      if (used == 0)
 +              die(_("unable to parse object header of %s"),
 +                  oid_to_hex(&e->idx.oid));
 +
 +      unuse_pack(&w_curs);
 +      read_unlock();
 +      return size;
 +}
 +
  static int try_delta(struct unpacked *trg, struct unpacked *src,
                     unsigned max_depth, unsigned long *mem_usage)
  {
        void *delta_buf;
  
        /* Don't bother doing diffs between different types */
 -      if (trg_entry->type != src_entry->type)
 +      if (oe_type(trg_entry) != oe_type(src_entry))
                return -1;
  
        /*
         * it, we will still save the transfer cost, as we already know
         * the other side has it and we won't send src_entry at all.
         */
 -      if (reuse_delta && trg_entry->in_pack &&
 -          trg_entry->in_pack == src_entry->in_pack &&
 +      if (reuse_delta && IN_PACK(trg_entry) &&
 +          IN_PACK(trg_entry) == IN_PACK(src_entry) &&
            !src_entry->preferred_base &&
            trg_entry->in_pack_type != OBJ_REF_DELTA &&
            trg_entry->in_pack_type != OBJ_OFS_DELTA)
                return 0;
  
        /* Now some size filtering heuristics. */
 -      trg_size = trg_entry->size;
 -      if (!trg_entry->delta) {
 -              max_size = trg_size/2 - 20;
 +      trg_size = SIZE(trg_entry);
 +      if (!DELTA(trg_entry)) {
 +              max_size = trg_size/2 - the_hash_algo->rawsz;
                ref_depth = 1;
        } else {
 -              max_size = trg_entry->delta_size;
 +              max_size = DELTA_SIZE(trg_entry);
                ref_depth = trg->depth;
        }
        max_size = (uint64_t)max_size * (max_depth - src->depth) /
                                                (max_depth - ref_depth + 1);
        if (max_size == 0)
                return 0;
 -      src_size = src_entry->size;
 +      src_size = SIZE(src_entry);
        sizediff = src_size < trg_size ? trg_size - src_size : 0;
        if (sizediff >= max_size)
                return 0;
        if (trg_size < src_size / 32)
                return 0;
  
 +      if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
 +              return 0;
 +
        /* Load data if not already done */
        if (!trg->data) {
                read_lock();
 -              trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
 -                                         &sz);
 +              trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!trg->data)
 -                      die("object %s cannot be read",
 +                      die(_("object %s cannot be read"),
                            oid_to_hex(&trg_entry->idx.oid));
                if (sz != trg_size)
 -                      die("object %s inconsistent object length (%lu vs %lu)",
 +                      die(_("object %s inconsistent object length (%lu vs %lu)"),
                            oid_to_hex(&trg_entry->idx.oid), sz,
                            trg_size);
                *mem_usage += sz;
        }
        if (!src->data) {
                read_lock();
 -              src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
 -                                         &sz);
 +              src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!src->data) {
                        if (src_entry->preferred_base) {
                                static int warned = 0;
                                if (!warned++)
 -                                      warning("object %s cannot be read",
 +                                      warning(_("object %s cannot be read"),
                                                oid_to_hex(&src_entry->idx.oid));
                                /*
                                 * Those objects are not included in the
                                 */
                                return 0;
                        }
 -                      die("object %s cannot be read",
 +                      die(_("object %s cannot be read"),
                            oid_to_hex(&src_entry->idx.oid));
                }
                if (sz != src_size)
 -                      die("object %s inconsistent object length (%lu vs %lu)",
 +                      die(_("object %s inconsistent object length (%lu vs %lu)"),
                            oid_to_hex(&src_entry->idx.oid), sz,
                            src_size);
                *mem_usage += sz;
                if (!src->index) {
                        static int warned = 0;
                        if (!warned++)
 -                              warning("suboptimal pack - out of memory");
 +                              warning(_("suboptimal pack - out of memory"));
                        return 0;
                }
                *mem_usage += sizeof_delta_index(src->index);
        if (!delta_buf)
                return 0;
  
 -      if (trg_entry->delta) {
 +      if (DELTA(trg_entry)) {
                /* Prefer only shallower same-sized deltas. */
 -              if (delta_size == trg_entry->delta_size &&
 +              if (delta_size == DELTA_SIZE(trg_entry) &&
                    src->depth + 1 >= trg->depth) {
                        free(delta_buf);
                        return 0;
        free(trg_entry->delta_data);
        cache_lock();
        if (trg_entry->delta_data) {
 -              delta_cache_size -= trg_entry->delta_size;
 +              delta_cache_size -= DELTA_SIZE(trg_entry);
                trg_entry->delta_data = NULL;
        }
        if (delta_cacheable(src_size, trg_size, delta_size)) {
                free(delta_buf);
        }
  
 -      trg_entry->delta = src_entry;
 -      trg_entry->delta_size = delta_size;
 +      SET_DELTA(trg_entry, src_entry);
 +      SET_DELTA_SIZE(trg_entry, delta_size);
        trg->depth = src->depth + 1;
  
        return 1;
  
  static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
  {
 -      struct object_entry *child = me->delta_child;
 +      struct object_entry *child = DELTA_CHILD(me);
        unsigned int m = n;
        while (child) {
                unsigned int c = check_delta_limit(child, n + 1);
                if (m < c)
                        m = c;
 -              child = child->delta_sibling;
 +              child = DELTA_SIBLING(child);
        }
        return m;
  }
@@@ -2197,7 -1981,7 +2197,7 @@@ static unsigned long free_unpacked(stru
        free_delta_index(n->index);
        n->index = NULL;
        if (n->data) {
 -              freed_mem += n->entry->size;
 +              freed_mem += SIZE(n->entry);
                FREE_AND_NULL(n->data);
        }
        n->entry = NULL;
@@@ -2255,7 -2039,7 +2255,7 @@@ static void find_deltas(struct object_e
                 * otherwise they would become too deep.
                 */
                max_depth = depth;
 -              if (entry->delta_child) {
 +              if (DELTA_CHILD(entry)) {
                        max_depth -= check_delta_limit(entry, 0);
                        if (max_depth <= 0)
                                goto next;
                 * between writes at that moment.
                 */
                if (entry->delta_data && !pack_to_stdout) {
 -                      entry->z_delta_size = do_compress(&entry->delta_data,
 -                                                        entry->delta_size);
 -                      cache_lock();
 -                      delta_cache_size -= entry->delta_size;
 -                      delta_cache_size += entry->z_delta_size;
 -                      cache_unlock();
 +                      unsigned long size;
 +
 +                      size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
 +                      if (size < (1U << OE_Z_DELTA_BITS)) {
 +                              entry->z_delta_size = size;
 +                              cache_lock();
 +                              delta_cache_size -= DELTA_SIZE(entry);
 +                              delta_cache_size += entry->z_delta_size;
 +                              cache_unlock();
 +                      } else {
 +                              FREE_AND_NULL(entry->delta_data);
 +                              entry->z_delta_size = 0;
 +                      }
                }
  
                /* if we made n a delta, and if n is already at max
                 * depth, leaving it in the window is pointless.  we
                 * should evict it first.
                 */
 -              if (entry->delta && max_depth <= n->depth)
 +              if (DELTA(entry) && max_depth <= n->depth)
                        continue;
  
                /*
                 * currently deltified object, to keep it longer.  It will
                 * be the first base object to be attempted next.
                 */
 -              if (entry->delta) {
 +              if (DELTA(entry)) {
                        struct unpacked swap = array[best_base];
                        int dist = (window + idx - best_base) % window;
                        int dst = best_base;
@@@ -2359,19 -2136,12 +2359,19 @@@ static void try_to_free_from_threads(si
  static try_to_free_t old_try_to_free_routine;
  
  /*
 + * The main object list is split into smaller lists, each is handed to
 + * one worker.
 + *
   * The main thread waits on the condition that (at least) one of the workers
   * has stopped working (which is indicated in the .working member of
   * struct thread_params).
 + *
   * When a work thread has completed its work, it sets .working to 0 and
   * signals the main thread and waits on the condition that .data_ready
   * becomes 1.
 + *
 + * The main thread steals half of the work from the worker that has
 + * most work left to hand it to the idle worker.
   */
  
  struct thread_params {
@@@ -2462,8 -2232,8 +2462,8 @@@ static void ll_find_deltas(struct objec
                return;
        }
        if (progress > pack_to_stdout)
 -              fprintf(stderr, "Delta compression using up to %d threads.\n",
 -                              delta_search_threads);
 +              fprintf_ln(stderr, _("Delta compression using up to %d threads"),
 +                         delta_search_threads);
        p = xcalloc(delta_search_threads, sizeof(*p));
  
        /* Partition the work amongst work threads. */
                ret = pthread_create(&p[i].thread, NULL,
                                     threaded_find_deltas, &p[i]);
                if (ret)
 -                      die("unable to create thread: %s", strerror(ret));
 +                      die(_("unable to create thread: %s"), strerror(ret));
                active_threads++;
        }
  
@@@ -2595,10 -2365,10 +2595,10 @@@ static void add_tag_chain(const struct 
        if (packlist_find(&to_pack, oid->hash, NULL))
                return;
  
 -      tag = lookup_tag(oid);
 +      tag = lookup_tag(the_repository, oid);
        while (1) {
                if (!tag || parse_tag(tag) || !tag->tagged)
 -                      die("unable to pack objects reachable from tag %s",
 +                      die(_("unable to pack objects reachable from tag %s"),
                            oid_to_hex(oid));
  
                add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
@@@ -2627,9 -2397,6 +2627,9 @@@ static void prepare_pack(int window, in
        uint32_t i, nr_deltas;
        unsigned n;
  
 +      if (use_delta_islands)
 +              resolve_tree_islands(progress, &to_pack);
 +
        get_object_details();
  
        /*
        for (i = 0; i < to_pack.nr_objects; i++) {
                struct object_entry *entry = to_pack.objects + i;
  
 -              if (entry->delta)
 +              if (DELTA(entry))
                        /* This happens if we decided to reuse existing
                         * delta from a pack.  "reuse_delta &&" is implied.
                         */
                        continue;
  
 -              if (entry->size < 50)
 +              if (!entry->type_valid ||
 +                  oe_size_less_than(&to_pack, entry, 50))
                        continue;
  
                if (entry->no_try_delta)
  
                if (!entry->preferred_base) {
                        nr_deltas++;
 -                      if (entry->type < 0)
 -                              die("unable to get type of object %s",
 +                      if (oe_type(entry) < 0)
 +                              die(_("unable to get type of object %s"),
                                    oid_to_hex(&entry->idx.oid));
                } else {
 -                      if (entry->type < 0) {
 +                      if (oe_type(entry) < 0) {
                                /*
                                 * This object is not found, but we
                                 * don't have to include it anyway.
                ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
                stop_progress(&progress_state);
                if (nr_done != nr_deltas)
 -                      die("inconsistency with delta count");
 +                      die(_("inconsistency with delta count"));
        }
        free(delta_list);
  }
@@@ -2731,11 -2497,11 +2731,11 @@@ static int git_pack_config(const char *
        if (!strcmp(k, "pack.threads")) {
                delta_search_threads = git_config_int(k, v);
                if (delta_search_threads < 0)
 -                      die("invalid number of threads specified (%d)",
 +                      die(_("invalid number of threads specified (%d)"),
                            delta_search_threads);
  #ifdef NO_PTHREADS
                if (delta_search_threads != 1) {
 -                      warning("no threads support, ignoring %s", k);
 +                      warning(_("no threads support, ignoring %s"), k);
                        delta_search_threads = 0;
                }
  #endif
        if (!strcmp(k, "pack.indexversion")) {
                pack_idx_opts.version = git_config_int(k, v);
                if (pack_idx_opts.version > 2)
 -                      die("bad pack.indexversion=%"PRIu32,
 +                      die(_("bad pack.indexversion=%"PRIu32),
                            pack_idx_opts.version);
                return 0;
        }
@@@ -2762,7 -2528,7 +2762,7 @@@ static void read_object_list_from_stdin
                        if (feof(stdin))
                                break;
                        if (!ferror(stdin))
 -                              die("fgets returned NULL, not EOF, not error!");
 +                              die("BUG: fgets returned NULL, not EOF, not error!");
                        if (errno != EINTR)
                                die_errno("fgets");
                        clearerr(stdin);
                }
                if (line[0] == '-') {
                        if (get_oid_hex(line+1, &oid))
 -                              die("expected edge object ID, got garbage:\n %s",
 +                              die(_("expected edge object ID, got garbage:\n %s"),
                                    line);
                        add_preferred_base(&oid);
                        continue;
                }
                if (parse_oid_hex(line, &oid, &p))
 -                      die("expected object ID, got garbage:\n %s", line);
 +                      die(_("expected object ID, got garbage:\n %s"), line);
  
                add_preferred_base_object(p + 1);
 -              add_object_entry(&oid, 0, p + 1, 0);
 +              add_object_entry(&oid, OBJ_NONE, p + 1, 0);
        }
  }
  
@@@ -2793,9 -2559,6 +2793,9 @@@ static void show_commit(struct commit *
  
        if (write_bitmap_index)
                index_commit_for_bitmap(commit);
 +
 +      if (use_delta_islands)
 +              propagate_island_marks(commit);
  }
  
  static void show_object(struct object *obj, const char *name, void *data)
        add_preferred_base_object(name);
        add_object_entry(&obj->oid, obj->type, name, 0);
        obj->flags |= OBJECT_ADDED;
 +
 +      if (use_delta_islands) {
 +              const char *p;
 +              unsigned depth = 0;
 +              struct object_entry *ent;
 +
 +              for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
 +                      depth++;
 +
 +              ent = packlist_find(&to_pack, obj->oid.hash, NULL);
 +              if (ent && depth > oe_tree_depth(&to_pack, ent))
 +                      oe_set_tree_depth(&to_pack, ent, depth);
 +      }
  }
  
  static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
@@@ -2924,14 -2674,14 +2924,14 @@@ static void add_objects_in_unpacked_pac
  
        memset(&in_pack, 0, sizeof(in_pack));
  
 -      for (p = packed_git; p; p = p->next) {
 +      for (p = get_all_packs(the_repository); p; p = p->next) {
                struct object_id oid;
                struct object *o;
  
 -              if (!p->pack_local || p->pack_keep)
 +              if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
                if (open_pack_index(p))
 -                      die("cannot open pack index");
 +                      die(_("cannot open pack index"));
  
                ALLOC_GROW(in_pack.array,
                           in_pack.nr + p->num_objects,
  static int add_loose_object(const struct object_id *oid, const char *path,
                            void *data)
  {
 -      enum object_type type = sha1_object_info(oid->hash, NULL);
 +      enum object_type type = oid_object_info(the_repository, oid, NULL);
  
        if (type < 0) {
 -              warning("loose object at %s could not be examined", path);
 +              warning(_("loose object at %s could not be examined"), path);
                return 0;
        }
  
@@@ -2987,18 -2737,16 +2987,18 @@@ static int has_sha1_pack_kept_or_nonloc
        static struct packed_git *last_found = (void *)1;
        struct packed_git *p;
  
 -      p = (last_found != (void *)1) ? last_found : packed_git;
 +      p = (last_found != (void *)1) ? last_found :
 +                                      get_all_packs(the_repository);
  
        while (p) {
 -              if ((!p->pack_local || p->pack_keep) &&
 +              if ((!p->pack_local || p->pack_keep ||
 +                              p->pack_keep_in_core) &&
                        find_pack_entry_one(oid->hash, p)) {
                        last_found = p;
                        return 1;
                }
                if (p == last_found)
 -                      p = packed_git;
 +                      p = get_all_packs(the_repository);
                else
                        p = p->next;
                if (p == last_found)
@@@ -3034,12 -2782,12 +3034,12 @@@ static void loosen_unused_packed_object
        uint32_t i;
        struct object_id oid;
  
 -      for (p = packed_git; p; p = p->next) {
 -              if (!p->pack_local || p->pack_keep)
 +      for (p = get_all_packs(the_repository); p; p = p->next) {
 +              if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
  
                if (open_pack_index(p))
 -                      die("cannot open pack index");
 +                      die(_("cannot open pack index"));
  
                for (i = 0; i < p->num_objects; i++) {
                        nth_packed_object_oid(&oid, p, i);
                            !has_sha1_pack_kept_or_nonlocal(&oid) &&
                            !loosened_object_can_be_discarded(&oid, p->mtime))
                                if (force_object_loose(&oid, p->mtime))
 -                                      die("unable to force loose object");
 +                                      die(_("unable to force loose object"));
                }
        }
  }
@@@ -3061,20 -2809,18 +3061,20 @@@ static int pack_options_allow_reuse(voi
  {
        return pack_to_stdout &&
               allow_ofs_delta &&
 -             !ignore_packed_keep &&
 +             !ignore_packed_keep_on_disk &&
 +             !ignore_packed_keep_in_core &&
               (!local || !have_non_local_packs) &&
               !incremental;
  }
  
  static int get_object_list_from_bitmap(struct rev_info *revs)
  {
 -      if (prepare_bitmap_walk(revs) < 0)
 +      if (!(bitmap_git = prepare_bitmap_walk(revs)))
                return -1;
  
        if (pack_options_allow_reuse() &&
            !reuse_partial_packfile_from_bitmap(
 +                      bitmap_git,
                        &reuse_packfile,
                        &reuse_packfile_objects,
                        &reuse_packfile_offset)) {
                display_progress(progress_state, nr_result);
        }
  
 -      traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
 +      traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
        return 0;
  }
  
@@@ -3105,12 -2851,13 +3105,13 @@@ static void get_object_list(int ac, con
        char line[1000];
        int flags = 0;
  
 -      init_revisions(&revs, NULL);
 +      repo_init_revisions(the_repository, &revs, NULL);
        save_commit_buffer = 0;
+       revs.allow_exclude_promisor_objects_opt = 1;
        setup_revisions(ac, av, &revs, NULL);
  
        /* make sure shallows are read */
 -      is_repository_shallow();
 +      is_repository_shallow(the_repository);
  
        while (fgets(line, sizeof(line), stdin) != NULL) {
                int len = strlen(line);
                                struct object_id oid;
                                if (get_oid_hex(line + 10, &oid))
                                        die("not an SHA-1 '%s'", line + 10);
 -                              register_shallow(&oid);
 +                              register_shallow(the_repository, &oid);
                                use_bitmap_index = 0;
                                continue;
                        }
 -                      die("not a rev '%s'", line);
 +                      die(_("not a rev '%s'"), line);
                }
                if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
 -                      die("bad revision '%s'", line);
 +                      die(_("bad revision '%s'"), line);
        }
  
        if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
                return;
  
 +      if (use_delta_islands)
 +              load_delta_islands();
 +
        if (prepare_revision_walk(&revs))
 -              die("revision walk setup failed");
 +              die(_("revision walk setup failed"));
        mark_edges_uninteresting(&revs, show_edge);
  
        if (!fn_show_object)
                revs.ignore_missing_links = 1;
                if (add_unseen_recent_objects_to_traversal(&revs,
                                unpack_unreachable_expiration))
 -                      die("unable to add recent objects");
 +                      die(_("unable to add recent objects"));
                if (prepare_revision_walk(&revs))
 -                      die("revision walk setup failed");
 +                      die(_("revision walk setup failed"));
                traverse_commit_list(&revs, record_recent_commit,
                                     record_recent_object, NULL);
        }
        oid_array_clear(&recent_objects);
  }
  
 +static void add_extra_kept_packs(const struct string_list *names)
 +{
 +      struct packed_git *p;
 +
 +      if (!names->nr)
 +              return;
 +
 +      for (p = get_all_packs(the_repository); p; p = p->next) {
 +              const char *name = basename(p->pack_name);
 +              int i;
 +
 +              if (!p->pack_local)
 +                      continue;
 +
 +              for (i = 0; i < names->nr; i++)
 +                      if (!fspathcmp(name, names->items[i].string))
 +                              break;
 +
 +              if (i < names->nr) {
 +                      p->pack_keep_in_core = 1;
 +                      ignore_packed_keep_in_core = 1;
 +                      continue;
 +              }
 +      }
 +}
 +
  static int option_parse_index_version(const struct option *opt,
                                      const char *arg, int unset)
  {
@@@ -3234,12 -2952,12 +3235,12 @@@ static int option_parse_unpack_unreacha
  int cmd_pack_objects(int argc, const char **argv, const char *prefix)
  {
        int use_internal_rev_list = 0;
 -      int thin = 0;
        int shallow = 0;
        int all_progress_implied = 0;
        struct argv_array rp = ARGV_ARRAY_INIT;
        int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
        int rev_list_index = 0;
 +      struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
        struct option pack_objects_options[] = {
                OPT_SET_INT('q', "quiet", &progress,
                            N_("do not show progress meter"), 0),
                OPT_BOOL(0, "all-progress-implied",
                         &all_progress_implied,
                         N_("similar to --all-progress when progress meter is shown")),
 -              { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
 +              { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
                  N_("write the pack index file in the specified idx format version"),
                  0, option_parse_index_version },
                OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
                         N_("do not create an empty pack output")),
                OPT_BOOL(0, "revs", &use_internal_rev_list,
                         N_("read revision arguments from standard input")),
 -              { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
 -                N_("limit the objects to those that are not yet packed"),
 -                PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
 -              { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
 -                N_("include objects reachable from any reference"),
 -                PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
 -              { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
 -                N_("include objects referred by reflog entries"),
 -                PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
 -              { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
 -                N_("include objects referred to by the index"),
 -                PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
 +              OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
 +                            N_("limit the objects to those that are not yet packed"),
 +                            1, PARSE_OPT_NONEG),
 +              OPT_SET_INT_F(0, "all", &rev_list_all,
 +                            N_("include objects reachable from any reference"),
 +                            1, PARSE_OPT_NONEG),
 +              OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
 +                            N_("include objects referred by reflog entries"),
 +                            1, PARSE_OPT_NONEG),
 +              OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
 +                            N_("include objects referred to by the index"),
 +                            1, PARSE_OPT_NONEG),
                OPT_BOOL(0, "stdout", &pack_to_stdout,
                         N_("output pack to stdout")),
                OPT_BOOL(0, "include-tag", &include_tag,
                         N_("create thin packs")),
                OPT_BOOL(0, "shallow", &shallow,
                         N_("create packs suitable for shallow fetches")),
 -              OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
 +              OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
                         N_("ignore packs that have companion .keep file")),
 +              OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
 +                              N_("ignore this pack")),
                OPT_INTEGER(0, "compression", &pack_compression_level,
                            N_("pack compression level")),
                OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
                  option_parse_missing_action },
                OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
                         N_("do not pack objects in promisor packfiles")),
 +              OPT_BOOL(0, "delta-islands", &use_delta_islands,
 +                       N_("respect islands during delta compression")),
                OPT_END(),
        };
  
 -      check_replace_refs = 0;
 +      if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
 +              BUG("too many dfs states, increase OE_DFS_STATE_BITS");
 +
 +      read_replace_refs = 0;
  
        reset_pack_idx_option(&pack_idx_opts);
        git_config(git_pack_config, NULL);
        if (pack_to_stdout != !base_name || argc)
                usage_with_options(pack_usage, pack_objects_options);
  
 +      if (depth >= (1 << OE_DEPTH_BITS)) {
 +              warning(_("delta chain depth %d is too deep, forcing %d"),
 +                      depth, (1 << OE_DEPTH_BITS) - 1);
 +              depth = (1 << OE_DEPTH_BITS) - 1;
 +      }
 +      if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
 +              warning(_("pack.deltaCacheLimit is too high, forcing %d"),
 +                      (1U << OE_Z_DELTA_BITS) - 1);
 +              cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
 +      }
 +
        argv_array_push(&rp, "pack-objects");
        if (thin) {
                use_internal_rev_list = 1;
                fetch_if_missing = 0;
                argv_array_push(&rp, "--exclude-promisor-objects");
        }
 +      if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
 +              use_internal_rev_list = 1;
  
        if (!reuse_object)
                reuse_delta = 0;
        if (pack_compression_level == -1)
                pack_compression_level = Z_DEFAULT_COMPRESSION;
        else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
 -              die("bad pack compression level %d", pack_compression_level);
 +              die(_("bad pack compression level %d"), pack_compression_level);
  
        if (!delta_search_threads)      /* --threads=0 means autodetect */
                delta_search_threads = online_cpus();
  
  #ifdef NO_PTHREADS
        if (delta_search_threads != 1)
 -              warning("no threads support, ignoring --threads");
 +              warning(_("no threads support, ignoring --threads"));
  #endif
        if (!pack_to_stdout && !pack_size_limit)
                pack_size_limit = pack_size_limit_cfg;
        if (pack_to_stdout && pack_size_limit)
 -              die("--max-pack-size cannot be used to build a pack for transfer.");
 +              die(_("--max-pack-size cannot be used to build a pack for transfer"));
        if (pack_size_limit && pack_size_limit < 1024*1024) {
 -              warning("minimum pack size limit is 1 MiB");
 +              warning(_("minimum pack size limit is 1 MiB"));
                pack_size_limit = 1024*1024;
        }
  
        if (!pack_to_stdout && thin)
 -              die("--thin cannot be used to build an indexable pack.");
 +              die(_("--thin cannot be used to build an indexable pack"));
  
        if (keep_unreachable && unpack_unreachable)
 -              die("--keep-unreachable and --unpack-unreachable are incompatible.");
 +              die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
        if (!rev_list_all || !rev_list_reflog || !rev_list_index)
                unpack_unreachable_expiration = 0;
  
        if (filter_options.choice) {
                if (!pack_to_stdout)
 -                      die("cannot use --filter without --stdout.");
 +                      die(_("cannot use --filter without --stdout"));
                use_bitmap_index = 0;
        }
  
                use_bitmap_index = use_bitmap_index_default;
  
        /* "hard" reasons not to use bitmaps; these just won't work at all */
 -      if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
 +      if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
                use_bitmap_index = 0;
  
        if (pack_to_stdout || !rev_list_all)
                write_bitmap_index = 0;
  
 +      if (use_delta_islands)
 +              argv_array_push(&rp, "--topo-order");
 +
        if (progress && all_progress_implied)
                progress = 2;
  
 -      prepare_packed_git();
 -      if (ignore_packed_keep) {
 +      add_extra_kept_packs(&keep_pack_list);
 +      if (ignore_packed_keep_on_disk) {
                struct packed_git *p;
 -              for (p = packed_git; p; p = p->next)
 +              for (p = get_all_packs(the_repository); p; p = p->next)
                        if (p->pack_local && p->pack_keep)
                                break;
                if (!p) /* no keep-able packs found */
 -                      ignore_packed_keep = 0;
 +                      ignore_packed_keep_on_disk = 0;
        }
        if (local) {
                /*
 -               * unlike ignore_packed_keep above, we do not want to
 -               * unset "local" based on looking at packs, as it
 -               * also covers non-local objects
 +               * unlike ignore_packed_keep_on_disk above, we do not
 +               * want to unset "local" based on looking at packs, as
 +               * it also covers non-local objects
                 */
                struct packed_git *p;
 -              for (p = packed_git; p; p = p->next) {
 +              for (p = get_all_packs(the_repository); p; p = p->next) {
                        if (!p->pack_local) {
                                have_non_local_packs = 1;
                                break;
                }
        }
  
 +      prepare_packing_data(&to_pack);
 +
        if (progress)
 -              progress_state = start_progress(_("Counting objects"), 0);
 +              progress_state = start_progress(_("Enumerating objects"), 0);
        if (!use_internal_rev_list)
                read_object_list_from_stdin();
        else {
                prepare_pack(window, depth);
        write_pack_file();
        if (progress)
 -              fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
 -                      " reused %"PRIu32" (delta %"PRIu32")\n",
 -                      written, written_delta, reused, reused_delta);
 +              fprintf_ln(stderr,
 +                         _("Total %"PRIu32" (delta %"PRIu32"),"
 +                           " reused %"PRIu32" (delta %"PRIu32")"),
 +                         written, written_delta, reused, reused_delta);
        return 0;
  }
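
Note: several of the pack-objects hunks above deal with values that must now fit into narrow bit fields of the packed object entry — the compressed delta is only cached when its size passes the `size < (1U << OE_Z_DELTA_BITS)` check, and `cmd_pack_objects()` clamps `depth` and `pack.deltaCacheLimit` to the representable range up front. The stand-alone sketch below illustrates that general pattern (clamp configuration to the field width, drop a cached value that cannot be represented); the struct and names are simplified stand-ins for illustration, not git's actual definitions.

        #include <stdio.h>
        #include <stdlib.h>

        #define Z_SIZE_BITS 20                        /* width of the hypothetical size field */
        #define Z_SIZE_MAX  ((1U << Z_SIZE_BITS) - 1)

        struct entry {
                unsigned char *zdata;                 /* cached compressed data, or NULL */
                unsigned int zsize : Z_SIZE_BITS;     /* only meaningful when zdata != NULL */
        };

        /* Clamp a configured limit so it can always be stored in the bit field. */
        static unsigned int clamp_limit(unsigned int configured)
        {
                if (configured > Z_SIZE_MAX) {
                        fprintf(stderr, "limit %u too high, forcing %u\n",
                                configured, Z_SIZE_MAX);
                        return Z_SIZE_MAX;
                }
                return configured;
        }

        /* Keep the buffer only if its size fits in the field; otherwise drop it. */
        static void cache_compressed(struct entry *e, unsigned char *buf, size_t size)
        {
                if (size <= Z_SIZE_MAX) {
                        e->zdata = buf;
                        e->zsize = (unsigned int)size;
                } else {
                        free(buf);        /* too big to record; regenerate later instead */
                        e->zdata = NULL;
                        e->zsize = 0;
                }
        }

        int main(void)
        {
                struct entry e = { 0 };
                unsigned int limit = clamp_limit(1u << 24);   /* gets clamped with a warning */
                unsigned char *buf = malloc(100);

                if (buf)
                        cache_compressed(&e, buf, 100);       /* fits, so it is kept */
                printf("limit=%u cached=%s size=%u\n",
                       limit, e.zdata ? "yes" : "no", (unsigned int)e.zsize);
                free(e.zdata);
                return 0;
        }
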
diff --combined builtin/prune.c
index 1ec9ddd751df6644d2c39ace41a1494800082638,a5c784749eaebd92655800ab87b2df5512501603..e42653b99cffe2f42f2fb853999a41465878ce02
@@@ -6,7 -6,6 +6,7 @@@
  #include "reachable.h"
  #include "parse-options.h"
  #include "progress.h"
 +#include "object-store.h"
  
  static const char * const prune_usage[] = {
        N_("git prune [-n] [-v] [--progress] [--expire <time>] [--] [<head>...]"),
@@@ -40,7 -39,7 +40,7 @@@ static int prune_object(const struct ob
         * Do we know about this object?
         * It must have been reachable
         */
 -      if (lookup_object(oid->hash))
 +      if (lookup_object(the_repository, oid->hash))
                return 0;
  
        if (lstat(fullpath, &st)) {
@@@ -51,8 -50,7 +51,8 @@@
        if (st.st_mtime > expire)
                return 0;
        if (show_only || verbose) {
 -              enum object_type type = sha1_object_info(oid->hash, NULL);
 +              enum object_type type = oid_object_info(the_repository, oid,
 +                                                      NULL);
                printf("%s %s\n", oid_to_hex(oid),
                       (type > 0) ? type_name(type) : "unknown");
        }
@@@ -118,9 -116,10 +118,10 @@@ int cmd_prune(int argc, const char **ar
  
        expire = TIME_MAX;
        save_commit_buffer = 0;
 -      check_replace_refs = 0;
 +      read_replace_refs = 0;
        ref_paranoia = 1;
 -      init_revisions(&revs, prefix);
+       revs.allow_exclude_promisor_objects_opt = 1;
 +      repo_init_revisions(the_repository, &revs, prefix);
  
        argc = parse_options(argc, argv, prefix, options, prune_usage, 0);
  
        remove_temporary_files(s);
        free(s);
  
 -      if (is_repository_shallow())
 -              prune_shallow(show_only);
 +      if (is_repository_shallow(the_repository))
 +              prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
  
        return 0;
  }
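
Note: the prune.c hunks follow the same conversion seen throughout this merge — functions that used to consult global repository state (`lookup_object()`, `sha1_object_info()`, `init_revisions()`) now take the repository explicitly (`lookup_object(the_repository, ...)`, `oid_object_info(the_repository, ...)`, `repo_init_revisions(the_repository, ...)`). A minimal sketch of that migration pattern, with made-up names rather than git's real ones:

        #include <stdio.h>

        /* A stand-in for the context that used to be implicit global state. */
        struct repo {
                const char *gitdir;
        };

        static struct repo the_repo = { ".git" };

        /* New-style API: the repository is an explicit parameter... */
        static int repo_count_objects(struct repo *r)
        {
                printf("counting objects in %s\n", r->gitdir);
                return 0;
        }

        /*
         * ...while a thin compatibility macro keeps old call sites compiling
         * until they are converted one by one.
         */
        #define count_objects() repo_count_objects(&the_repo)

        int main(void)
        {
                count_objects();                  /* unconverted caller */
                repo_count_objects(&the_repo);    /* converted caller */
                return 0;
        }
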
diff --combined builtin/rev-list.c
index 5064d08e1b8ad04544a76d1f0496134e2c15079c,c8f3ac8d0927e67ea913299bc26173d870d7b29c..2880ed37e3f97193d2374657346b4de52bc44954
@@@ -6,8 -6,6 +6,8 @@@
  #include "list-objects.h"
  #include "list-objects-filter.h"
  #include "list-objects-filter-options.h"
 +#include "object.h"
 +#include "object-store.h"
  #include "pack.h"
  #include "pack-bitmap.h"
  #include "builtin.h"
@@@ -18,7 -16,6 +18,7 @@@
  #include "reflog-walk.h"
  #include "oidset.h"
  #include "packfile.h"
 +#include "object-store.h"
  
  static const char rev_list_usage[] =
  "git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
@@@ -111,7 -108,7 +111,7 @@@ static void show_commit(struct commit *
        if (!revs->graph)
                fputs(get_revision_mark(revs, commit), stdout);
        if (revs->abbrev_commit && revs->abbrev)
 -              fputs(find_unique_abbrev(commit->object.oid.hash, revs->abbrev),
 +              fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
                      stdout);
        else
                fputs(oid_to_hex(&commit->object.oid), stdout);
@@@ -210,8 -207,7 +210,8 @@@ static inline void finish_object__ma(st
         */
        switch (arg_missing_action) {
        case MA_ERROR:
 -              die("missing blob object '%s'", oid_to_hex(&obj->oid));
 +              die("missing %s object '%s'",
 +                  type_name(obj->type), oid_to_hex(&obj->oid));
                return;
  
        case MA_ALLOW_ANY:
        case MA_ALLOW_PROMISOR:
                if (is_promisor_object(&obj->oid))
                        return;
 -              die("unexpected missing blob object '%s'",
 -                  oid_to_hex(&obj->oid));
 +              die("unexpected missing %s object '%s'",
 +                  type_name(obj->type), oid_to_hex(&obj->oid));
                return;
  
        default:
  static int finish_object(struct object *obj, const char *name, void *cb_data)
  {
        struct rev_list_info *info = cb_data;
 -      if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) {
 +      if (!has_object_file(&obj->oid)) {
                finish_object__ma(obj);
                return 1;
        }
        if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
 -              parse_object(&obj->oid);
 +              parse_object(the_repository, &obj->oid);
        return 0;
  }
  
@@@ -372,10 -368,10 +372,11 @@@ int cmd_rev_list(int argc, const char *
                usage(rev_list_usage);
  
        git_config(git_default_config, NULL);
 -      init_revisions(&revs, prefix);
 +      repo_init_revisions(the_repository, &revs, prefix);
        revs.abbrev = DEFAULT_ABBREV;
+       revs.allow_exclude_promisor_objects_opt = 1;
        revs.commit_format = CMIT_FMT_UNSPECIFIED;
 +      revs.do_not_die_on_missing_tree = 1;
  
        /*
         * Scan the argument list before invoking setup_revisions(), so that we
        if ((!revs.commits && reflog_walk_empty(revs.reflog_info) &&
             (!(revs.tag_objects || revs.tree_objects || revs.blob_objects) &&
              !revs.pending.nr) &&
 -           !revs.rev_input_given) ||
 +           !revs.rev_input_given && !revs.read_from_stdin) ||
            revs.diff)
                usage(rev_list_usage);
  
                if (revs.count && !revs.left_right && !revs.cherry_mark) {
                        uint32_t commit_count;
                        int max_count = revs.max_count;
 -                      if (!prepare_bitmap_walk(&revs)) {
 -                              count_bitmap_commit_list(&commit_count, NULL, NULL, NULL);
 +                      struct bitmap_index *bitmap_git;
 +                      if ((bitmap_git = prepare_bitmap_walk(&revs))) {
 +                              count_bitmap_commit_list(bitmap_git, &commit_count, NULL, NULL, NULL);
                                if (max_count >= 0 && max_count < commit_count)
                                        commit_count = max_count;
                                printf("%d\n", commit_count);
 +                              free_bitmap_index(bitmap_git);
                                return 0;
                        }
                } else if (revs.max_count < 0 &&
                           revs.tag_objects && revs.tree_objects && revs.blob_objects) {
 -                      if (!prepare_bitmap_walk(&revs)) {
 -                              traverse_bitmap_commit_list(&show_object_fast);
 +                      struct bitmap_index *bitmap_git;
 +                      if ((bitmap_git = prepare_bitmap_walk(&revs))) {
 +                              traverse_bitmap_commit_list(bitmap_git, &show_object_fast);
 +                              free_bitmap_index(bitmap_git);
                                return 0;
                        }
                }
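
Note: the rev-list.c and pack-objects.c hunks switch the bitmap walk from an implicit global to an explicit handle — `prepare_bitmap_walk()` now returns a `struct bitmap_index *` (NULL on failure), which the caller threads through `count_bitmap_commit_list()` / `traverse_bitmap_commit_list()` and releases with `free_bitmap_index()`. Below is a small self-contained sketch of that handle-based shape; the names are illustrative, not git's.

        #include <stdio.h>
        #include <stdlib.h>

        struct walk_handle {
                int nr_commits;          /* whatever state the walk needs */
        };

        /* Returns a handle on success, NULL when the walk cannot be used. */
        static struct walk_handle *prepare_walk(int have_index)
        {
                struct walk_handle *h;

                if (!have_index)
                        return NULL;
                h = malloc(sizeof(*h));
                if (!h)
                        return NULL;
                h->nr_commits = 42;
                return h;
        }

        static void count_commits(struct walk_handle *h, int *out)
        {
                *out = h->nr_commits;
        }

        static void free_walk(struct walk_handle *h)
        {
                free(h);
        }

        int main(void)
        {
                struct walk_handle *h = prepare_walk(1);
                int count;

                if (h) {        /* mirrors: if ((bitmap_git = prepare_bitmap_walk(&revs))) */
                        count_commits(h, &count);
                        printf("%d\n", count);
                        free_walk(h);
                        return 0;
                }
                /* fall back to the regular walk when no handle is available */
                return 1;
        }
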
diff --combined revision.c
index a1ddb9e11cbe3a52bb8d3eee3785524db4055854,748310c2a3b194322dae8fe586b9a247c186a6b1..28fb2a70cdaaab93e8665a37c2ce02f89a90ed4c
@@@ -1,5 -1,4 +1,5 @@@
  #include "cache.h"
 +#include "object-store.h"
  #include "tag.h"
  #include "blob.h"
  #include "tree.h"
@@@ -7,7 -6,6 +7,7 @@@
  #include "diff.h"
  #include "refs.h"
  #include "revision.h"
 +#include "repository.h"
  #include "graph.h"
  #include "grep.h"
  #include "reflog-walk.h"
  #include "packfile.h"
  #include "worktree.h"
  #include "argv-array.h"
 +#include "commit-reach.h"
  
  volatile show_early_output_fn_t show_early_output;
  
  static const char *term_bad;
  static const char *term_good;
  
 +implement_shared_commit_slab(revision_sources, char *);
 +
  void show_object_with_name(FILE *out, struct object *obj, const char *name)
  {
        const char *p;
@@@ -52,23 -47,25 +52,23 @@@ static void mark_blob_uninteresting(str
        blob->object.flags |= UNINTERESTING;
  }
  
 -static void mark_tree_contents_uninteresting(struct tree *tree)
 +static void mark_tree_contents_uninteresting(struct repository *r,
 +                                           struct tree *tree)
  {
        struct tree_desc desc;
        struct name_entry entry;
 -      struct object *obj = &tree->object;
  
 -      if (!has_object_file(&obj->oid))
 +      if (parse_tree_gently(tree, 1) < 0)
                return;
 -      if (parse_tree(tree) < 0)
 -              die("bad tree %s", oid_to_hex(&obj->oid));
  
        init_tree_desc(&desc, tree->buffer, tree->size);
        while (tree_entry(&desc, &entry)) {
                switch (object_type(entry.mode)) {
                case OBJ_TREE:
 -                      mark_tree_uninteresting(lookup_tree(entry.oid));
 +                      mark_tree_uninteresting(r, lookup_tree(r, entry.oid));
                        break;
                case OBJ_BLOB:
 -                      mark_blob_uninteresting(lookup_blob(entry.oid));
 +                      mark_blob_uninteresting(lookup_blob(r, entry.oid));
                        break;
                default:
                        /* Subproject commit - not in this repository */
@@@ -83,7 -80,7 +83,7 @@@
        free_tree_buffer(tree);
  }
  
 -void mark_tree_uninteresting(struct tree *tree)
 +void mark_tree_uninteresting(struct repository *r, struct tree *tree)
  {
        struct object *obj;
  
        if (obj->flags & UNINTERESTING)
                return;
        obj->flags |= UNINTERESTING;
 -      mark_tree_contents_uninteresting(tree);
 +      mark_tree_contents_uninteresting(r, tree);
  }
  
 -void mark_parents_uninteresting(struct commit *commit)
 +struct commit_stack {
 +      struct commit **items;
 +      size_t nr, alloc;
 +};
 +#define COMMIT_STACK_INIT { NULL, 0, 0 }
 +
 +static void commit_stack_push(struct commit_stack *stack, struct commit *commit)
  {
 -      struct commit_list *parents = NULL, *l;
 +      ALLOC_GROW(stack->items, stack->nr + 1, stack->alloc);
 +      stack->items[stack->nr++] = commit;
 +}
  
 -      for (l = commit->parents; l; l = l->next)
 -              commit_list_insert(l->item, &parents);
 +static struct commit *commit_stack_pop(struct commit_stack *stack)
 +{
 +      return stack->nr ? stack->items[--stack->nr] : NULL;
 +}
  
 -      while (parents) {
 -              struct commit *commit = pop_commit(&parents);
 +static void commit_stack_clear(struct commit_stack *stack)
 +{
 +      FREE_AND_NULL(stack->items);
 +      stack->nr = stack->alloc = 0;
 +}
  
 -              while (commit) {
 -                      /*
 -                       * A missing commit is ok iff its parent is marked
 -                       * uninteresting.
 -                       *
 -                       * We just mark such a thing parsed, so that when
 -                       * it is popped next time around, we won't be trying
 -                       * to parse it and get an error.
 -                       */
 -                      if (!commit->object.parsed &&
 -                          !has_object_file(&commit->object.oid))
 -                              commit->object.parsed = 1;
 +static void mark_one_parent_uninteresting(struct commit *commit,
 +                                        struct commit_stack *pending)
 +{
 +      struct commit_list *l;
  
 -                      if (commit->object.flags & UNINTERESTING)
 -                              break;
 +      if (commit->object.flags & UNINTERESTING)
 +              return;
 +      commit->object.flags |= UNINTERESTING;
  
 -                      commit->object.flags |= UNINTERESTING;
 +      /*
 +       * Normally we haven't parsed the parent
 +       * yet, so we won't have a parent of a parent
 +       * here. However, it may turn out that we've
 +       * reached this commit some other way (where it
 +       * wasn't uninteresting), in which case we need
 +       * to mark its parents recursively too..
 +       */
 +      for (l = commit->parents; l; l = l->next)
 +              commit_stack_push(pending, l->item);
 +}
  
 -                      /*
 -                       * Normally we haven't parsed the parent
 -                       * yet, so we won't have a parent of a parent
 -                       * here. However, it may turn out that we've
 -                       * reached this commit some other way (where it
 -                       * wasn't uninteresting), in which case we need
 -                       * to mark its parents recursively too..
 -                       */
 -                      if (!commit->parents)
 -                              break;
 +void mark_parents_uninteresting(struct commit *commit)
 +{
 +      struct commit_stack pending = COMMIT_STACK_INIT;
 +      struct commit_list *l;
  
 -                      for (l = commit->parents->next; l; l = l->next)
 -                              commit_list_insert(l->item, &parents);
 -                      commit = commit->parents->item;
 -              }
 -      }
 +      for (l = commit->parents; l; l = l->next)
 +              mark_one_parent_uninteresting(l->item, &pending);
 +
 +      while (pending.nr > 0)
 +              mark_one_parent_uninteresting(commit_stack_pop(&pending),
 +                                            &pending);
 +
 +      commit_stack_clear(&pending);
  }
  
  static void add_pending_object_with_path(struct rev_info *revs,
@@@ -199,7 -183,7 +199,7 @@@ void add_head_to_pending(struct rev_inf
        struct object *obj;
        if (get_oid("HEAD", &oid))
                return;
 -      obj = parse_object(&oid);
 +      obj = parse_object(revs->repo, &oid);
        if (!obj)
                return;
        add_pending_object(revs, obj, "HEAD");
@@@ -211,7 -195,7 +211,7 @@@ static struct object *get_reference(str
  {
        struct object *object;
  
 -      object = parse_object(oid);
 +      object = parse_object(revs->repo, oid);
        if (!object) {
                if (revs->ignore_missing)
                        return object;
@@@ -248,13 -232,10 +248,13 @@@ static struct commit *handle_commit(str
                        add_pending_object(revs, object, tag->tag);
                if (!tag->tagged)
                        die("bad tag");
 -              object = parse_object(&tag->tagged->oid);
 +              object = parse_object(revs->repo, &tag->tagged->oid);
                if (!object) {
                        if (revs->ignore_missing_links || (flags & UNINTERESTING))
                                return NULL;
 +                      if (revs->exclude_promisor_objects &&
 +                          is_promisor_object(&tag->tagged->oid))
 +                              return NULL;
                        die("bad object %s", oid_to_hex(&tag->tagged->oid));
                }
                object->flags |= flags;
         */
        if (object->type == OBJ_COMMIT) {
                struct commit *commit = (struct commit *)object;
 +
                if (parse_commit(commit) < 0)
                        die("unable to parse commit %s", name);
                if (flags & UNINTERESTING) {
                        mark_parents_uninteresting(commit);
                        revs->limited = 1;
                }
 -              if (revs->show_source && !commit->util)
 -                      commit->util = xstrdup(name);
 +              if (revs->sources) {
 +                      char **slot = revision_sources_at(revs->sources, commit);
 +
 +                      if (!*slot)
 +                              *slot = xstrdup(name);
 +              }
                return commit;
        }
  
                if (!revs->tree_objects)
                        return NULL;
                if (flags & UNINTERESTING) {
 -                      mark_tree_contents_uninteresting(tree);
 +                      mark_tree_contents_uninteresting(revs->repo, tree);
                        return NULL;
                }
                add_pending_object_with_path(revs, object, name, mode, path);
@@@ -464,8 -440,8 +464,8 @@@ static void file_change(struct diff_opt
  static int rev_compare_tree(struct rev_info *revs,
                            struct commit *parent, struct commit *commit)
  {
 -      struct tree *t1 = parent->tree;
 -      struct tree *t2 = commit->tree;
 +      struct tree *t1 = get_commit_tree(parent);
 +      struct tree *t2 = get_commit_tree(commit);
  
        if (!t1)
                return REV_TREE_NEW;
  static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit)
  {
        int retval;
 -      struct tree *t1 = commit->tree;
 +      struct tree *t1 = get_commit_tree(commit);
  
        if (!t1)
                return 0;
@@@ -639,7 -615,7 +639,7 @@@ static void try_to_simplify_commit(stru
        if (!revs->prune)
                return;
  
 -      if (!commit->tree)
 +      if (!get_commit_tree(commit))
                return;
  
        if (!commit->parents) {
@@@ -837,12 -813,8 +837,12 @@@ static int add_parents_to_list(struct r
                        }
                        return -1;
                }
 -              if (revs->show_source && !p->util)
 -                      p->util = commit->util;
 +              if (revs->sources) {
 +                      char **slot = revision_sources_at(revs->sources, p);
 +
 +                      if (!*slot)
 +                              *slot = *revision_sources_at(revs->sources, commit);
 +              }
                p->object.flags |= left_flag;
                if (!(p->object.flags & SEEN)) {
                        p->object.flags |= SEEN;
@@@ -878,7 -850,7 +878,7 @@@ static void cherry_pick_list(struct com
                return;
  
        left_first = left_count < right_count;
 -      init_patch_ids(&ids);
 +      init_patch_ids(revs->repo, &ids);
        ids.diffopts.pathspec = revs->diffopt.pathspec;
  
        /* Compute patch-ids for one side */
@@@ -1254,7 -1226,7 +1254,7 @@@ static void handle_one_reflog_commit(st
  {
        struct all_refs_cb *cb = cb_data;
        if (!is_null_oid(oid)) {
 -              struct object *o = parse_object(oid);
 +              struct object *o = parse_object(cb->all_revs->repo, oid);
                if (o) {
                        o->flags |= cb->all_flags;
                        /* ??? CMDLINEFLAGS ??? */
@@@ -1313,7 -1285,7 +1313,7 @@@ void add_reflogs_to_pending(struct rev_
  
        cb.all_revs = revs;
        cb.all_flags = flags;
 -      cb.refs = get_main_ref_store();
 +      cb.refs = get_main_ref_store(revs->repo);
        for_each_reflog(handle_one_reflog, &cb);
  
        if (!revs->single_worktree)
@@@ -1327,7 -1299,7 +1327,7 @@@ static void add_cache_tree(struct cache
        int i;
  
        if (it->entry_count >= 0) {
 -              struct tree *tree = lookup_tree(&it->oid);
 +              struct tree *tree = lookup_tree(revs->repo, &it->oid);
                add_pending_object_with_path(revs, &tree->object, "",
                                             040000, path->buf);
        }
@@@ -1353,7 -1325,7 +1353,7 @@@ static void do_add_index_objects_to_pen
                if (S_ISGITLINK(ce->ce_mode))
                        continue;
  
 -              blob = lookup_blob(&ce->oid);
 +              blob = lookup_blob(revs->repo, &ce->oid);
                if (!blob)
                        die("unable to add index blob to traversal");
                add_pending_object_with_path(revs, &blob->object, "",
@@@ -1371,8 -1343,8 +1371,8 @@@ void add_index_objects_to_pending(struc
  {
        struct worktree **worktrees, **p;
  
 -      read_cache();
 -      do_add_index_objects_to_pending(revs, &the_index);
 +      read_index(revs->repo->index);
 +      do_add_index_objects_to_pending(revs, revs->repo->index);
  
        if (revs->single_worktree)
                return;
@@@ -1440,13 -1412,10 +1440,13 @@@ static int add_parents_only(struct rev_
        return 1;
  }
  
 -void init_revisions(struct rev_info *revs, const char *prefix)
 +void repo_init_revisions(struct repository *r,
 +                       struct rev_info *revs,
 +                       const char *prefix)
  {
        memset(revs, 0, sizeof(*revs));
  
 +      revs->repo = r;
        revs->abbrev = DEFAULT_ABBREV;
        revs->ignore_merges = 1;
        revs->simplify_history = 1;
        revs->commit_format = CMIT_FMT_DEFAULT;
        revs->expand_tabs_in_log_default = 8;
  
 -      init_grep_defaults();
 -      grep_init(&revs->grep_filter, prefix);
 +      init_grep_defaults(revs->repo);
 +      grep_init(&revs->grep_filter, revs->repo, prefix);
        revs->grep_filter.status_only = 1;
  
 -      diff_setup(&revs->diffopt);
 +      repo_diff_setup(revs->repo, &revs->diffopt);
        if (prefix && !revs->diffopt.prefix) {
                revs->diffopt.prefix = prefix;
                revs->diffopt.prefix_length = strlen(prefix);
@@@ -1500,7 -1469,6 +1500,7 @@@ static void prepare_show_merge(struct r
        struct object_id oid;
        const char **prune = NULL;
        int i, prune_num = 1; /* counting terminating NULL */
 +      struct index_state *istate = revs->repo->index;
  
        if (get_oid("HEAD", &oid))
                die("--merge without HEAD?");
        free_commit_list(bases);
        head->object.flags |= SYMMETRIC_LEFT;
  
 -      if (!active_nr)
 -              read_cache();
 -      for (i = 0; i < active_nr; i++) {
 -              const struct cache_entry *ce = active_cache[i];
 +      if (!istate->cache_nr)
 +              read_index(istate);
 +      for (i = 0; i < istate->cache_nr; i++) {
 +              const struct cache_entry *ce = istate->cache[i];
                if (!ce_stage(ce))
                        continue;
 -              if (ce_path_match(ce, &revs->prune_data, NULL)) {
 +              if (ce_path_match(istate, ce, &revs->prune_data, NULL)) {
                        prune_num++;
                        REALLOC_ARRAY(prune, prune_num);
                        prune[prune_num-2] = ce->name;
                        prune[prune_num-1] = NULL;
                }
 -              while ((i+1 < active_nr) &&
 -                     ce_same_name(ce, active_cache[i+1]))
 +              while ((i+1 < istate->cache_nr) &&
 +                     ce_same_name(ce, istate->cache[i+1]))
                        i++;
        }
        clear_pathspec(&revs->prune_data);
@@@ -1586,8 -1554,8 +1586,8 @@@ static int handle_dotdot_1(const char *
                *dotdot = '\0';
        }
  
 -      a_obj = parse_object(&a_oid);
 -      b_obj = parse_object(&b_oid);
 +      a_obj = parse_object(revs->repo, &a_oid);
 +      b_obj = parse_object(revs->repo, &b_oid);
        if (!a_obj || !b_obj)
                return dotdot_missing(arg, dotdot, revs, symmetric);
  
                struct commit *a, *b;
                struct commit_list *exclude;
  
 -              a = lookup_commit_reference(&a_obj->oid);
 -              b = lookup_commit_reference(&b_obj->oid);
 +              a = lookup_commit_reference(revs->repo, &a_obj->oid);
 +              b = lookup_commit_reference(revs->repo, &b_obj->oid);
                if (!a || !b)
                        return dotdot_missing(arg, dotdot, revs, symmetric);
  
@@@ -1783,7 -1751,6 +1783,7 @@@ static int handle_revision_opt(struct r
        const char *arg = argv[0];
        const char *optarg;
        int argcount;
 +      const unsigned hexsz = the_hash_algo->hexsz;
  
        /* pseudo revision arguments */
        if (!strcmp(arg, "--all") || !strcmp(arg, "--branches") ||
                revs->abbrev = strtoul(optarg, NULL, 10);
                if (revs->abbrev < MINIMUM_ABBREV)
                        revs->abbrev = MINIMUM_ABBREV;
 -              else if (revs->abbrev > 40)
 -                      revs->abbrev = 40;
 +              else if (revs->abbrev > hexsz)
 +                      revs->abbrev = hexsz;
        } else if (!strcmp(arg, "--abbrev-commit")) {
                revs->abbrev_commit = 1;
                revs->abbrev_commit_given = 1;
                revs->limited = 1;
        } else if (!strcmp(arg, "--ignore-missing")) {
                revs->ignore_missing = 1;
-       } else if (!strcmp(arg, "--exclude-promisor-objects")) {
+       } else if (revs->allow_exclude_promisor_objects_opt &&
+                  !strcmp(arg, "--exclude-promisor-objects")) {
                if (fetch_if_missing)
 -                      die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
 +                      BUG("exclude_promisor_objects can only be used when fetch_if_missing is 0");
                revs->exclude_promisor_objects = 1;
        } else {
                int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
@@@ -2206,10 -2174,10 +2207,10 @@@ static int handle_revision_pseudo_opt(c
                 * supported right now, so stick to single worktree.
                 */
                if (!revs->single_worktree)
 -                      die("BUG: --single-worktree cannot be used together with submodule");
 +                      BUG("--single-worktree cannot be used together with submodule");
                refs = get_submodule_ref_store(submodule);
        } else
 -              refs = get_main_ref_store();
 +              refs = get_main_ref_store(revs->repo);
  
        /*
         * NOTE!
@@@ -2323,7 -2291,7 +2324,7 @@@ static void NORETURN diagnose_missing_d
   */
  int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt)
  {
 -      int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt;
 +      int i, flags, left, seen_dashdash, got_rev_arg = 0, revarg_opt;
        struct argv_array prune_data = ARGV_ARRAY_INIT;
        const char *submodule = NULL;
  
        revarg_opt = opt ? opt->revarg_opt : 0;
        if (seen_dashdash)
                revarg_opt |= REVARG_CANNOT_BE_FILENAME;
 -      read_from_stdin = 0;
        for (left = i = 1; i < argc; i++) {
                const char *arg = argv[i];
                if (*arg == '-') {
                                        argv[left++] = arg;
                                        continue;
                                }
 -                              if (read_from_stdin++)
 +                              if (revs->read_from_stdin++)
                                        die("--stdin given twice?");
                                read_revisions_from_stdin(revs, &prune_data);
                                continue;
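
Editor's note: promoting the --stdin counter from a local variable into rev_info keeps that information around after setup_revisions() returns. One plausible use, shown here only as an assumption (this diff does not show a consumer), is telling "no revision input at all" apart from "revisions arrived on stdin but matched nothing":

    setup_revisions(argc, argv, &revs, NULL);
    if (!revs.pending.nr && !revs.read_from_stdin)
            die("no revision input given on the command line or via --stdin");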
@@@ -2889,10 -2858,9 +2890,10 @@@ void reset_revision_walk(void
  static int mark_uninteresting(const struct object_id *oid,
                              struct packed_git *pack,
                              uint32_t pos,
 -                            void *unused)
 +                            void *cb)
  {
 -      struct object *o = parse_object(oid);
 +      struct rev_info *revs = cb;
 +      struct object *o = parse_object(revs->repo, oid);
        o->flags |= UNINTERESTING | SEEN;
        return 0;
  }
@@@ -2925,7 -2893,7 +2926,7 @@@ int prepare_revision_walk(struct rev_in
                revs->treesame.name = "treesame";
  
        if (revs->exclude_promisor_objects) {
 -              for_each_packed_object(mark_uninteresting, NULL,
 +              for_each_packed_object(mark_uninteresting, revs,
                                       FOR_EACH_OBJECT_PROMISOR_ONLY);
        }
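
Editor's note: the two hunks above are halves of one change; prepare_revision_walk() now hands revs to for_each_packed_object() as the callback cookie, and mark_uninteresting() casts it back instead of reaching for a global. The same cookie pattern in isolation, with the callback name count_one invented for illustration:

    /* Count promisor objects; the cookie carries caller state. */
    static int count_one(const struct object_id *oid, struct packed_git *pack,
                         uint32_t pos, void *cb_data)
    {
            unsigned long *count = cb_data;
            (*count)++;
            return 0;       /* a non-zero return stops the iteration early */
    }

    /* ... at the call site ... */
    unsigned long count = 0;
    for_each_packed_object(count_one, &count, FOR_EACH_OBJECT_PROMISOR_ONLY);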
  
@@@ -3119,7 -3087,7 +3120,7 @@@ enum commit_action get_commit_action(st
  {
        if (commit->object.flags & SHOWN)
                return commit_ignore;
 -      if (revs->unpacked && has_sha1_pack(commit->object.oid.hash))
 +      if (revs->unpacked && has_object_pack(&commit->object.oid))
                return commit_ignore;
        if (commit->object.flags & UNINTERESTING)
                return commit_ignore;
@@@ -3243,7 -3211,7 +3244,7 @@@ static void track_linear(struct rev_inf
                struct commit_list *p;
                for (p = revs->previous_parents; p; p = p->next)
                        if (p->item == NULL || /* first commit */
 -                          !oidcmp(&p->item->object.oid, &commit->object.oid))
 +                          oideq(&p->item->object.oid, &commit->object.oid))
                                break;
                revs->linear = p != NULL;
        }
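
Editor's note: track_linear() above also picks up the oidcmp()-to-oideq() conversion; where only equality matters, oideq() states the intent directly instead of negating a three-way comparison. An equivalent pair, wrapped in an illustrative helper (same_commit is not part of this diff):

    static int same_commit(const struct commit *a, const struct commit *b)
    {
            /* oideq(x, y) is equivalent to !oidcmp(x, y) */
            return oideq(&a->object.oid, &b->object.oid);
    }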
diff --combined revision.h
index 1cd0c4b200887e6b73d6a2473712bb5832f7e079,e892a40cd9a0df6e1c184457d69245cb36a711f6..0d2abc2d36ec579c281135a2a3b866fb37911326
@@@ -1,13 -1,11 +1,13 @@@
  #ifndef REVISION_H
  #define REVISION_H
  
 +#include "commit.h"
  #include "parse-options.h"
  #include "grep.h"
  #include "notes.h"
  #include "pretty.h"
  #include "diff.h"
 +#include "commit-slab-decl.h"
  
  /* Remember to update object flag allocation in object.h */
  #define SEEN          (1u<<0)
  #define SYMMETRIC_LEFT        (1u<<8)
  #define PATCHSAME     (1u<<9)
  #define BOTTOM                (1u<<10)
 +/*
 + * Indicates object was reached by traversal. i.e. not given by user on
 + * command-line or stdin.
 + * NEEDSWORK: NOT_USER_GIVEN doesn't apply to commits because we only support
 + * filtering trees and blobs, but it may be useful to support filtering commits
 + * in the future.
 + */
 +#define NOT_USER_GIVEN        (1u<<25)
  #define TRACK_LINEAR  (1u<<26)
 -#define ALL_REV_FLAGS (((1u<<11)-1) | TRACK_LINEAR)
 +#define ALL_REV_FLAGS (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR)
  
  #define DECORATE_SHORT_REFS   1
  #define DECORATE_FULL_REFS    2
  
 -struct rev_info;
  struct log_info;
 +struct repository;
 +struct rev_info;
  struct string_list;
  struct saved_parents;
 +define_shared_commit_slab(revision_sources, char *);
  
  struct rev_cmdline_info {
        unsigned int nr;
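
Editor's note: NOT_USER_GIVEN separates objects the walk merely reached from objects the user named explicitly; per the comment above, filtering only ever omits the former, so user-named trees and blobs are never silently dropped. A tiny helper, sketched only to show how the bit is consulted (the helper name is invented):

    /* True for objects reached by traversal, false for ones the user named. */
    static int reached_by_traversal_only(const struct object *obj)
    {
            return !!(obj->flags & NOT_USER_GIVEN);
    }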
@@@ -68,7 -56,6 +68,7 @@@ struct rev_info 
        /* Starting list */
        struct commit_list *commits;
        struct object_array pending;
 +      struct repository *repo;
  
        /* Parents of shown commits */
        struct object_array boundary_commits;
         */
        int rev_input_given;
  
 +      /*
 +       * Whether we read from stdin due to the --stdin option.
 +       */
 +      int read_from_stdin;
 +
        /* topo-sort */
        enum rev_sort_order sort_order;
  
                        right_only:1,
                        rewrite_parents:1,
                        print_parents:1,
 -                      show_source:1,
                        show_decorations:1,
                        reverse:1,
                        reverse_output_stage:1,
                        line_level_traverse:1,
                        tree_blobs_in_commit_order:1,
  
 +                      /*
 +                       * Blobs are shown without regard for their existence.
 +                       * But not so for trees: unless exclude_promisor_objects
 +                       * is set and the tree in question is a promisor object;
 +                       * OR ignore_missing_links is set, the revision walker
 +                       * dies with a "bad tree object HASH" message when
 +                       * encountering a missing tree. For callers that can
 +                       * handle missing trees and want them to be filterable
 +                       * and showable, set this to true. The revision walker
 +                       * will filter and show such a missing tree as usual,
 +                       * but will not attempt to recurse into this tree
 +                       * object.
 +                       */
 +                      do_not_die_on_missing_tree:1,
 +
                        /* for internal use only */
+                       allow_exclude_promisor_objects_opt:1,
                        exclude_promisor_objects:1;
  
        /* Diff flags */
        /* notes-specific options: which refs to show */
        struct display_notes_opt notes_opt;
  
 +      /* interdiff */
 +      const struct object_id *idiff_oid1;
 +      const struct object_id *idiff_oid2;
 +      const char *idiff_title;
 +
 +      /* range-diff */
 +      const char *rdiff1;
 +      const char *rdiff2;
 +      int creation_factor;
 +      const char *rdiff_title;
 +
        /* commit counts */
        int count_left;
        int count_right;
  
        struct commit_list *previous_parents;
        const char *break_bar;
 +
 +      struct revision_sources *sources;
  };
  
 -extern int ref_excluded(struct string_list *, const char *path);
 +int ref_excluded(struct string_list *, const char *path);
  void clear_ref_exclusion(struct string_list **);
  void add_ref_exclusion(struct string_list **, const char *exclude);
  
@@@ -288,49 -244,44 +289,49 @@@ extern volatile show_early_output_fn_t 
  struct setup_revision_opt {
        const char *def;
        void (*tweak)(struct rev_info *, struct setup_revision_opt *);
 -      const char *submodule;
 +      const char *submodule;  /* TODO: drop this and use rev_info->repo */
        int assume_dashdash;
        unsigned revarg_opt;
  };
  
 -extern void init_revisions(struct rev_info *revs, const char *prefix);
 -extern int setup_revisions(int argc, const char **argv, struct rev_info *revs,
 -                         struct setup_revision_opt *);
 -extern void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
 -                             const struct option *options,
 -                             const char * const usagestr[]);
 +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
 +#define init_revisions(revs, prefix) repo_init_revisions(the_repository, revs, prefix)
 +#endif
 +void repo_init_revisions(struct repository *r,
 +                       struct rev_info *revs,
 +                       const char *prefix);
 +int setup_revisions(int argc, const char **argv, struct rev_info *revs,
 +                  struct setup_revision_opt *);
 +void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
 +                      const struct option *options,
 +                      const char * const usagestr[]);
  #define REVARG_CANNOT_BE_FILENAME 01
  #define REVARG_COMMITTISH 02
 -extern int handle_revision_arg(const char *arg, struct rev_info *revs,
 -                             int flags, unsigned revarg_opt);
 +int handle_revision_arg(const char *arg, struct rev_info *revs,
 +                      int flags, unsigned revarg_opt);
  
 -extern void reset_revision_walk(void);
 -extern int prepare_revision_walk(struct rev_info *revs);
 -extern struct commit *get_revision(struct rev_info *revs);
 -extern char *get_revision_mark(const struct rev_info *revs,
 -                             const struct commit *commit);
 -extern void put_revision_mark(const struct rev_info *revs,
 -                            const struct commit *commit);
 +void reset_revision_walk(void);
 +int prepare_revision_walk(struct rev_info *revs);
 +struct commit *get_revision(struct rev_info *revs);
 +char *get_revision_mark(const struct rev_info *revs,
 +                      const struct commit *commit);
 +void put_revision_mark(const struct rev_info *revs,
 +                     const struct commit *commit);
  
 -extern void mark_parents_uninteresting(struct commit *commit);
 -extern void mark_tree_uninteresting(struct tree *tree);
 +void mark_parents_uninteresting(struct commit *commit);
 +void mark_tree_uninteresting(struct repository *r, struct tree *tree);
  
 -extern void show_object_with_name(FILE *, struct object *, const char *);
 +void show_object_with_name(FILE *, struct object *, const char *);
  
 -extern void add_pending_object(struct rev_info *revs,
 -                             struct object *obj, const char *name);
 -extern void add_pending_oid(struct rev_info *revs,
 -                          const char *name, const struct object_id *oid,
 -                          unsigned int flags);
 +void add_pending_object(struct rev_info *revs,
 +                      struct object *obj, const char *name);
 +void add_pending_oid(struct rev_info *revs,
 +                   const char *name, const struct object_id *oid,
 +                   unsigned int flags);
  
 -extern void add_head_to_pending(struct rev_info *);
 -extern void add_reflogs_to_pending(struct rev_info *, unsigned int flags);
 -extern void add_index_objects_to_pending(struct rev_info *, unsigned int flags);
 +void add_head_to_pending(struct rev_info *);
 +void add_reflogs_to_pending(struct rev_info *, unsigned int flags);
 +void add_index_objects_to_pending(struct rev_info *, unsigned int flags);
  
  enum commit_action {
        commit_ignore,
        commit_error
  };
  
 -extern enum commit_action get_commit_action(struct rev_info *revs,
 -                                          struct commit *commit);
 -extern enum commit_action simplify_commit(struct rev_info *revs,
 -                                        struct commit *commit);
 +enum commit_action get_commit_action(struct rev_info *revs,
 +                                   struct commit *commit);
 +enum commit_action simplify_commit(struct rev_info *revs,
 +                                 struct commit *commit);
  
  enum rewrite_result {
        rewrite_one_ok,
  
  typedef enum rewrite_result (*rewrite_parent_fn_t)(struct rev_info *revs, struct commit **pp);
  
 -extern int rewrite_parents(struct rev_info *revs, struct commit *commit,
 -      rewrite_parent_fn_t rewrite_parent);
 +int rewrite_parents(struct rev_info *revs,
 +                  struct commit *commit,
 +                  rewrite_parent_fn_t rewrite_parent);
  
  /*
   * The log machinery saves the original parent list so that
   * get_saved_parents() will transparently return commit->parents if
   * history simplification is off.
   */
 -extern struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit);
 +struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit);
  
  #endif
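
Editor's note: after the header change above, repo_init_revisions() is the real entry point and init_revisions() survives only as a compatibility macro, compiled in unless NO_THE_REPOSITORY_COMPATIBILITY_MACROS is defined. The two spellings below are therefore equivalent (shown together only to illustrate the expansion; prefix is assumed to be in scope):

    struct rev_info revs;

    /* Legacy spelling, expanded by the compatibility macro ... */
    init_revisions(&revs, prefix);

    /* ... into the explicit, repository-aware call. */
    repo_init_revisions(the_repository, &revs, prefix);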
diff --combined t/t4202-log.sh
index 153a506151e2afe3f1e047cd5a00270f1378ce14,61610ce08e921ed937bac60a3a12f12afc7fcfb1..819c24d10eaa3cb4a58b72ae8a8e96151b5cabe9
@@@ -340,9 -340,10 +340,9 @@@ test_expect_success PCRE 'log -F -E --p
  '
  
  test_expect_success 'log with grep.patternType configuration' '
 -      >expect &&
        git -c grep.patterntype=fixed \
        log -1 --pretty=tformat:%s --grep=s.c.nd >actual &&
 -      test_cmp expect actual
 +      test_must_be_empty actual
  '
  
  test_expect_success 'log with grep.patternType configuration and command line' '
@@@ -1555,28 -1556,12 +1555,28 @@@ test_expect_success GPG 'setup signed b
        git commit -S -m signed_commit
  '
  
 +test_expect_success GPGSM 'setup signed branch x509' '
 +      test_when_finished "git reset --hard && git checkout master" &&
 +      git checkout -b signed-x509 master &&
 +      echo foo >foo &&
 +      git add foo &&
 +      test_config gpg.format x509 &&
 +      test_config user.signingkey $GIT_COMMITTER_EMAIL &&
 +      git commit -S -m signed_commit
 +'
 +
  test_expect_success GPG 'log --graph --show-signature' '
        git log --graph --show-signature -n1 signed >actual &&
        grep "^| gpg: Signature made" actual &&
        grep "^| gpg: Good signature" actual
  '
  
 +test_expect_success GPGSM 'log --graph --show-signature x509' '
 +      git log --graph --show-signature -n1 signed-x509 >actual &&
 +      grep "^| gpgsm: Signature made" actual &&
 +      grep "^| gpgsm: Good signature" actual
 +'
 +
  test_expect_success GPG 'log --graph --show-signature for merged tag' '
        test_when_finished "git reset --hard && git checkout master" &&
        git checkout -b plain master &&
        grep "^| | gpg: Good signature" actual
  '
  
 +test_expect_success GPGSM 'log --graph --show-signature for merged tag x509' '
 +      test_when_finished "git reset --hard && git checkout master" &&
 +      test_config gpg.format x509 &&
 +      test_config user.signingkey $GIT_COMMITTER_EMAIL &&
 +      git checkout -b plain-x509 master &&
 +      echo aaa >bar &&
 +      git add bar &&
 +      git commit -m bar_commit &&
 +      git checkout -b tagged-x509 master &&
 +      echo bbb >baz &&
 +      git add baz &&
 +      git commit -m baz_commit &&
 +      git tag -s -m signed_tag_msg signed_tag_x509 &&
 +      git checkout plain-x509 &&
 +      git merge --no-ff -m msg signed_tag_x509 &&
 +      git log --graph --show-signature -n1 plain-x509 >actual &&
 +      grep "^|\\\  merged tag" actual &&
 +      grep "^| | gpgsm: Signature made" actual &&
 +      grep "^| | gpgsm: Good signature" actual
 +'
 +
  test_expect_success GPG '--no-show-signature overrides --show-signature' '
        git log -1 --show-signature --no-show-signature signed >actual &&
        ! grep "^gpg:" actual
@@@ -1661,8 -1625,9 +1661,8 @@@ test_expect_success 'log diagnoses bogu
  '
  
  test_expect_success 'log does not default to HEAD when rev input is given' '
 -      >expect &&
        git log --branches=does-not-exist >actual &&
 -      test_cmp expect actual
 +      test_must_be_empty actual
  '
  
  test_expect_success 'set up --source tests' '
@@@ -1703,4 -1668,8 +1703,8 @@@ test_expect_success 'log --source paint
        test_cmp expect actual
  '
  
+ test_expect_success '--exclude-promisor-objects does not BUG-crash' '
+       test_must_fail git log --exclude-promisor-objects source-a
+ '
+ 
  test_done