1 #include "builtin.h"
2 #include "cache.h"
3 #include "repository.h"
4 #include "config.h"
5 #include "attr.h"
6 #include "object.h"
7 #include "blob.h"
8 #include "commit.h"
9 #include "tag.h"
10 #include "tree.h"
11 #include "delta.h"
12 #include "pack.h"
13 #include "pack-revindex.h"
14 #include "csum-file.h"
15 #include "tree-walk.h"
16 #include "diff.h"
17 #include "revision.h"
18 #include "list-objects.h"
19 #include "list-objects-filter.h"
20 #include "list-objects-filter-options.h"
21 #include "pack-objects.h"
22 #include "progress.h"
23 #include "refs.h"
24 #include "streaming.h"
25 #include "thread-utils.h"
26 #include "pack-bitmap.h"
27 #include "delta-islands.h"
28 #include "reachable.h"
29 #include "sha1-array.h"
30 #include "argv-array.h"
31 #include "list.h"
32 #include "packfile.h"
33 #include "object-store.h"
34 #include "dir.h"
35 #include "midx.h"
36 #include "trace2.h"
37
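/*
 * Convenience wrappers: the macros below operate on entries of the
 * global to_pack list via the oe_*() accessors from pack-objects.h,
 * which help keep "struct object_entry" small.
 */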
38 #define IN_PACK(obj) oe_in_pack(&to_pack, obj)
39 #define SIZE(obj) oe_size(&to_pack, obj)
40 #define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
41 #define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
42 #define DELTA(obj) oe_delta(&to_pack, obj)
43 #define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
44 #define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
45 #define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
46 #define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
47 #define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
48 #define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
49 #define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
50
51 static const char *pack_usage[] = {
52 N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
53 N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
54 NULL
55 };
56
57 /*
58 * Objects we are going to pack are collected in the `to_pack` structure.
59 * It contains an array (dynamically expanded) of the object data, and a map
60 * that can resolve SHA1s to their position in the array.
61 */
62 static struct packing_data to_pack;
63
64 static struct pack_idx_entry **written_list;
65 static uint32_t nr_result, nr_written, nr_seen;
66 static struct bitmap_index *bitmap_git;
67 static uint32_t write_layer;
68
69 static int non_empty;
70 static int reuse_delta = 1, reuse_object = 1;
71 static int keep_unreachable, unpack_unreachable, include_tag;
72 static timestamp_t unpack_unreachable_expiration;
73 static int pack_loose_unreachable;
74 static int local;
75 static int have_non_local_packs;
76 static int incremental;
77 static int ignore_packed_keep_on_disk;
78 static int ignore_packed_keep_in_core;
79 static int allow_ofs_delta;
80 static struct pack_idx_option pack_idx_opts;
81 static const char *base_name;
82 static int progress = 1;
83 static int window = 10;
84 static unsigned long pack_size_limit;
85 static int depth = 50;
86 static int delta_search_threads;
87 static int pack_to_stdout;
88 static int sparse;
89 static int thin;
90 static int num_preferred_base;
91 static struct progress *progress_state;
92
93 static struct packed_git *reuse_packfile;
94 static uint32_t reuse_packfile_objects;
95 static off_t reuse_packfile_offset;
96
97 static int use_bitmap_index_default = 1;
98 static int use_bitmap_index = -1;
99 static enum {
100 WRITE_BITMAP_FALSE = 0,
101 WRITE_BITMAP_QUIET,
102 WRITE_BITMAP_TRUE,
103 } write_bitmap_index;
104 static uint16_t write_bitmap_options = BITMAP_OPT_HASH_CACHE;
105
106 static int exclude_promisor_objects;
107
108 static int use_delta_islands;
109
110 static unsigned long delta_cache_size = 0;
111 static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
112 static unsigned long cache_max_small_delta_size = 1000;
113
114 static unsigned long window_memory_limit = 0;
115
116 static struct list_objects_filter_options filter_options;
117
118 enum missing_action {
119 MA_ERROR = 0, /* fail if any missing objects are encountered */
120 MA_ALLOW_ANY, /* silently allow ALL missing objects */
121 MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
122 };
123 static enum missing_action arg_missing_action;
124 static show_object_fn fn_show_object;
125
126 /*
127 * stats
128 */
129 static uint32_t written, written_delta;
130 static uint32_t reused, reused_delta;
131
132 /*
133 * Indexed commits
134 */
135 static struct commit **indexed_commits;
136 static unsigned int indexed_commits_nr;
137 static unsigned int indexed_commits_alloc;
138
139 static void index_commit_for_bitmap(struct commit *commit)
140 {
141 if (indexed_commits_nr >= indexed_commits_alloc) {
142 indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
143 REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
144 }
145
146 indexed_commits[indexed_commits_nr++] = commit;
147 }
148
149 static void *get_delta(struct object_entry *entry)
150 {
151 unsigned long size, base_size, delta_size;
152 void *buf, *base_buf, *delta_buf;
153 enum object_type type;
154
155 buf = read_object_file(&entry->idx.oid, &type, &size);
156 if (!buf)
157 die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
158 base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
159 &base_size);
160 if (!base_buf)
161 die("unable to read %s",
162 oid_to_hex(&DELTA(entry)->idx.oid));
163 delta_buf = diff_delta(base_buf, base_size,
164 buf, size, &delta_size, 0);
165 /*
166 * We successfully computed this delta once but dropped it for
167 * memory reasons. Something is very wrong if this time we
168 * recompute and create a different delta.
169 */
170 if (!delta_buf || delta_size != DELTA_SIZE(entry))
171 BUG("delta size changed");
172 free(buf);
173 free(base_buf);
174 return delta_buf;
175 }
176
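/*
 * Deflate the buffer at *pptr in one shot: *pptr is replaced with a
 * freshly allocated buffer holding the compressed data (the original
 * buffer is freed), and the compressed size is returned.
 */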
177 static unsigned long do_compress(void **pptr, unsigned long size)
178 {
179 git_zstream stream;
180 void *in, *out;
181 unsigned long maxsize;
182
183 git_deflate_init(&stream, pack_compression_level);
184 maxsize = git_deflate_bound(&stream, size);
185
186 in = *pptr;
187 out = xmalloc(maxsize);
188 *pptr = out;
189
190 stream.next_in = in;
191 stream.avail_in = size;
192 stream.next_out = out;
193 stream.avail_out = maxsize;
194 while (git_deflate(&stream, Z_FINISH) == Z_OK)
195 ; /* nothing */
196 git_deflate_end(&stream);
197
198 free(in);
199 return stream.total_out;
200 }
201
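/*
 * Stream a large blob through zlib in fixed 16 KiB chunks so the whole
 * object never has to fit in memory; returns the number of compressed
 * bytes written to the pack.
 */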
202 static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
203 const struct object_id *oid)
204 {
205 git_zstream stream;
206 unsigned char ibuf[1024 * 16];
207 unsigned char obuf[1024 * 16];
208 unsigned long olen = 0;
209
210 git_deflate_init(&stream, pack_compression_level);
211
212 for (;;) {
213 ssize_t readlen;
214 int zret = Z_OK;
215 readlen = read_istream(st, ibuf, sizeof(ibuf));
216 if (readlen == -1)
217 die(_("unable to read %s"), oid_to_hex(oid));
218
219 stream.next_in = ibuf;
220 stream.avail_in = readlen;
221 while ((stream.avail_in || readlen == 0) &&
222 (zret == Z_OK || zret == Z_BUF_ERROR)) {
223 stream.next_out = obuf;
224 stream.avail_out = sizeof(obuf);
225 zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
226 hashwrite(f, obuf, stream.next_out - obuf);
227 olen += stream.next_out - obuf;
228 }
229 if (stream.avail_in)
230 die(_("deflate error (%d)"), zret);
231 if (readlen == 0) {
232 if (zret != Z_STREAM_END)
233 die(_("deflate error (%d)"), zret);
234 break;
235 }
236 }
237 git_deflate_end(&stream);
238 return olen;
239 }
240
241 /*
242 * We are going to reuse the existing object data as-is; make
243 * sure it is not corrupt.
244 */
245 static int check_pack_inflate(struct packed_git *p,
246 struct pack_window **w_curs,
247 off_t offset,
248 off_t len,
249 unsigned long expect)
250 {
251 git_zstream stream;
252 unsigned char fakebuf[4096], *in;
253 int st;
254
255 memset(&stream, 0, sizeof(stream));
256 git_inflate_init(&stream);
257 do {
258 in = use_pack(p, w_curs, offset, &stream.avail_in);
259 stream.next_in = in;
260 stream.next_out = fakebuf;
261 stream.avail_out = sizeof(fakebuf);
262 st = git_inflate(&stream, Z_FINISH);
263 offset += stream.next_in - in;
264 } while (st == Z_OK || st == Z_BUF_ERROR);
265 git_inflate_end(&stream);
266 return (st == Z_STREAM_END &&
267 stream.total_out == expect &&
268 stream.total_in == len) ? 0 : -1;
269 }
270
271 static void copy_pack_data(struct hashfile *f,
272 struct packed_git *p,
273 struct pack_window **w_curs,
274 off_t offset,
275 off_t len)
276 {
277 unsigned char *in;
278 unsigned long avail;
279
280 while (len) {
281 in = use_pack(p, w_curs, offset, &avail);
282 if (avail > len)
283 avail = (unsigned long)len;
284 hashwrite(f, in, avail);
285 offset += avail;
286 len -= avail;
287 }
288 }
289
290 /* Return 0 if we will bust the pack-size limit */
291 static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
292 unsigned long limit, int usable_delta)
293 {
294 unsigned long size, datalen;
295 unsigned char header[MAX_PACK_OBJECT_HEADER],
296 dheader[MAX_PACK_OBJECT_HEADER];
297 unsigned hdrlen;
298 enum object_type type;
299 void *buf;
300 struct git_istream *st = NULL;
301 const unsigned hashsz = the_hash_algo->rawsz;
302
303 if (!usable_delta) {
304 if (oe_type(entry) == OBJ_BLOB &&
305 oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
306 (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
307 buf = NULL;
308 else {
309 buf = read_object_file(&entry->idx.oid, &type, &size);
310 if (!buf)
311 die(_("unable to read %s"),
312 oid_to_hex(&entry->idx.oid));
313 }
314 /*
315 * make sure no cached delta data remains from a
316 * previous attempt before a pack split occurred.
317 */
318 FREE_AND_NULL(entry->delta_data);
319 entry->z_delta_size = 0;
320 } else if (entry->delta_data) {
321 size = DELTA_SIZE(entry);
322 buf = entry->delta_data;
323 entry->delta_data = NULL;
324 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
325 OBJ_OFS_DELTA : OBJ_REF_DELTA;
326 } else {
327 buf = get_delta(entry);
328 size = DELTA_SIZE(entry);
329 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
330 OBJ_OFS_DELTA : OBJ_REF_DELTA;
331 }
332
333 if (st) /* large blob case, just assume we don't compress well */
334 datalen = size;
335 else if (entry->z_delta_size)
336 datalen = entry->z_delta_size;
337 else
338 datalen = do_compress(&buf, size);
339
340 /*
341 * The object header is a byte of 'type' followed by zero or
342 * more bytes of length.
343 */
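/*
 * e.g., with this encoding an OBJ_BLOB (type 3) of size 100 becomes
 * the two bytes 0xb4 0x06: the first byte holds the type and the low
 * four size bits (MSB set = more bytes follow), and later bytes hold
 * seven size bits each.
 */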
344 hdrlen = encode_in_pack_object_header(header, sizeof(header),
345 type, size);
346
347 if (type == OBJ_OFS_DELTA) {
348 /*
349 * Deltas with relative base contain an additional
350 * encoding of the relative offset for the delta
351 * base from this object's position in the pack.
352 */
353 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
354 unsigned pos = sizeof(dheader) - 1;
355 dheader[pos] = ofs & 127;
356 while (ofs >>= 7)
357 dheader[--pos] = 128 | (--ofs & 127);
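/*
 * e.g., ofs = 300 encodes (most significant group first) as
 * 0x81 0x2c; note the "--ofs" bias on the high-order groups,
 * which the decoder in check_object() undoes with its
 * "ofs += 1" step.
 */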
358 if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
359 if (st)
360 close_istream(st);
361 free(buf);
362 return 0;
363 }
364 hashwrite(f, header, hdrlen);
365 hashwrite(f, dheader + pos, sizeof(dheader) - pos);
366 hdrlen += sizeof(dheader) - pos;
367 } else if (type == OBJ_REF_DELTA) {
368 /*
369 * Deltas with a base reference contain
370 * additional bytes for the base object ID.
371 */
372 if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
373 if (st)
374 close_istream(st);
375 free(buf);
376 return 0;
377 }
378 hashwrite(f, header, hdrlen);
379 hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
380 hdrlen += hashsz;
381 } else {
382 if (limit && hdrlen + datalen + hashsz >= limit) {
383 if (st)
384 close_istream(st);
385 free(buf);
386 return 0;
387 }
388 hashwrite(f, header, hdrlen);
389 }
390 if (st) {
391 datalen = write_large_blob_data(st, f, &entry->idx.oid);
392 close_istream(st);
393 } else {
394 hashwrite(f, buf, datalen);
395 free(buf);
396 }
397
398 return hdrlen + datalen;
399 }
400
401 /* Return 0 if we will bust the pack-size limit */
402 static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
403 unsigned long limit, int usable_delta)
404 {
405 struct packed_git *p = IN_PACK(entry);
406 struct pack_window *w_curs = NULL;
407 struct revindex_entry *revidx;
408 off_t offset;
409 enum object_type type = oe_type(entry);
410 off_t datalen;
411 unsigned char header[MAX_PACK_OBJECT_HEADER],
412 dheader[MAX_PACK_OBJECT_HEADER];
413 unsigned hdrlen;
414 const unsigned hashsz = the_hash_algo->rawsz;
415 unsigned long entry_size = SIZE(entry);
416
417 if (DELTA(entry))
418 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
419 OBJ_OFS_DELTA : OBJ_REF_DELTA;
420 hdrlen = encode_in_pack_object_header(header, sizeof(header),
421 type, entry_size);
422
423 offset = entry->in_pack_offset;
424 revidx = find_pack_revindex(p, offset);
425 datalen = revidx[1].offset - offset;
426 if (!pack_to_stdout && p->index_version > 1 &&
427 check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
428 error(_("bad packed object CRC for %s"),
429 oid_to_hex(&entry->idx.oid));
430 unuse_pack(&w_curs);
431 return write_no_reuse_object(f, entry, limit, usable_delta);
432 }
433
434 offset += entry->in_pack_header_size;
435 datalen -= entry->in_pack_header_size;
436
437 if (!pack_to_stdout && p->index_version == 1 &&
438 check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
439 error(_("corrupt packed object for %s"),
440 oid_to_hex(&entry->idx.oid));
441 unuse_pack(&w_curs);
442 return write_no_reuse_object(f, entry, limit, usable_delta);
443 }
444
445 if (type == OBJ_OFS_DELTA) {
446 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
447 unsigned pos = sizeof(dheader) - 1;
448 dheader[pos] = ofs & 127;
449 while (ofs >>= 7)
450 dheader[--pos] = 128 | (--ofs & 127);
451 if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
452 unuse_pack(&w_curs);
453 return 0;
454 }
455 hashwrite(f, header, hdrlen);
456 hashwrite(f, dheader + pos, sizeof(dheader) - pos);
457 hdrlen += sizeof(dheader) - pos;
458 reused_delta++;
459 } else if (type == OBJ_REF_DELTA) {
460 if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
461 unuse_pack(&w_curs);
462 return 0;
463 }
464 hashwrite(f, header, hdrlen);
465 hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
466 hdrlen += hashsz;
467 reused_delta++;
468 } else {
469 if (limit && hdrlen + datalen + hashsz >= limit) {
470 unuse_pack(&w_curs);
471 return 0;
472 }
473 hashwrite(f, header, hdrlen);
474 }
475 copy_pack_data(f, p, &w_curs, offset, datalen);
476 unuse_pack(&w_curs);
477 reused++;
478 return hdrlen + datalen;
479 }
480
481 /* Return 0 if we will bust the pack-size limit */
482 static off_t write_object(struct hashfile *f,
483 struct object_entry *entry,
484 off_t write_offset)
485 {
486 unsigned long limit;
487 off_t len;
488 int usable_delta, to_reuse;
489
490 if (!pack_to_stdout)
491 crc32_begin(f);
492
493 /* apply the size limit if the pack size is limited and this is not the first object */
494 if (!pack_size_limit || !nr_written)
495 limit = 0;
496 else if (pack_size_limit <= write_offset)
497 /*
498 * the earlier object did not fit the limit; avoid
499 * mistaking this for unlimited (i.e. limit = 0).
500 */
501 limit = 1;
502 else
503 limit = pack_size_limit - write_offset;
504
505 if (!DELTA(entry))
506 usable_delta = 0; /* no delta */
507 else if (!pack_size_limit)
508 usable_delta = 1; /* unlimited packfile */
509 else if (DELTA(entry)->idx.offset == (off_t)-1)
510 usable_delta = 0; /* base was written to another pack */
511 else if (DELTA(entry)->idx.offset)
512 usable_delta = 1; /* base already exists in this pack */
513 else
514 usable_delta = 0; /* base could end up in another pack */
515
516 if (!reuse_object)
517 to_reuse = 0; /* explicit */
518 else if (!IN_PACK(entry))
519 to_reuse = 0; /* can't reuse what we don't have */
520 else if (oe_type(entry) == OBJ_REF_DELTA ||
521 oe_type(entry) == OBJ_OFS_DELTA)
522 /* check_object() decided it for us ... */
523 to_reuse = usable_delta;
524 /* ... but pack split may override that */
525 else if (oe_type(entry) != entry->in_pack_type)
526 to_reuse = 0; /* pack has delta which is unusable */
527 else if (DELTA(entry))
528 to_reuse = 0; /* we want to pack afresh */
529 else
530 to_reuse = 1; /* we have it in-pack undeltified,
531 * and we do not need to deltify it.
532 */
533
534 if (!to_reuse)
535 len = write_no_reuse_object(f, entry, limit, usable_delta);
536 else
537 len = write_reuse_object(f, entry, limit, usable_delta);
538 if (!len)
539 return 0;
540
541 if (usable_delta)
542 written_delta++;
543 written++;
544 if (!pack_to_stdout)
545 entry->idx.crc32 = crc32_end(f);
546 return len;
547 }
548
549 enum write_one_status {
550 WRITE_ONE_SKIP = -1, /* already written */
551 WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
552 WRITE_ONE_WRITTEN = 1, /* normal */
553 WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
554 };
555
556 static enum write_one_status write_one(struct hashfile *f,
557 struct object_entry *e,
558 off_t *offset)
559 {
560 off_t size;
561 int recursing;
562
563 /*
564 * we set offset to 1 (which is an impossible value) to mark
565 * the fact that this object is involved in "write its base
566 * first before writing a deltified object" recursion.
567 */
568 recursing = (e->idx.offset == 1);
569 if (recursing) {
570 warning(_("recursive delta detected for object %s"),
571 oid_to_hex(&e->idx.oid));
572 return WRITE_ONE_RECURSIVE;
573 } else if (e->idx.offset || e->preferred_base) {
574 /* offset is non-zero if the object has already been written. */
575 return WRITE_ONE_SKIP;
576 }
577
578 /* if we are deltified, write out base object first. */
579 if (DELTA(e)) {
580 e->idx.offset = 1; /* now recurse */
581 switch (write_one(f, DELTA(e), offset)) {
582 case WRITE_ONE_RECURSIVE:
583 /* we cannot depend on this one */
584 SET_DELTA(e, NULL);
585 break;
586 default:
587 break;
588 case WRITE_ONE_BREAK:
589 e->idx.offset = recursing;
590 return WRITE_ONE_BREAK;
591 }
592 }
593
594 e->idx.offset = *offset;
595 size = write_object(f, e, *offset);
596 if (!size) {
597 e->idx.offset = recursing;
598 return WRITE_ONE_BREAK;
599 }
600 written_list[nr_written++] = &e->idx;
601
602 /* make sure off_t is sufficiently large not to wrap */
603 if (signed_add_overflows(*offset, size))
604 die(_("pack too large for current definition of off_t"));
605 *offset += size;
606 return WRITE_ONE_WRITTEN;
607 }
608
609 static int mark_tagged(const char *path, const struct object_id *oid, int flag,
610 void *cb_data)
611 {
612 struct object_id peeled;
613 struct object_entry *entry = packlist_find(&to_pack, oid, NULL);
614
615 if (entry)
616 entry->tagged = 1;
617 if (!peel_ref(path, &peeled)) {
618 entry = packlist_find(&to_pack, &peeled, NULL);
619 if (entry)
620 entry->tagged = 1;
621 }
622 return 0;
623 }
624
625 static inline void add_to_write_order(struct object_entry **wo,
626 unsigned int *endp,
627 struct object_entry *e)
628 {
629 if (e->filled || oe_layer(&to_pack, e) != write_layer)
630 return;
631 wo[(*endp)++] = e;
632 e->filled = 1;
633 }
634
635 static void add_descendants_to_write_order(struct object_entry **wo,
636 unsigned int *endp,
637 struct object_entry *e)
638 {
639 int add_to_order = 1;
640 while (e) {
641 if (add_to_order) {
642 struct object_entry *s;
643 /* add this node... */
644 add_to_write_order(wo, endp, e);
645 /* all its siblings... */
646 for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
647 add_to_write_order(wo, endp, s);
648 }
649 }
650 /* drop down a level to add left subtree nodes if possible */
651 if (DELTA_CHILD(e)) {
652 add_to_order = 1;
653 e = DELTA_CHILD(e);
654 } else {
655 add_to_order = 0;
656 /* our sibling might have some children, it is next */
657 if (DELTA_SIBLING(e)) {
658 e = DELTA_SIBLING(e);
659 continue;
660 }
661 /* go back to our parent node */
662 e = DELTA(e);
663 while (e && !DELTA_SIBLING(e)) {
664 /* we're on the right side of a subtree, keep
665 * going up until we can go right again */
666 e = DELTA(e);
667 }
668 if (!e) {
669 /* done: we hit our original root node */
670 return;
671 }
672 /* pass it off to sibling at this level */
673 e = DELTA_SIBLING(e);
674 }
675 };
676 }
677
678 static void add_family_to_write_order(struct object_entry **wo,
679 unsigned int *endp,
680 struct object_entry *e)
681 {
682 struct object_entry *root;
683
684 for (root = e; DELTA(root); root = DELTA(root))
685 ; /* nothing */
686 add_descendants_to_write_order(wo, endp, root);
687 }
688
689 static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
690 {
691 unsigned int i, last_untagged;
692 struct object_entry *objects = to_pack.objects;
693
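/*
 * First add untagged objects in the original (recency) order,
 * stopping at the first tagged one; the rest are handled in the
 * passes below.
 */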
694 for (i = 0; i < to_pack.nr_objects; i++) {
695 if (objects[i].tagged)
696 break;
697 add_to_write_order(wo, wo_end, &objects[i]);
698 }
699 last_untagged = i;
700
701 /*
702 * Then fill all the tagged tips.
703 */
704 for (; i < to_pack.nr_objects; i++) {
705 if (objects[i].tagged)
706 add_to_write_order(wo, wo_end, &objects[i]);
707 }
708
709 /*
710 * And then all remaining commits and tags.
711 */
712 for (i = last_untagged; i < to_pack.nr_objects; i++) {
713 if (oe_type(&objects[i]) != OBJ_COMMIT &&
714 oe_type(&objects[i]) != OBJ_TAG)
715 continue;
716 add_to_write_order(wo, wo_end, &objects[i]);
717 }
718
719 /*
720 * And then all the trees.
721 */
722 for (i = last_untagged; i < to_pack.nr_objects; i++) {
723 if (oe_type(&objects[i]) != OBJ_TREE)
724 continue;
725 add_to_write_order(wo, wo_end, &objects[i]);
726 }
727
728 /*
729 * Finally all the rest in really tight order
730 */
731 for (i = last_untagged; i < to_pack.nr_objects; i++) {
732 if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
733 add_family_to_write_order(wo, wo_end, &objects[i]);
734 }
735 }
736
737 static struct object_entry **compute_write_order(void)
738 {
739 uint32_t max_layers = 1;
740 unsigned int i, wo_end;
741
742 struct object_entry **wo;
743 struct object_entry *objects = to_pack.objects;
744
745 for (i = 0; i < to_pack.nr_objects; i++) {
746 objects[i].tagged = 0;
747 objects[i].filled = 0;
748 SET_DELTA_CHILD(&objects[i], NULL);
749 SET_DELTA_SIBLING(&objects[i], NULL);
750 }
751
752 /*
753 * Fully connect delta_child/delta_sibling network.
754 * Make sure delta_sibling is sorted in the original
755 * recency order.
756 */
757 for (i = to_pack.nr_objects; i > 0;) {
758 struct object_entry *e = &objects[--i];
759 if (!DELTA(e))
760 continue;
761 /* Mark me as the first child */
762 e->delta_sibling_idx = DELTA(e)->delta_child_idx;
763 SET_DELTA_CHILD(DELTA(e), e);
764 }
765
766 /*
767 * Mark objects that are at the tip of tags.
768 */
769 for_each_tag_ref(mark_tagged, NULL);
770
771 if (use_delta_islands)
772 max_layers = compute_pack_layers(&to_pack);
773
774 ALLOC_ARRAY(wo, to_pack.nr_objects);
775 wo_end = 0;
776
777 for (; write_layer < max_layers; ++write_layer)
778 compute_layer_order(wo, &wo_end);
779
780 if (wo_end != to_pack.nr_objects)
781 die(_("ordered %u objects, expected %"PRIu32),
782 wo_end, to_pack.nr_objects);
783
784 return wo;
785 }
786
787 static off_t write_reused_pack(struct hashfile *f)
788 {
789 unsigned char buffer[8192];
790 off_t to_write, total;
791 int fd;
792
793 if (!is_pack_valid(reuse_packfile))
794 die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
795
796 fd = git_open(reuse_packfile->pack_name);
797 if (fd < 0)
798 die_errno(_("unable to open packfile for reuse: %s"),
799 reuse_packfile->pack_name);
800
801 if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
802 die_errno(_("unable to seek in reused packfile"));
803
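/*
 * If no explicit reuse boundary was recorded, reuse the whole pack up
 * to (but not including) the trailing hash checksum.
 */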
804 if (reuse_packfile_offset < 0)
805 reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
806
807 total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
808
809 while (to_write) {
810 int read_pack = xread(fd, buffer, sizeof(buffer));
811
812 if (read_pack <= 0)
813 die_errno(_("unable to read from reused packfile"));
814
815 if (read_pack > to_write)
816 read_pack = to_write;
817
818 hashwrite(f, buffer, read_pack);
819 to_write -= read_pack;
820
821 /*
822 * We don't know the actual number of objects written so far,
823 * only how many bytes have been written, how many bytes there
824 * are in total, and how many objects there are in total. So we
825 * can fake it by pretending all objects we are writing are the
826 * same size. This gives us a smooth progress meter, and at the
827 * end it matches the true answer.
828 */
829 written = reuse_packfile_objects *
830 (((double)(total - to_write)) / total);
831 display_progress(progress_state, written);
832 }
833
834 close(fd);
835 written = reuse_packfile_objects;
836 display_progress(progress_state, written);
837 return reuse_packfile_offset - sizeof(struct pack_header);
838 }
839
840 static const char no_split_warning[] = N_(
841 "disabling bitmap writing, packs are split due to pack.packSizeLimit"
842 );
843
844 static void write_pack_file(void)
845 {
846 uint32_t i = 0, j;
847 struct hashfile *f;
848 off_t offset;
849 uint32_t nr_remaining = nr_result;
850 time_t last_mtime = 0;
851 struct object_entry **write_order;
852
853 if (progress > pack_to_stdout)
854 progress_state = start_progress(_("Writing objects"), nr_result);
855 ALLOC_ARRAY(written_list, to_pack.nr_objects);
856 write_order = compute_write_order();
857
858 do {
859 struct object_id oid;
860 char *pack_tmp_name = NULL;
861
862 if (pack_to_stdout)
863 f = hashfd_throughput(1, "<stdout>", progress_state);
864 else
865 f = create_tmp_packfile(&pack_tmp_name);
866
867 offset = write_pack_header(f, nr_remaining);
868
869 if (reuse_packfile) {
870 off_t packfile_size;
871 assert(pack_to_stdout);
872
873 packfile_size = write_reused_pack(f);
874 offset += packfile_size;
875 }
876
877 nr_written = 0;
878 for (; i < to_pack.nr_objects; i++) {
879 struct object_entry *e = write_order[i];
880 if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
881 break;
882 display_progress(progress_state, written);
883 }
884
885 /*
886 * Did we write the wrong # entries in the header?
887 * If so, rewrite it like in fast-import
888 */
889 if (pack_to_stdout) {
890 finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
891 } else if (nr_written == nr_remaining) {
892 finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
893 } else {
894 int fd = finalize_hashfile(f, oid.hash, 0);
895 fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
896 nr_written, oid.hash, offset);
897 close(fd);
898 if (write_bitmap_index) {
899 if (write_bitmap_index != WRITE_BITMAP_QUIET)
900 warning(_(no_split_warning));
901 write_bitmap_index = 0;
902 }
903 }
904
905 if (!pack_to_stdout) {
906 struct stat st;
907 struct strbuf tmpname = STRBUF_INIT;
908
909 /*
910 * Packs are accessed at runtime in mtime
911 * order, since newer packs are more likely to contain
912 * younger objects. So if we are creating multiple
913 * packs then we should modify the mtime of later ones
914 * to preserve this property.
915 */
916 if (stat(pack_tmp_name, &st) < 0) {
917 warning_errno(_("failed to stat %s"), pack_tmp_name);
918 } else if (!last_mtime) {
919 last_mtime = st.st_mtime;
920 } else {
921 struct utimbuf utb;
922 utb.actime = st.st_atime;
923 utb.modtime = --last_mtime;
924 if (utime(pack_tmp_name, &utb) < 0)
925 warning_errno(_("failed utime() on %s"), pack_tmp_name);
926 }
927
928 strbuf_addf(&tmpname, "%s-", base_name);
929
930 if (write_bitmap_index) {
931 bitmap_writer_set_checksum(oid.hash);
932 bitmap_writer_build_type_index(
933 &to_pack, written_list, nr_written);
934 }
935
936 finish_tmp_packfile(&tmpname, pack_tmp_name,
937 written_list, nr_written,
938 &pack_idx_opts, oid.hash);
939
940 if (write_bitmap_index) {
941 strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));
942
943 stop_progress(&progress_state);
944
945 bitmap_writer_show_progress(progress);
946 bitmap_writer_reuse_bitmaps(&to_pack);
947 bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
948 bitmap_writer_build(&to_pack);
949 bitmap_writer_finish(written_list, nr_written,
950 tmpname.buf, write_bitmap_options);
951 write_bitmap_index = 0;
952 }
953
954 strbuf_release(&tmpname);
955 free(pack_tmp_name);
956 puts(oid_to_hex(&oid));
957 }
958
959 /* mark written objects as written to previous pack */
960 for (j = 0; j < nr_written; j++) {
961 written_list[j]->offset = (off_t)-1;
962 }
963 nr_remaining -= nr_written;
964 } while (nr_remaining && i < to_pack.nr_objects);
965
966 free(written_list);
967 free(write_order);
968 stop_progress(&progress_state);
969 if (written != nr_result)
970 die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
971 written, nr_result);
972 trace2_data_intmax("pack-objects", the_repository,
973 "write_pack_file/wrote", nr_result);
974 }
975
976 static int no_try_delta(const char *path)
977 {
978 static struct attr_check *check;
979
980 if (!check)
981 check = attr_check_initl("delta", NULL);
982 git_check_attr(the_repository->index, path, check);
983 if (ATTR_FALSE(check->items[0].value))
984 return 1;
985 return 0;
986 }
987
988 /*
989 * When adding an object, check whether we have already added it
990 * to our packing list. If so, we can skip. However, if we are
991 * being asked to exclude it, but the previous mention was to include
992 * it, make sure to adjust its flags and tweak our numbers accordingly.
993 *
994 * As an optimization, we pass out the index position where we would have
995 * found the item, since that saves us from having to look it up again a
996 * few lines later when we want to add the new entry.
997 */
998 static int have_duplicate_entry(const struct object_id *oid,
999 int exclude,
1000 uint32_t *index_pos)
1001 {
1002 struct object_entry *entry;
1003
1004 entry = packlist_find(&to_pack, oid, index_pos);
1005 if (!entry)
1006 return 0;
1007
1008 if (exclude) {
1009 if (!entry->preferred_base)
1010 nr_result--;
1011 entry->preferred_base = 1;
1012 }
1013
1014 return 1;
1015 }
1016
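/*
 * Tri-state answer: 1 means we want the object, 0 means we do not,
 * and -1 means this pack alone cannot decide and the caller must
 * keep checking other packs.
 */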
1017 static int want_found_object(int exclude, struct packed_git *p)
1018 {
1019 if (exclude)
1020 return 1;
1021 if (incremental)
1022 return 0;
1023
1024 /*
1025 * When asked to do --local (do not include an object that appears in a
1026 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
1027 * an object that appears in a pack marked with .keep), finding a pack
1028 * that matches the criteria is sufficient for us to decide to omit it.
1029 * However, even if this pack does not satisfy the criteria, we need to
1030 * make sure no copy of this object appears in _any_ pack that would
1031 * make us omit the object, so we need to check all the packs.
1032 *
1033 * We can however first check whether these options can possibly matter;
1034 * if they do not matter we know we want the object in the generated pack.
1035 * Otherwise, we signal "-1" at the end to tell the caller that we do
1036 * not know either way, and it needs to check more packs.
1037 */
1038 if (!ignore_packed_keep_on_disk &&
1039 !ignore_packed_keep_in_core &&
1040 (!local || !have_non_local_packs))
1041 return 1;
1042
1043 if (local && !p->pack_local)
1044 return 0;
1045 if (p->pack_local &&
1046 ((ignore_packed_keep_on_disk && p->pack_keep) ||
1047 (ignore_packed_keep_in_core && p->pack_keep_in_core)))
1048 return 0;
1049
1050 /* we don't know yet; keep looking for more packs */
1051 return -1;
1052 }
1053
1054 /*
1055 * Check whether we want the object in the pack (e.g., we do not want
1056 * objects found in non-local stores if the "--local" option was used).
1057 *
1058 * If the caller already knows an existing pack it wants to take the object
1059 * from, that is passed in *found_pack and *found_offset; otherwise this
1060 * function checks whether any pack has the object, returning the pack
1061 * and its offset in these variables.
1062 */
1063 static int want_object_in_pack(const struct object_id *oid,
1064 int exclude,
1065 struct packed_git **found_pack,
1066 off_t *found_offset)
1067 {
1068 int want;
1069 struct list_head *pos;
1070 struct multi_pack_index *m;
1071
1072 if (!exclude && local && has_loose_object_nonlocal(oid))
1073 return 0;
1074
1075 /*
1076 * If we already know the pack the object lives in, start checks from that
1077 * pack - in the usual case when neither --local was given nor .keep files
1078 * are present we will determine the answer right now.
1079 */
1080 if (*found_pack) {
1081 want = want_found_object(exclude, *found_pack);
1082 if (want != -1)
1083 return want;
1084 }
1085
1086 for (m = get_multi_pack_index(the_repository); m; m = m->next) {
1087 struct pack_entry e;
1088 if (fill_midx_entry(the_repository, oid, &e, m)) {
1089 struct packed_git *p = e.p;
1090 off_t offset;
1091
1092 if (p == *found_pack)
1093 offset = *found_offset;
1094 else
1095 offset = find_pack_entry_one(oid->hash, p);
1096
1097 if (offset) {
1098 if (!*found_pack) {
1099 if (!is_pack_valid(p))
1100 continue;
1101 *found_offset = offset;
1102 *found_pack = p;
1103 }
1104 want = want_found_object(exclude, p);
1105 if (want != -1)
1106 return want;
1107 }
1108 }
1109 }
1110
1111 list_for_each(pos, get_packed_git_mru(the_repository)) {
1112 struct packed_git *p = list_entry(pos, struct packed_git, mru);
1113 off_t offset;
1114
1115 if (p == *found_pack)
1116 offset = *found_offset;
1117 else
1118 offset = find_pack_entry_one(oid->hash, p);
1119
1120 if (offset) {
1121 if (!*found_pack) {
1122 if (!is_pack_valid(p))
1123 continue;
1124 *found_offset = offset;
1125 *found_pack = p;
1126 }
1127 want = want_found_object(exclude, p);
1128 if (!exclude && want > 0)
1129 list_move(&p->mru,
1130 get_packed_git_mru(the_repository));
1131 if (want != -1)
1132 return want;
1133 }
1134 }
1135
1136 return 1;
1137 }
1138
1139 static void create_object_entry(const struct object_id *oid,
1140 enum object_type type,
1141 uint32_t hash,
1142 int exclude,
1143 int no_try_delta,
1144 uint32_t index_pos,
1145 struct packed_git *found_pack,
1146 off_t found_offset)
1147 {
1148 struct object_entry *entry;
1149
1150 entry = packlist_alloc(&to_pack, oid->hash, index_pos);
1151 entry->hash = hash;
1152 oe_set_type(entry, type);
1153 if (exclude)
1154 entry->preferred_base = 1;
1155 else
1156 nr_result++;
1157 if (found_pack) {
1158 oe_set_in_pack(&to_pack, entry, found_pack);
1159 entry->in_pack_offset = found_offset;
1160 }
1161
1162 entry->no_try_delta = no_try_delta;
1163 }
1164
1165 static const char no_closure_warning[] = N_(
1166 "disabling bitmap writing, as some objects are not being packed"
1167 );
1168
1169 static int add_object_entry(const struct object_id *oid, enum object_type type,
1170 const char *name, int exclude)
1171 {
1172 struct packed_git *found_pack = NULL;
1173 off_t found_offset = 0;
1174 uint32_t index_pos;
1175
1176 display_progress(progress_state, ++nr_seen);
1177
1178 if (have_duplicate_entry(oid, exclude, &index_pos))
1179 return 0;
1180
1181 if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
1182 /* The pack is missing an object, so it will not have closure */
1183 if (write_bitmap_index) {
1184 if (write_bitmap_index != WRITE_BITMAP_QUIET)
1185 warning(_(no_closure_warning));
1186 write_bitmap_index = 0;
1187 }
1188 return 0;
1189 }
1190
1191 create_object_entry(oid, type, pack_name_hash(name),
1192 exclude, name && no_try_delta(name),
1193 index_pos, found_pack, found_offset);
1194 return 1;
1195 }
1196
1197 static int add_object_entry_from_bitmap(const struct object_id *oid,
1198 enum object_type type,
1199 int flags, uint32_t name_hash,
1200 struct packed_git *pack, off_t offset)
1201 {
1202 uint32_t index_pos;
1203
1204 display_progress(progress_state, ++nr_seen);
1205
1206 if (have_duplicate_entry(oid, 0, &index_pos))
1207 return 0;
1208
1209 if (!want_object_in_pack(oid, 0, &pack, &offset))
1210 return 0;
1211
1212 create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
1213 return 1;
1214 }
1215
1216 struct pbase_tree_cache {
1217 struct object_id oid;
1218 int ref;
1219 int temporary;
1220 void *tree_data;
1221 unsigned long tree_size;
1222 };
1223
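/*
 * The cache is a small open-addressing hash table indexed by the
 * first byte of the object ID; collisions probe linearly via
 * pbase_tree_cache_ix_incr() below.
 */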
1224 static struct pbase_tree_cache *(pbase_tree_cache[256]);
1225 static int pbase_tree_cache_ix(const struct object_id *oid)
1226 {
1227 return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
1228 }
1229 static int pbase_tree_cache_ix_incr(int ix)
1230 {
1231 return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
1232 }
1233
1234 static struct pbase_tree {
1235 struct pbase_tree *next;
1236 /* This is a phony "cache" entry; we are not
1237 * going to evict it or find it through the _get()
1238 * mechanism -- this is for the toplevel node that
1239 * would almost always change with any commit.
1240 */
1241 struct pbase_tree_cache pcache;
1242 } *pbase_tree;
1243
1244 static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
1245 {
1246 struct pbase_tree_cache *ent, *nent;
1247 void *data;
1248 unsigned long size;
1249 enum object_type type;
1250 int neigh;
1251 int my_ix = pbase_tree_cache_ix(oid);
1252 int available_ix = -1;
1253
1254 /* pbase_tree_cache acts as a limited hash table;
1255 * an object will be found at its index, or within a few
1256 * slots after that slot, if it is cached.
1257 */
1258 for (neigh = 0; neigh < 8; neigh++) {
1259 ent = pbase_tree_cache[my_ix];
1260 if (ent && oideq(&ent->oid, oid)) {
1261 ent->ref++;
1262 return ent;
1263 }
1264 else if (((available_ix < 0) && (!ent || !ent->ref)) ||
1265 ((0 <= available_ix) &&
1266 (!ent && pbase_tree_cache[available_ix])))
1267 available_ix = my_ix;
1268 if (!ent)
1269 break;
1270 my_ix = pbase_tree_cache_ix_incr(my_ix);
1271 }
1272
1273 /* Did not find one. Either we got a bogus request or
1274 * we need to read and perhaps cache.
1275 */
1276 data = read_object_file(oid, &type, &size);
1277 if (!data)
1278 return NULL;
1279 if (type != OBJ_TREE) {
1280 free(data);
1281 return NULL;
1282 }
1283
1284 /* We need to either cache or return a throwaway copy */
1285
1286 if (available_ix < 0)
1287 ent = NULL;
1288 else {
1289 ent = pbase_tree_cache[available_ix];
1290 my_ix = available_ix;
1291 }
1292
1293 if (!ent) {
1294 nent = xmalloc(sizeof(*nent));
1295 nent->temporary = (available_ix < 0);
1296 }
1297 else {
1298 /* evict and reuse */
1299 free(ent->tree_data);
1300 nent = ent;
1301 }
1302 oidcpy(&nent->oid, oid);
1303 nent->tree_data = data;
1304 nent->tree_size = size;
1305 nent->ref = 1;
1306 if (!nent->temporary)
1307 pbase_tree_cache[my_ix] = nent;
1308 return nent;
1309 }
1310
1311 static void pbase_tree_put(struct pbase_tree_cache *cache)
1312 {
1313 if (!cache->temporary) {
1314 cache->ref--;
1315 return;
1316 }
1317 free(cache->tree_data);
1318 free(cache);
1319 }
1320
1321 static int name_cmp_len(const char *name)
1322 {
1323 int i;
1324 for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
1325 ;
1326 return i;
1327 }
1328
1329 static void add_pbase_object(struct tree_desc *tree,
1330 const char *name,
1331 int cmplen,
1332 const char *fullname)
1333 {
1334 struct name_entry entry;
1335 int cmp;
1336
1337 while (tree_entry(tree,&entry)) {
1338 if (S_ISGITLINK(entry.mode))
1339 continue;
1340 cmp = tree_entry_len(&entry) != cmplen ? 1 :
1341 memcmp(name, entry.path, cmplen);
1342 if (cmp > 0)
1343 continue;
1344 if (cmp < 0)
1345 return;
1346 if (name[cmplen] != '/') {
1347 add_object_entry(&entry.oid,
1348 object_type(entry.mode),
1349 fullname, 1);
1350 return;
1351 }
1352 if (S_ISDIR(entry.mode)) {
1353 struct tree_desc sub;
1354 struct pbase_tree_cache *tree;
1355 const char *down = name+cmplen+1;
1356 int downlen = name_cmp_len(down);
1357
1358 tree = pbase_tree_get(&entry.oid);
1359 if (!tree)
1360 return;
1361 init_tree_desc(&sub, tree->tree_data, tree->tree_size);
1362
1363 add_pbase_object(&sub, down, downlen, fullname);
1364 pbase_tree_put(tree);
1365 }
1366 }
1367 }
1368
1369 static unsigned *done_pbase_paths;
1370 static int done_pbase_paths_num;
1371 static int done_pbase_paths_alloc;
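/*
 * done_pbase_paths is kept sorted in descending hash order; on a miss
 * the binary search below returns -insertion_point-1, mirroring the
 * usual bsearch convention so check_pbase_path() knows where to
 * insert.
 */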
1372 static int done_pbase_path_pos(unsigned hash)
1373 {
1374 int lo = 0;
1375 int hi = done_pbase_paths_num;
1376 while (lo < hi) {
1377 int mi = lo + (hi - lo) / 2;
1378 if (done_pbase_paths[mi] == hash)
1379 return mi;
1380 if (done_pbase_paths[mi] < hash)
1381 hi = mi;
1382 else
1383 lo = mi + 1;
1384 }
1385 return -lo-1;
1386 }
1387
1388 static int check_pbase_path(unsigned hash)
1389 {
1390 int pos = done_pbase_path_pos(hash);
1391 if (0 <= pos)
1392 return 1;
1393 pos = -pos - 1;
1394 ALLOC_GROW(done_pbase_paths,
1395 done_pbase_paths_num + 1,
1396 done_pbase_paths_alloc);
1397 done_pbase_paths_num++;
1398 if (pos < done_pbase_paths_num)
1399 MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
1400 done_pbase_paths_num - pos - 1);
1401 done_pbase_paths[pos] = hash;
1402 return 0;
1403 }
1404
1405 static void add_preferred_base_object(const char *name)
1406 {
1407 struct pbase_tree *it;
1408 int cmplen;
1409 unsigned hash = pack_name_hash(name);
1410
1411 if (!num_preferred_base || check_pbase_path(hash))
1412 return;
1413
1414 cmplen = name_cmp_len(name);
1415 for (it = pbase_tree; it; it = it->next) {
1416 if (cmplen == 0) {
1417 add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
1418 }
1419 else {
1420 struct tree_desc tree;
1421 init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
1422 add_pbase_object(&tree, name, cmplen, name);
1423 }
1424 }
1425 }
1426
1427 static void add_preferred_base(struct object_id *oid)
1428 {
1429 struct pbase_tree *it;
1430 void *data;
1431 unsigned long size;
1432 struct object_id tree_oid;
1433
1434 if (window <= num_preferred_base++)
1435 return;
1436
1437 data = read_object_with_reference(the_repository, oid,
1438 tree_type, &size, &tree_oid);
1439 if (!data)
1440 return;
1441
1442 for (it = pbase_tree; it; it = it->next) {
1443 if (oideq(&it->pcache.oid, &tree_oid)) {
1444 free(data);
1445 return;
1446 }
1447 }
1448
1449 it = xcalloc(1, sizeof(*it));
1450 it->next = pbase_tree;
1451 pbase_tree = it;
1452
1453 oidcpy(&it->pcache.oid, &tree_oid);
1454 it->pcache.tree_data = data;
1455 it->pcache.tree_size = size;
1456 }
1457
1458 static void cleanup_preferred_base(void)
1459 {
1460 struct pbase_tree *it;
1461 unsigned i;
1462
1463 it = pbase_tree;
1464 pbase_tree = NULL;
1465 while (it) {
1466 struct pbase_tree *tmp = it;
1467 it = tmp->next;
1468 free(tmp->pcache.tree_data);
1469 free(tmp);
1470 }
1471
1472 for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
1473 if (!pbase_tree_cache[i])
1474 continue;
1475 free(pbase_tree_cache[i]->tree_data);
1476 FREE_AND_NULL(pbase_tree_cache[i]);
1477 }
1478
1479 FREE_AND_NULL(done_pbase_paths);
1480 done_pbase_paths_num = done_pbase_paths_alloc = 0;
1481 }
1482
1483 /*
1484 * Return 1 iff the object specified by "delta" can be sent
1485 * literally as a delta against the base in "base_sha1". If
1486 * so, then *base_out will point to the entry in our packing
1487 * list, or NULL if we must use the external-base list.
1488 *
1489 * Depth value does not matter - find_deltas() will
1490 * never consider a reused delta as the base object to
1491 * deltify other objects against, in order to avoid
1492 * circular deltas.
1493 */
1494 static int can_reuse_delta(const unsigned char *base_sha1,
1495 struct object_entry *delta,
1496 struct object_entry **base_out)
1497 {
1498 struct object_entry *base;
1499 struct object_id base_oid;
1500
1501 if (!base_sha1)
1502 return 0;
1503
1504 oidread(&base_oid, base_sha1);
1505
1506 /*
1507 * First see if we're already sending the base (or it's explicitly in
1508 * our "excluded" list).
1509 */
1510 base = packlist_find(&to_pack, &base_oid, NULL);
1511 if (base) {
1512 if (!in_same_island(&delta->idx.oid, &base->idx.oid))
1513 return 0;
1514 *base_out = base;
1515 return 1;
1516 }
1517
1518 /*
1519 * Otherwise, reachability bitmaps may tell us if the receiver has it,
1520 * even if it was buried too deep in history to make it into the
1521 * packing list.
1522 */
1523 if (thin && bitmap_has_oid_in_uninteresting(bitmap_git, &base_oid)) {
1524 if (use_delta_islands) {
1525 if (!in_same_island(&delta->idx.oid, &base_oid))
1526 return 0;
1527 }
1528 *base_out = NULL;
1529 return 1;
1530 }
1531
1532 return 0;
1533 }
1534
1535 static void check_object(struct object_entry *entry)
1536 {
1537 unsigned long canonical_size;
1538
1539 if (IN_PACK(entry)) {
1540 struct packed_git *p = IN_PACK(entry);
1541 struct pack_window *w_curs = NULL;
1542 const unsigned char *base_ref = NULL;
1543 struct object_entry *base_entry;
1544 unsigned long used, used_0;
1545 unsigned long avail;
1546 off_t ofs;
1547 unsigned char *buf, c;
1548 enum object_type type;
1549 unsigned long in_pack_size;
1550
1551 buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
1552
1553 /*
1554 * We want in_pack_type even if we do not reuse the delta
1555 * since non-delta representations could still be reused.
1556 */
1557 used = unpack_object_header_buffer(buf, avail,
1558 &type,
1559 &in_pack_size);
1560 if (used == 0)
1561 goto give_up;
1562
1563 if (type < 0)
1564 BUG("invalid type %d", type);
1565 entry->in_pack_type = type;
1566
1567 /*
1568 * Determine if this is a delta and, if so, whether we can
1569 * reuse it or not. Otherwise let's find out as cheaply as
1570 * possible what the actual type and size of this object are.
1571 */
1572 switch (entry->in_pack_type) {
1573 default:
1574 /* Not a delta hence we've already got all we need. */
1575 oe_set_type(entry, entry->in_pack_type);
1576 SET_SIZE(entry, in_pack_size);
1577 entry->in_pack_header_size = used;
1578 if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
1579 goto give_up;
1580 unuse_pack(&w_curs);
1581 return;
1582 case OBJ_REF_DELTA:
1583 if (reuse_delta && !entry->preferred_base)
1584 base_ref = use_pack(p, &w_curs,
1585 entry->in_pack_offset + used, NULL);
1586 entry->in_pack_header_size = used + the_hash_algo->rawsz;
1587 break;
1588 case OBJ_OFS_DELTA:
1589 buf = use_pack(p, &w_curs,
1590 entry->in_pack_offset + used, NULL);
1591 used_0 = 0;
1592 c = buf[used_0++];
1593 ofs = c & 127;
1594 while (c & 128) {
1595 ofs += 1;
1596 if (!ofs || MSB(ofs, 7)) {
1597 error(_("delta base offset overflow in pack for %s"),
1598 oid_to_hex(&entry->idx.oid));
1599 goto give_up;
1600 }
1601 c = buf[used_0++];
1602 ofs = (ofs << 7) + (c & 127);
1603 }
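/*
 * e.g., the bytes 0x81 0x2c decode to ofs = 300, matching the
 * encoder in write_no_reuse_object() / write_reuse_object().
 */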
1604 ofs = entry->in_pack_offset - ofs;
1605 if (ofs <= 0 || ofs >= entry->in_pack_offset) {
1606 error(_("delta base offset out of bound for %s"),
1607 oid_to_hex(&entry->idx.oid));
1608 goto give_up;
1609 }
1610 if (reuse_delta && !entry->preferred_base) {
1611 struct revindex_entry *revidx;
1612 revidx = find_pack_revindex(p, ofs);
1613 if (!revidx)
1614 goto give_up;
1615 base_ref = nth_packed_object_sha1(p, revidx->nr);
1616 }
1617 entry->in_pack_header_size = used + used_0;
1618 break;
1619 }
1620
1621 if (can_reuse_delta(base_ref, entry, &base_entry)) {
1622 oe_set_type(entry, entry->in_pack_type);
1623 SET_SIZE(entry, in_pack_size); /* delta size */
1624 SET_DELTA_SIZE(entry, in_pack_size);
1625
1626 if (base_entry) {
1627 SET_DELTA(entry, base_entry);
1628 entry->delta_sibling_idx = base_entry->delta_child_idx;
1629 SET_DELTA_CHILD(base_entry, entry);
1630 } else {
1631 SET_DELTA_EXT(entry, base_ref);
1632 }
1633
1634 unuse_pack(&w_curs);
1635 return;
1636 }
1637
1638 if (oe_type(entry)) {
1639 off_t delta_pos;
1640
1641 /*
1642 * This must be a delta and we already know what the
1643 * final object type is. Let's extract the actual
1644 * object size from the delta header.
1645 */
1646 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
1647 canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
1648 if (canonical_size == 0)
1649 goto give_up;
1650 SET_SIZE(entry, canonical_size);
1651 unuse_pack(&w_curs);
1652 return;
1653 }
1654
1655 /*
1656 * No choice but to fall back to the recursive delta walk
1657 * with oid_object_info() to find out the object type
1658 * at this point...
1659 */
1660 give_up:
1661 unuse_pack(&w_curs);
1662 }
1663
1664 oe_set_type(entry,
1665 oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
1666 if (entry->type_valid) {
1667 SET_SIZE(entry, canonical_size);
1668 } else {
1669 /*
1670 * Bad object type is checked in prepare_pack(). This is
1671 * to permit a missing preferred base object to be ignored
1672 * as a preferred base. Doing so can result in a larger
1673 * pack file, but the transfer will still take place.
1674 */
1675 }
1676 }
1677
1678 static int pack_offset_sort(const void *_a, const void *_b)
1679 {
1680 const struct object_entry *a = *(struct object_entry **)_a;
1681 const struct object_entry *b = *(struct object_entry **)_b;
1682 const struct packed_git *a_in_pack = IN_PACK(a);
1683 const struct packed_git *b_in_pack = IN_PACK(b);
1684
1685 /* avoid filesystem thrashing with loose objects */
1686 if (!a_in_pack && !b_in_pack)
1687 return oidcmp(&a->idx.oid, &b->idx.oid);
1688
1689 if (a_in_pack < b_in_pack)
1690 return -1;
1691 if (a_in_pack > b_in_pack)
1692 return 1;
1693 return a->in_pack_offset < b->in_pack_offset ? -1 :
1694 (a->in_pack_offset > b->in_pack_offset);
1695 }
1696
1697 /*
1698 * Drop an on-disk delta we were planning to reuse. Naively, this would
1699 * just involve blanking out the "delta" field, but we have to deal
1700 * with some extra book-keeping:
1701 *
1702 * 1. Removing ourselves from the delta_sibling linked list.
1703 *
1704 * 2. Updating our size/type to the non-delta representation. These were
1705 * either not recorded initially (size) or overwritten with the delta type
1706 * (type) when check_object() decided to reuse the delta.
1707 *
1708 * 3. Resetting our delta depth, as we are now a base object.
1709 */
1710 static void drop_reused_delta(struct object_entry *entry)
1711 {
1712 unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
1713 struct object_info oi = OBJECT_INFO_INIT;
1714 enum object_type type;
1715 unsigned long size;
1716
1717 while (*idx) {
1718 struct object_entry *oe = &to_pack.objects[*idx - 1];
1719
1720 if (oe == entry)
1721 *idx = oe->delta_sibling_idx;
1722 else
1723 idx = &oe->delta_sibling_idx;
1724 }
1725 SET_DELTA(entry, NULL);
1726 entry->depth = 0;
1727
1728 oi.sizep = &size;
1729 oi.typep = &type;
1730 if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
1731 /*
1732 * We failed to get the info from this pack for some reason;
1733 * fall back to oid_object_info, which may find another copy.
1734 * And if that fails, the error will be recorded in oe_type(entry)
1735 * and dealt with in prepare_pack().
1736 */
1737 oe_set_type(entry,
1738 oid_object_info(the_repository, &entry->idx.oid, &size));
1739 } else {
1740 oe_set_type(entry, type);
1741 }
1742 SET_SIZE(entry, size);
1743 }
1744
1745 /*
1746 * Follow the chain of deltas from this entry onward, throwing away any links
1747 * that cause us to hit a cycle (as determined by the DFS state flags in
1748 * the entries).
1749 *
1750 * We also detect too-long reused chains that would violate our --depth
1751 * limit.
1752 */
1753 static void break_delta_chains(struct object_entry *entry)
1754 {
1755 /*
1756 * The actual depth of each object we will write is stored as an int,
1757 * as it cannot exceed our int "depth" limit. But before we make
1758 * changes based on that limit, we may potentially go as deep as the
1759 * number of objects, which is elsewhere bounded to a uint32_t.
1760 */
1761 uint32_t total_depth;
1762 struct object_entry *cur, *next;
1763
1764 for (cur = entry, total_depth = 0;
1765 cur;
1766 cur = DELTA(cur), total_depth++) {
1767 if (cur->dfs_state == DFS_DONE) {
1768 /*
1769 * We've already seen this object and know it isn't
1770 * part of a cycle. We do need to append its depth
1771 * to our count.
1772 */
1773 total_depth += cur->depth;
1774 break;
1775 }
1776
1777 /*
1778 * We break cycles before looping, so an ACTIVE state (or any
1779 * other cruft which made its way into the state variable)
1780 * is a bug.
1781 */
1782 if (cur->dfs_state != DFS_NONE)
1783 BUG("confusing delta dfs state in first pass: %d",
1784 cur->dfs_state);
1785
1786 /*
1787 * Now we know this is the first time we've seen the object. If
1788 * it's not a delta, we're done traversing, but we'll mark it
1789 * done to save time on future traversals.
1790 */
1791 if (!DELTA(cur)) {
1792 cur->dfs_state = DFS_DONE;
1793 break;
1794 }
1795
1796 /*
1797 * Mark ourselves as active and see if the next step causes
1798 * us to cycle to another active object. It's important to do
1799 * this _before_ we loop, because it impacts where we make the
1800 * cut, and thus how our total_depth counter works.
1801 * E.g., we may see a partial loop like:
1802 *
1803 * A -> B -> C -> D -> B
1804 *
1805 * Cutting B->C breaks the cycle. But now the depth of A is
1806 * only 1, and our total_depth counter is at 3. The size of the
1807 * error is always one less than the size of the cycle we
1808 * broke. Commits C and D were "lost" from A's chain.
1809 *
1810 * If we instead cut D->B, then the depth of A is correct at 3.
1811 * We keep all commits in the chain that we examined.
1812 */
1813 cur->dfs_state = DFS_ACTIVE;
1814 if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
1815 drop_reused_delta(cur);
1816 cur->dfs_state = DFS_DONE;
1817 break;
1818 }
1819 }
1820
1821 /*
1822 * And now that we've gone all the way to the bottom of the chain, we
1823 * need to clear the active flags and set the depth fields as
1824 * appropriate. Unlike the loop above, which can quit when it drops a
1825 * delta, we need to keep going to look for more depth cuts. So we need
1826 * an extra "next" pointer to keep going after we reset cur->delta.
1827 */
1828 for (cur = entry; cur; cur = next) {
1829 next = DELTA(cur);
1830
1831 /*
1832 * We should have a chain of zero or more ACTIVE states down to
1833 * a final DONE. We can quit after the DONE, because either it
1834 * has no bases, or we've already handled them in a previous
1835 * call.
1836 */
1837 if (cur->dfs_state == DFS_DONE)
1838 break;
1839 else if (cur->dfs_state != DFS_ACTIVE)
1840 BUG("confusing delta dfs state in second pass: %d",
1841 cur->dfs_state);
1842
1843 /*
1844 * If the total_depth is more than depth, then we need to snip
1845 * the chain into two or more smaller chains that don't exceed
1846 * the maximum depth. Most of the resulting chains will contain
1847 * (depth + 1) entries (i.e., depth deltas plus one base), and
1848 * the last chain (i.e., the one containing entry) will contain
1849 * whatever entries are left over, namely
1850 * (total_depth % (depth + 1)) of them.
1851 *
1852 * Since we are iterating towards decreasing depth, we need to
1853 * decrement total_depth as we go, and we need to write to the
1854 * entry what its final depth will be after all of the
1855 * snipping. Since we're snipping into chains of length (depth
1856 * + 1) entries, the final depth of an entry will be its
1857 * original depth modulo (depth + 1). Any time we encounter an
1858 * entry whose final depth is supposed to be zero, we snip it
1859 * from its delta base, thereby making it so.
1860 */
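/*
 * A worked example (hypothetical chain, not from the code): with
 * --depth 3 (chains of at most depth + 1 = 4 entries) and a reused
 * chain e0 -> e1 -> ... -> e9 where total_depth = 9, the loop assigns
 * final depths 1,0,3,2,1,0,3,2,1 to e0..e8; e1 and e5 get depth 0 and
 * are snipped from their bases, leaving chains of at most 4 entries.
 */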
1861 cur->depth = (total_depth--) % (depth + 1);
1862 if (!cur->depth)
1863 drop_reused_delta(cur);
1864
1865 cur->dfs_state = DFS_DONE;
1866 }
1867 }
1868
1869 static void get_object_details(void)
1870 {
1871 uint32_t i;
1872 struct object_entry **sorted_by_offset;
1873
1874 if (progress)
1875 progress_state = start_progress(_("Counting objects"),
1876 to_pack.nr_objects);
1877
1878 sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
1879 for (i = 0; i < to_pack.nr_objects; i++)
1880 sorted_by_offset[i] = to_pack.objects + i;
1881 QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
1882
1883 for (i = 0; i < to_pack.nr_objects; i++) {
1884 struct object_entry *entry = sorted_by_offset[i];
1885 check_object(entry);
1886 if (entry->type_valid &&
1887 oe_size_greater_than(&to_pack, entry, big_file_threshold))
1888 entry->no_try_delta = 1;
1889 display_progress(progress_state, i + 1);
1890 }
1891 stop_progress(&progress_state);
1892
1893 /*
1894 * This must happen in a second pass, since we rely on the delta
1895 * information for the whole list being completed.
1896 */
1897 for (i = 0; i < to_pack.nr_objects; i++)
1898 break_delta_chains(&to_pack.objects[i]);
1899
1900 free(sorted_by_offset);
1901 }
1902
1903 /*
1904 * We search for deltas in a list sorted by type, by filename hash, and then
1905 * by size, so that we see progressively smaller and smaller files.
1906 * That's because we prefer deltas to be from the bigger file
1907 * to the smaller -- deletes are potentially cheaper, but perhaps
1908 * more importantly, the bigger file is likely the more recent
1909 * one. The deepest deltas are therefore the oldest objects, which are
1910 * the least likely to be accessed often.
1911 */
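/*
 * For example (hypothetical sizes), a 12 KB and an 8 KB revision of
 * the same path sort next to each other, 12 KB first, so the 8 KB one
 * is likely to be found expressible as a small delta against it.
 */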
1912 static int type_size_sort(const void *_a, const void *_b)
1913 {
1914 const struct object_entry *a = *(struct object_entry **)_a;
1915 const struct object_entry *b = *(struct object_entry **)_b;
1916 const enum object_type a_type = oe_type(a);
1917 const enum object_type b_type = oe_type(b);
1918 const unsigned long a_size = SIZE(a);
1919 const unsigned long b_size = SIZE(b);
1920
1921 if (a_type > b_type)
1922 return -1;
1923 if (a_type < b_type)
1924 return 1;
1925 if (a->hash > b->hash)
1926 return -1;
1927 if (a->hash < b->hash)
1928 return 1;
1929 if (a->preferred_base > b->preferred_base)
1930 return -1;
1931 if (a->preferred_base < b->preferred_base)
1932 return 1;
1933 if (use_delta_islands) {
1934 const int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
1935 if (island_cmp)
1936 return island_cmp;
1937 }
1938 if (a_size > b_size)
1939 return -1;
1940 if (a_size < b_size)
1941 return 1;
1942 return a < b ? -1 : (a > b); /* newest first */
1943 }
1944
1945 struct unpacked {
1946 struct object_entry *entry;
1947 void *data;
1948 struct delta_index *index;
1949 unsigned depth;
1950 };
1951
1952 static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1953 unsigned long delta_size)
1954 {
1955 if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1956 return 0;
1957
1958 if (delta_size < cache_max_small_delta_size)
1959 return 1;
1960
1961 /* cache delta, if objects are large enough compared to delta size */
1962 if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1963 return 1;
1964
1965 return 0;
1966 }
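/*
 * A worked example of the heuristic above (illustrative numbers): the
 * shifts approximate src_size/1MiB + trg_size/2MiB > delta_size/1KiB.
 * For a 4 MiB source and a 4 MiB target the left-hand side is
 * 4 + 2 = 6, so any delta smaller than 6 KiB is considered worth
 * caching.
 */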
1967
1968 /* Protect delta_cache_size */
1969 static pthread_mutex_t cache_mutex;
1970 #define cache_lock() pthread_mutex_lock(&cache_mutex)
1971 #define cache_unlock() pthread_mutex_unlock(&cache_mutex)
1972
1973 /*
1974 * Protect object list partitioning (e.g. struct thread_param) and
1975 * progress_state
1976 */
1977 static pthread_mutex_t progress_mutex;
1978 #define progress_lock() pthread_mutex_lock(&progress_mutex)
1979 #define progress_unlock() pthread_mutex_unlock(&progress_mutex)
1980
1981 /*
1982 * Access to struct object_entry is unprotected since each thread owns
1983 * a portion of the main object list. Just don't access object entries
1984 * ahead in the list, because they can be stolen by the work-stealing
1985 * code and would then need progress_mutex for protection.
1986 */
1987
1988 /*
1989 * Return the size of the object without doing any delta
1990 * reconstruction (so non-deltas are true object sizes, but deltas
1991 * return the size of the delta data).
1992 */
1993 unsigned long oe_get_size_slow(struct packing_data *pack,
1994 const struct object_entry *e)
1995 {
1996 struct packed_git *p;
1997 struct pack_window *w_curs;
1998 unsigned char *buf;
1999 enum object_type type;
2000 unsigned long used, avail, size;
2001
2002 if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
2003 packing_data_lock(&to_pack);
2004 if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
2005 die(_("unable to get size of %s"),
2006 oid_to_hex(&e->idx.oid));
2007 packing_data_unlock(&to_pack);
2008 return size;
2009 }
2010
2011 p = oe_in_pack(pack, e);
2012 if (!p)
2013 BUG("when e->type is a delta, it must belong to a pack");
2014
2015 packing_data_lock(&to_pack);
2016 w_curs = NULL;
2017 buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
2018 used = unpack_object_header_buffer(buf, avail, &type, &size);
2019 if (used == 0)
2020 die(_("unable to parse object header of %s"),
2021 oid_to_hex(&e->idx.oid));
2022
2023 unuse_pack(&w_curs);
2024 packing_data_unlock(&to_pack);
2025 return size;
2026 }
2027
2028 static int try_delta(struct unpacked *trg, struct unpacked *src,
2029 unsigned max_depth, unsigned long *mem_usage)
2030 {
2031 struct object_entry *trg_entry = trg->entry;
2032 struct object_entry *src_entry = src->entry;
2033 unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
2034 unsigned ref_depth;
2035 enum object_type type;
2036 void *delta_buf;
2037
2038 /* Don't bother doing diffs between different types */
2039 if (oe_type(trg_entry) != oe_type(src_entry))
2040 return -1;
2041
2042 /*
2043 * When reusing delta data, we do not bother to try a delta that we
2044 * discarded on an earlier try. Note that a src_entry marked as a
2045 * preferred_base should always be considered: even if we produce a
2046 * suboptimal delta against it, we still save the transfer cost,
2047 * since we already know the other side has it and we won't send
2048 * src_entry at all.
2049 */
2050 if (reuse_delta && IN_PACK(trg_entry) &&
2051 IN_PACK(trg_entry) == IN_PACK(src_entry) &&
2052 !src_entry->preferred_base &&
2053 trg_entry->in_pack_type != OBJ_REF_DELTA &&
2054 trg_entry->in_pack_type != OBJ_OFS_DELTA)
2055 return 0;
2056
2057 /* Let's not bust the allowed depth. */
2058 if (src->depth >= max_depth)
2059 return 0;
2060
2061 /* Now some size filtering heuristics. */
2062 trg_size = SIZE(trg_entry);
2063 if (!DELTA(trg_entry)) {
2064 max_size = trg_size/2 - the_hash_algo->rawsz;
2065 ref_depth = 1;
2066 } else {
2067 max_size = DELTA_SIZE(trg_entry);
2068 ref_depth = trg->depth;
2069 }
2070 max_size = (uint64_t)max_size * (max_depth - src->depth) /
2071 (max_depth - ref_depth + 1);
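/*
 * Illustrative numbers for the scaling above: if the target already
 * has a 2000-byte delta at depth 10, and the candidate source sits
 * at depth 40 with max_depth = 50, the budget shrinks to
 * 2000 * (50 - 40) / (50 - 10 + 1) ~= 487 bytes, so a deeper source
 * must produce a substantially smaller delta to be accepted.
 */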
2072 if (max_size == 0)
2073 return 0;
2074 src_size = SIZE(src_entry);
2075 sizediff = src_size < trg_size ? trg_size - src_size : 0;
2076 if (sizediff >= max_size)
2077 return 0;
2078 if (trg_size < src_size / 32)
2079 return 0;
2080
2081 if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
2082 return 0;
2083
2084 /* Load data if not already done */
2085 if (!trg->data) {
2086 packing_data_lock(&to_pack);
2087 trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
2088 packing_data_unlock(&to_pack);
2089 if (!trg->data)
2090 die(_("object %s cannot be read"),
2091 oid_to_hex(&trg_entry->idx.oid));
2092 if (sz != trg_size)
2093 die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
2094 oid_to_hex(&trg_entry->idx.oid), (uintmax_t)sz,
2095 (uintmax_t)trg_size);
2096 *mem_usage += sz;
2097 }
2098 if (!src->data) {
2099 packing_data_lock(&to_pack);
2100 src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
2101 packing_data_unlock(&to_pack);
2102 if (!src->data) {
2103 if (src_entry->preferred_base) {
2104 static int warned = 0;
2105 if (!warned++)
2106 warning(_("object %s cannot be read"),
2107 oid_to_hex(&src_entry->idx.oid));
2108 /*
2109 * Such objects are not included in the
2110 * resulting pack. Be resilient and ignore
2111 * them if they can't be read, so that the
2112 * pack can still be created.
2113 */
2114 return 0;
2115 }
2116 die(_("object %s cannot be read"),
2117 oid_to_hex(&src_entry->idx.oid));
2118 }
2119 if (sz != src_size)
2120 die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
2121 oid_to_hex(&src_entry->idx.oid), (uintmax_t)sz,
2122 (uintmax_t)src_size);
2123 *mem_usage += sz;
2124 }
2125 if (!src->index) {
2126 src->index = create_delta_index(src->data, src_size);
2127 if (!src->index) {
2128 static int warned = 0;
2129 if (!warned++)
2130 warning(_("suboptimal pack - out of memory"));
2131 return 0;
2132 }
2133 *mem_usage += sizeof_delta_index(src->index);
2134 }
2135
2136 delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
2137 if (!delta_buf)
2138 return 0;
2139
2140 if (DELTA(trg_entry)) {
2141 /* Prefer only shallower same-sized deltas. */
2142 if (delta_size == DELTA_SIZE(trg_entry) &&
2143 src->depth + 1 >= trg->depth) {
2144 free(delta_buf);
2145 return 0;
2146 }
2147 }
2148
2149 /*
2150 * Handle memory allocation outside of the cache
2151 * accounting lock. The compiler will optimize the strangeness
2152 * away when NO_PTHREADS is defined.
2153 */
2154 free(trg_entry->delta_data);
2155 cache_lock();
2156 if (trg_entry->delta_data) {
2157 delta_cache_size -= DELTA_SIZE(trg_entry);
2158 trg_entry->delta_data = NULL;
2159 }
2160 if (delta_cacheable(src_size, trg_size, delta_size)) {
2161 delta_cache_size += delta_size;
2162 cache_unlock();
2163 trg_entry->delta_data = xrealloc(delta_buf, delta_size);
2164 } else {
2165 cache_unlock();
2166 free(delta_buf);
2167 }
2168
2169 SET_DELTA(trg_entry, src_entry);
2170 SET_DELTA_SIZE(trg_entry, delta_size);
2171 trg->depth = src->depth + 1;
2172
2173 return 1;
2174 }
2175
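/*
 * Return the depth of the deepest delta chain hanging off "me", given
 * that "me" itself sits at depth "n"; used below to cap chains that
 * grow through delta children.
 */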
2176 static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
2177 {
2178 struct object_entry *child = DELTA_CHILD(me);
2179 unsigned int m = n;
2180 while (child) {
2181 const unsigned int c = check_delta_limit(child, n + 1);
2182 if (m < c)
2183 m = c;
2184 child = DELTA_SIBLING(child);
2185 }
2186 return m;
2187 }
2188
2189 static unsigned long free_unpacked(struct unpacked *n)
2190 {
2191 unsigned long freed_mem = sizeof_delta_index(n->index);
2192 free_delta_index(n->index);
2193 n->index = NULL;
2194 if (n->data) {
2195 freed_mem += SIZE(n->entry);
2196 FREE_AND_NULL(n->data);
2197 }
2198 n->entry = NULL;
2199 n->depth = 0;
2200 return freed_mem;
2201 }
2202
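/*
 * Slide a window of up to "window" recently seen objects over "list"
 * and call try_delta() between the current object and each window
 * member, keeping the best delta found (see the comments inside the
 * loop for the eviction and reordering heuristics).
 */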
2203 static void find_deltas(struct object_entry **list, unsigned *list_size,
2204 int window, int depth, unsigned *processed)
2205 {
2206 uint32_t i, idx = 0, count = 0;
2207 struct unpacked *array;
2208 unsigned long mem_usage = 0;
2209
2210 array = xcalloc(window, sizeof(struct unpacked));
2211
2212 for (;;) {
2213 struct object_entry *entry;
2214 struct unpacked *n = array + idx;
2215 int j, max_depth, best_base = -1;
2216
2217 progress_lock();
2218 if (!*list_size) {
2219 progress_unlock();
2220 break;
2221 }
2222 entry = *list++;
2223 (*list_size)--;
2224 if (!entry->preferred_base) {
2225 (*processed)++;
2226 display_progress(progress_state, *processed);
2227 }
2228 progress_unlock();
2229
2230 mem_usage -= free_unpacked(n);
2231 n->entry = entry;
2232
2233 while (window_memory_limit &&
2234 mem_usage > window_memory_limit &&
2235 count > 1) {
2236 const uint32_t tail = (idx + window - count) % window;
2237 mem_usage -= free_unpacked(array + tail);
2238 count--;
2239 }
2240
2241 /* We do not compute deltas to *create* objects we are not
2242 * going to pack.
2243 */
2244 if (entry->preferred_base)
2245 goto next;
2246
2247 /*
2248 * If the current object is at the pack edge, take the depth of
2249 * the objects that depend on it into account; otherwise they
2250 * would become too deep.
2251 */
2252 max_depth = depth;
2253 if (DELTA_CHILD(entry)) {
2254 max_depth -= check_delta_limit(entry, 0);
2255 if (max_depth <= 0)
2256 goto next;
2257 }
2258
2259 j = window;
2260 while (--j > 0) {
2261 int ret;
2262 uint32_t other_idx = idx + j;
2263 struct unpacked *m;
2264 if (other_idx >= window)
2265 other_idx -= window;
2266 m = array + other_idx;
2267 if (!m->entry)
2268 break;
2269 ret = try_delta(n, m, max_depth, &mem_usage);
2270 if (ret < 0)
2271 break;
2272 else if (ret > 0)
2273 best_base = other_idx;
2274 }
2275
2276 /*
2277 * If we decided to cache the delta data, then it is best
2278 * to compress it right away. First because we have to do
2279 * it anyway, and doing it here while we're threaded will
2280 * save a lot of time in the non-threaded write phase,
2281 * as well as allow for caching more deltas within
2282 * the same cache size limit.
2283 * ...
2284 * But only if not writing to stdout, since in that case
2285 * the network is most likely throttling writes anyway,
2286 * and therefore it is best to go to the write phase ASAP
2287 * instead, as we can afford spending more time compressing
2288 * between writes at that moment.
2289 */
2290 if (entry->delta_data && !pack_to_stdout) {
2291 unsigned long size;
2292
2293 size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
2294 if (size < (1U << OE_Z_DELTA_BITS)) {
2295 entry->z_delta_size = size;
2296 cache_lock();
2297 delta_cache_size -= DELTA_SIZE(entry);
2298 delta_cache_size += entry->z_delta_size;
2299 cache_unlock();
2300 } else {
2301 FREE_AND_NULL(entry->delta_data);
2302 entry->z_delta_size = 0;
2303 }
2304 }
2305
2306 /* If we made n a delta, and if n is already at max
2307 * depth, leaving it in the window is pointless; we
2308 * should evict it first.
2309 */
2310 if (DELTA(entry) && max_depth <= n->depth)
2311 continue;
2312
2313 /*
2314 * Move the best delta base up in the window, after the
2315 * currently deltified object, to keep it longer. It will
2316 * be the first base object to be attempted next.
2317 */
2318 if (DELTA(entry)) {
2319 struct unpacked swap = array[best_base];
2320 int dist = (window + idx - best_base) % window;
2321 int dst = best_base;
2322 while (dist--) {
2323 int src = (dst + 1) % window;
2324 array[dst] = array[src];
2325 dst = src;
2326 }
2327 array[dst] = swap;
2328 }
2329
2330 next:
2331 idx++;
2332 if (count + 1 < window)
2333 count++;
2334 if (idx >= window)
2335 idx = 0;
2336 }
2337
2338 for (i = 0; i < window; ++i) {
2339 free_delta_index(array[i].index);
2340 free(array[i].data);
2341 }
2342 free(array);
2343 }
2344
2345 static void try_to_free_from_threads(size_t size)
2346 {
2347 packing_data_lock(&to_pack);
2348 release_pack_memory(size);
2349 packing_data_unlock(&to_pack);
2350 }
2351
2352 static try_to_free_t old_try_to_free_routine;
2353
2354 /*
2355 * The main object list is split into smaller lists, each of which is
2356 * handed to one worker.
2357 *
2358 * The main thread waits on the condition that (at least) one of the workers
2359 * has stopped working (which is indicated in the .working member of
2360 * struct thread_params).
2361 *
2362 * When a work thread has completed its work, it sets .working to 0,
2363 * signals the main thread, and waits on the condition that .data_ready
2364 * becomes 1.
2365 *
2366 * The main thread steals half of the work from the worker that has
2367 * most work left to hand it to the idle worker.
2368 */
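/*
 * The worker side of this handshake is threaded_find_deltas() and the
 * main-thread side is ll_find_deltas(), both below.
 */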
2369
2370 struct thread_params {
2371 pthread_t thread;
2372 struct object_entry **list;
2373 unsigned list_size;
2374 unsigned remaining;
2375 int window;
2376 int depth;
2377 int working;
2378 int data_ready;
2379 pthread_mutex_t mutex;
2380 pthread_cond_t cond;
2381 unsigned *processed;
2382 };
2383
2384 static pthread_cond_t progress_cond;
2385
2386 /*
2387 * Mutexes and condition variables can't be statically initialized on Windows.
2388 */
2389 static void init_threaded_search(void)
2390 {
2391 pthread_mutex_init(&cache_mutex, NULL);
2392 pthread_mutex_init(&progress_mutex, NULL);
2393 pthread_cond_init(&progress_cond, NULL);
2394 old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
2395 }
2396
2397 static void cleanup_threaded_search(void)
2398 {
2399 set_try_to_free_routine(old_try_to_free_routine);
2400 pthread_cond_destroy(&progress_cond);
2401 pthread_mutex_destroy(&cache_mutex);
2402 pthread_mutex_destroy(&progress_mutex);
2403 }
2404
2405 static void *threaded_find_deltas(void *arg)
2406 {
2407 struct thread_params *me = arg;
2408
2409 progress_lock();
2410 while (me->remaining) {
2411 progress_unlock();
2412
2413 find_deltas(me->list, &me->remaining,
2414 me->window, me->depth, me->processed);
2415
2416 progress_lock();
2417 me->working = 0;
2418 pthread_cond_signal(&progress_cond);
2419 progress_unlock();
2420
2421 /*
2422 * We must not set ->data_ready before we wait on the
2423 * condition because the main thread may have set it to 1
2424 * before we get here. In order to be sure that new
2425 * work is available if we see 1 in ->data_ready, it
2426 * was initialized to 0 before this thread was spawned
2427 * and we reset it to 0 right away.
2428 */
2429 pthread_mutex_lock(&me->mutex);
2430 while (!me->data_ready)
2431 pthread_cond_wait(&me->cond, &me->mutex);
2432 me->data_ready = 0;
2433 pthread_mutex_unlock(&me->mutex);
2434
2435 progress_lock();
2436 }
2437 progress_unlock();
2438 /* leave ->working 1 so that this doesn't get more work assigned */
2439 return NULL;
2440 }
2441
2442 static void ll_find_deltas(struct object_entry **list, unsigned list_size,
2443 int window, int depth, unsigned *processed)
2444 {
2445 struct thread_params *p;
2446 int i, ret, active_threads = 0;
2447
2448 init_threaded_search();
2449
2450 if (delta_search_threads <= 1) {
2451 find_deltas(list, &list_size, window, depth, processed);
2452 cleanup_threaded_search();
2453 return;
2454 }
2455 if (progress > pack_to_stdout)
2456 fprintf_ln(stderr, _("Delta compression using up to %d threads"),
2457 delta_search_threads);
2458 p = xcalloc(delta_search_threads, sizeof(*p));
2459
2460 /* Partition the work amongst work threads. */
2461 for (i = 0; i < delta_search_threads; i++) {
2462 unsigned sub_size = list_size / (delta_search_threads - i);
2463
2464 /* don't use segments that are too small, or no deltas will be found */
2465 if (sub_size < 2*window && i+1 < delta_search_threads)
2466 sub_size = 0;
2467
2468 p[i].window = window;
2469 p[i].depth = depth;
2470 p[i].processed = processed;
2471 p[i].working = 1;
2472 p[i].data_ready = 0;
2473
2474 /* try to split chunks on "path" boundaries */
2475 while (sub_size && sub_size < list_size &&
2476 list[sub_size]->hash &&
2477 list[sub_size]->hash == list[sub_size-1]->hash)
2478 sub_size++;
2479
2480 p[i].list = list;
2481 p[i].list_size = sub_size;
2482 p[i].remaining = sub_size;
2483
2484 list += sub_size;
2485 list_size -= sub_size;
2486 }
2487
2488 /* Start work threads. */
2489 for (i = 0; i < delta_search_threads; i++) {
2490 if (!p[i].list_size)
2491 continue;
2492 pthread_mutex_init(&p[i].mutex, NULL);
2493 pthread_cond_init(&p[i].cond, NULL);
2494 ret = pthread_create(&p[i].thread, NULL,
2495 threaded_find_deltas, &p[i]);
2496 if (ret)
2497 die(_("unable to create thread: %s"), strerror(ret));
2498 active_threads++;
2499 }
2500
2501 /*
2502 * Now let's wait for work completion. Each time a thread is done
2503 * with its work, we steal half of the remaining work from the
2504 * thread with the largest number of unprocessed objects and give
2505 * it to that newly idle thread. This ensures good load balancing
2506 * until the remaining object list segments are simply too short
2507 * to be worth splitting anymore.
2508 */
2509 while (active_threads) {
2510 struct thread_params *target = NULL;
2511 struct thread_params *victim = NULL;
2512 unsigned sub_size = 0;
2513
2514 progress_lock();
2515 for (;;) {
2516 for (i = 0; !target && i < delta_search_threads; i++)
2517 if (!p[i].working)
2518 target = &p[i];
2519 if (target)
2520 break;
2521 pthread_cond_wait(&progress_cond, &progress_mutex);
2522 }
2523
2524 for (i = 0; i < delta_search_threads; i++)
2525 if (p[i].remaining > 2*window &&
2526 (!victim || victim->remaining < p[i].remaining))
2527 victim = &p[i];
2528 if (victim) {
2529 sub_size = victim->remaining / 2;
2530 list = victim->list + victim->list_size - sub_size;
2531 while (sub_size && list[0]->hash &&
2532 list[0]->hash == list[-1]->hash) {
2533 list++;
2534 sub_size--;
2535 }
2536 if (!sub_size) {
2537 /*
2538 * It is possible for some "paths" to have
2539 * so many objects that no hash boundary
2540 * can be found. Let's just steal the
2541 * exact half in that case.
2542 */
2543 sub_size = victim->remaining / 2;
2544 list -= sub_size;
2545 }
2546 target->list = list;
2547 victim->list_size -= sub_size;
2548 victim->remaining -= sub_size;
2549 }
2550 target->list_size = sub_size;
2551 target->remaining = sub_size;
2552 target->working = 1;
2553 progress_unlock();
2554
2555 pthread_mutex_lock(&target->mutex);
2556 target->data_ready = 1;
2557 pthread_cond_signal(&target->cond);
2558 pthread_mutex_unlock(&target->mutex);
2559
2560 if (!sub_size) {
2561 pthread_join(target->thread, NULL);
2562 pthread_cond_destroy(&target->cond);
2563 pthread_mutex_destroy(&target->mutex);
2564 active_threads--;
2565 }
2566 }
2567 cleanup_threaded_search();
2568 free(p);
2569 }
2570
2571 static void add_tag_chain(const struct object_id *oid)
2572 {
2573 struct tag *tag;
2574
2575 /*
2576 * We catch duplicates already in add_object_entry(), but we'd
2577 * prefer to do this extra check to avoid having to parse the
2578 * tag at all if we already know that it's being packed (e.g., if
2579 * it was included via bitmaps, we would not have parsed it
2580 * previously).
2581 */
2582 if (packlist_find(&to_pack, oid, NULL))
2583 return;
2584
2585 tag = lookup_tag(the_repository, oid);
2586 while (1) {
2587 if (!tag || parse_tag(tag) || !tag->tagged)
2588 die(_("unable to pack objects reachable from tag %s"),
2589 oid_to_hex(oid));
2590
2591 add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
2592
2593 if (tag->tagged->type != OBJ_TAG)
2594 return;
2595
2596 tag = (struct tag *)tag->tagged;
2597 }
2598 }
2599
2600 static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
2601 {
2602 struct object_id peeled;
2603
2604 if (starts_with(path, "refs/tags/") && /* is a tag? */
2605 !peel_ref(path, &peeled) && /* peelable? */
2606 packlist_find(&to_pack, &peeled, NULL)) /* object packed? */
2607 add_tag_chain(oid);
2608 return 0;
2609 }
2610
2611 static void prepare_pack(int window, int depth)
2612 {
2613 struct object_entry **delta_list;
2614 uint32_t i, nr_deltas;
2615 unsigned n;
2616
2617 if (use_delta_islands)
2618 resolve_tree_islands(the_repository, progress, &to_pack);
2619
2620 get_object_details();
2621
2622 /*
2623 * If we're locally repacking then we need to be doubly careful
2624 * from now on in order to make sure no stealth corruption gets
2625 * propagated to the new pack. Clients receiving streamed packs
2626 * should validate everything they get anyway, so there is no need
2627 * to incur the additional cost here in that case.
2628 */
2629 if (!pack_to_stdout)
2630 do_check_packed_object_crc = 1;
2631
2632 if (!to_pack.nr_objects || !window || !depth)
2633 return;
2634
2635 ALLOC_ARRAY(delta_list, to_pack.nr_objects);
2636 nr_deltas = n = 0;
2637
2638 for (i = 0; i < to_pack.nr_objects; i++) {
2639 struct object_entry *entry = to_pack.objects + i;
2640
2641 if (DELTA(entry))
2642 /* This happens if we decided to reuse an existing
2643 * delta from a pack; "reuse_delta &&" is implied.
2644 */
2645 continue;
2646
2647 if (!entry->type_valid ||
2648 oe_size_less_than(&to_pack, entry, 50))
2649 continue;
2650
2651 if (entry->no_try_delta)
2652 continue;
2653
2654 if (!entry->preferred_base) {
2655 nr_deltas++;
2656 if (oe_type(entry) < 0)
2657 die(_("unable to get type of object %s"),
2658 oid_to_hex(&entry->idx.oid));
2659 } else {
2660 if (oe_type(entry) < 0) {
2661 /*
2662 * This object is not found, but we
2663 * don't have to include it anyway.
2664 */
2665 continue;
2666 }
2667 }
2668
2669 delta_list[n++] = entry;
2670 }
2671
2672 if (nr_deltas && n > 1) {
2673 unsigned nr_done = 0;
2674 if (progress)
2675 progress_state = start_progress(_("Compressing objects"),
2676 nr_deltas);
2677 QSORT(delta_list, n, type_size_sort);
2678 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
2679 stop_progress(&progress_state);
2680 if (nr_done != nr_deltas)
2681 die(_("inconsistency with delta count"));
2682 }
2683 free(delta_list);
2684 }
2685
2686 static int git_pack_config(const char *k, const char *v, void *cb)
2687 {
2688 if (!strcmp(k, "pack.window")) {
2689 window = git_config_int(k, v);
2690 return 0;
2691 }
2692 if (!strcmp(k, "pack.windowmemory")) {
2693 window_memory_limit = git_config_ulong(k, v);
2694 return 0;
2695 }
2696 if (!strcmp(k, "pack.depth")) {
2697 depth = git_config_int(k, v);
2698 return 0;
2699 }
2700 if (!strcmp(k, "pack.deltacachesize")) {
2701 max_delta_cache_size = git_config_int(k, v);
2702 return 0;
2703 }
2704 if (!strcmp(k, "pack.deltacachelimit")) {
2705 cache_max_small_delta_size = git_config_int(k, v);
2706 return 0;
2707 }
2708 if (!strcmp(k, "pack.writebitmaphashcache")) {
2709 if (git_config_bool(k, v))
2710 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
2711 else
2712 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
return 0;
2713 }
2714 if (!strcmp(k, "pack.usebitmaps")) {
2715 use_bitmap_index_default = git_config_bool(k, v);
2716 return 0;
2717 }
2718 if (!strcmp(k, "pack.usesparse")) {
2719 sparse = git_config_bool(k, v);
2720 return 0;
2721 }
2722 if (!strcmp(k, "pack.threads")) {
2723 delta_search_threads = git_config_int(k, v);
2724 if (delta_search_threads < 0)
2725 die(_("invalid number of threads specified (%d)"),
2726 delta_search_threads);
2727 if (!HAVE_THREADS && delta_search_threads != 1) {
2728 warning(_("no threads support, ignoring %s"), k);
2729 delta_search_threads = 0;
2730 }
2731 return 0;
2732 }
2733 if (!strcmp(k, "pack.indexversion")) {
2734 pack_idx_opts.version = git_config_int(k, v);
2735 if (pack_idx_opts.version > 2)
2736 die(_("bad pack.indexversion=%"PRIu32),
2737 pack_idx_opts.version);
2738 return 0;
2739 }
2740 return git_default_config(k, v, cb);
2741 }
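/*
 * For example, a repository might tune the knobs above with a config
 * like the following (illustrative values only):
 *
 *   [pack]
 *       window = 20
 *       depth = 50
 *       deltaCacheSize = 512m
 *       threads = 4
 */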
2742
2743 static void read_object_list_from_stdin(void)
2744 {
2745 char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
2746 struct object_id oid;
2747 const char *p;
2748
2749 for (;;) {
2750 if (!fgets(line, sizeof(line), stdin)) {
2751 if (feof(stdin))
2752 break;
2753 if (!ferror(stdin))
2754 die("BUG: fgets returned NULL, not EOF, not error!");
2755 if (errno != EINTR)
2756 die_errno("fgets");
2757 clearerr(stdin);
2758 continue;
2759 }
2760 if (line[0] == '-') {
2761 if (get_oid_hex(line+1, &oid))
2762 die(_("expected edge object ID, got garbage:\n %s"),
2763 line);
2764 add_preferred_base(&oid);
2765 continue;
2766 }
2767 if (parse_oid_hex(line, &oid, &p))
2768 die(_("expected object ID, got garbage:\n %s"), line);
2769
2770 add_preferred_base_object(p + 1);
2771 add_object_entry(&oid, OBJ_NONE, p + 1, 0);
2772 }
2773 }
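/*
 * Illustrative input for the loop above ("<oid>" standing in for a
 * full hexadecimal object ID):
 *
 *   <oid> Makefile     object to pack, with its path hint
 *   -<oid>             edge object, used as a preferred base only
 */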
2774
2775 /* Remember to update object flag allocation in object.h */
2776 #define OBJECT_ADDED (1u<<20)
2777
2778 static void show_commit(struct commit *commit, void *data)
2779 {
2780 add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
2781 commit->object.flags |= OBJECT_ADDED;
2782
2783 if (write_bitmap_index)
2784 index_commit_for_bitmap(commit);
2785
2786 if (use_delta_islands)
2787 propagate_island_marks(commit);
2788 }
2789
2790 static void show_object(struct object *obj, const char *name, void *data)
2791 {
2792 add_preferred_base_object(name);
2793 add_object_entry(&obj->oid, obj->type, name, 0);
2794 obj->flags |= OBJECT_ADDED;
2795
2796 if (use_delta_islands) {
2797 const char *p;
2798 unsigned depth;
2799 struct object_entry *ent;
2800
2801 /* the empty string is a root tree, which is depth 0 */
2802 depth = *name ? 1 : 0;
2803 for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
2804 depth++;
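/* e.g. a (hypothetical) name "a/b/c" ends up at depth 3 */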
2805
2806 ent = packlist_find(&to_pack, &obj->oid, NULL);
2807 if (ent && depth > oe_tree_depth(&to_pack, ent))
2808 oe_set_tree_depth(&to_pack, ent, depth);
2809 }
2810 }
2811
2812 static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
2813 {
2814 assert(arg_missing_action == MA_ALLOW_ANY);
2815
2816 /*
2817 * Quietly ignore ALL missing objects. This avoids problems with
2818 * staging them now and getting an odd error later.
2819 */
2820 if (!has_object_file(&obj->oid))
2821 return;
2822
2823 show_object(obj, name, data);
2824 }
2825
2826 static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
2827 {
2828 assert(arg_missing_action == MA_ALLOW_PROMISOR);
2829
2830 /*
2831 * Quietly ignore EXPECTED missing objects. This avoids problems with
2832 * staging them now and getting an odd error later.
2833 */
2834 if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
2835 return;
2836
2837 show_object(obj, name, data);
2838 }
2839
2840 static int option_parse_missing_action(const struct option *opt,
2841 const char *arg, int unset)
2842 {
2843 assert(arg);
2844 assert(!unset);
2845
2846 if (!strcmp(arg, "error")) {
2847 arg_missing_action = MA_ERROR;
2848 fn_show_object = show_object;
2849 return 0;
2850 }
2851
2852 if (!strcmp(arg, "allow-any")) {
2853 arg_missing_action = MA_ALLOW_ANY;
2854 fetch_if_missing = 0;
2855 fn_show_object = show_object__ma_allow_any;
2856 return 0;
2857 }
2858
2859 if (!strcmp(arg, "allow-promisor")) {
2860 arg_missing_action = MA_ALLOW_PROMISOR;
2861 fetch_if_missing = 0;
2862 fn_show_object = show_object__ma_allow_promisor;
2863 return 0;
2864 }
2865
2866 die(_("invalid value for --missing"));
2867 return 0;
2868 }
2869
2870 static void show_edge(struct commit *commit)
2871 {
2872 add_preferred_base(&commit->object.oid);
2873 }
2874
2875 struct in_pack_object {
2876 off_t offset;
2877 struct object *object;
2878 };
2879
2880 struct in_pack {
2881 unsigned int alloc;
2882 unsigned int nr;
2883 struct in_pack_object *array;
2884 };
2885
2886 static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2887 {
2888 in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
2889 in_pack->array[in_pack->nr].object = object;
2890 in_pack->nr++;
2891 }
2892
2893 /*
2894 * Compare the objects in offset order, to emulate the
2895 * "git rev-list --objects" output that produced the pack originally.
2896 */
2897 static int ofscmp(const void *a_, const void *b_)
2898 {
2899 struct in_pack_object *a = (struct in_pack_object *)a_;
2900 struct in_pack_object *b = (struct in_pack_object *)b_;
2901
2902 if (a->offset < b->offset)
2903 return -1;
2904 else if (a->offset > b->offset)
2905 return 1;
2906 else
2907 return oidcmp(&a->object->oid, &b->object->oid);
2908 }
2909
2910 static void add_objects_in_unpacked_packs(void)
2911 {
2912 struct packed_git *p;
2913 struct in_pack in_pack;
2914 uint32_t i;
2915
2916 memset(&in_pack, 0, sizeof(in_pack));
2917
2918 for (p = get_all_packs(the_repository); p; p = p->next) {
2919 struct object_id oid;
2920 struct object *o;
2921
2922 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
2923 continue;
2924 if (open_pack_index(p))
2925 die(_("cannot open pack index"));
2926
2927 ALLOC_GROW(in_pack.array,
2928 in_pack.nr + p->num_objects,
2929 in_pack.alloc);
2930
2931 for (i = 0; i < p->num_objects; i++) {
2932 nth_packed_object_oid(&oid, p, i);
2933 o = lookup_unknown_object(&oid);
2934 if (!(o->flags & OBJECT_ADDED))
2935 mark_in_pack_object(o, p, &in_pack);
2936 o->flags |= OBJECT_ADDED;
2937 }
2938 }
2939
2940 if (in_pack.nr) {
2941 QSORT(in_pack.array, in_pack.nr, ofscmp);
2942 for (i = 0; i < in_pack.nr; i++) {
2943 struct object *o = in_pack.array[i].object;
2944 add_object_entry(&o->oid, o->type, "", 0);
2945 }
2946 }
2947 free(in_pack.array);
2948 }
2949
2950 static int add_loose_object(const struct object_id *oid, const char *path,
2951 void *data)
2952 {
2953 enum object_type type = oid_object_info(the_repository, oid, NULL);
2954
2955 if (type < 0) {
2956 warning(_("loose object at %s could not be examined"), path);
2957 return 0;
2958 }
2959
2960 add_object_entry(oid, type, "", 0);
2961 return 0;
2962 }
2963
2964 /*
2965 * We actually don't even have to worry about reachability here.
2966 * add_object_entry will weed out duplicates, so we just add every
2967 * loose object we find.
2968 */
2969 static void add_unreachable_loose_objects(void)
2970 {
2971 for_each_loose_file_in_objdir(get_object_directory(),
2972 add_loose_object,
2973 NULL, NULL, NULL);
2974 }
2975
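/*
 * Check whether the object lives in a kept or non-local pack. As a
 * micro-optimization the search starts from the pack where the
 * previous hit was found ("last_found"); (void *)1 is a sentinel
 * meaning "no previous hit yet".
 */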
2976 static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
2977 {
2978 static struct packed_git *last_found = (void *)1;
2979 struct packed_git *p;
2980
2981 p = (last_found != (void *)1) ? last_found :
2982 get_all_packs(the_repository);
2983
2984 while (p) {
2985 if ((!p->pack_local || p->pack_keep ||
2986 p->pack_keep_in_core) &&
2987 find_pack_entry_one(oid->hash, p)) {
2988 last_found = p;
2989 return 1;
2990 }
2991 if (p == last_found)
2992 p = get_all_packs(the_repository);
2993 else
2994 p = p->next;
2995 if (p == last_found)
2996 p = p->next;
2997 }
2998 return 0;
2999 }
3000
3001 /*
3002 * Store a list of sha1s that should not be discarded
3003 * because they were either written too recently, or are
3004 * reachable from another object that was.
3005 *
3006 * This is filled by get_object_list.
3007 */
3008 static struct oid_array recent_objects;
3009
3010 static int loosened_object_can_be_discarded(const struct object_id *oid,
3011 timestamp_t mtime)
3012 {
3013 if (!unpack_unreachable_expiration)
3014 return 0;
3015 if (mtime > unpack_unreachable_expiration)
3016 return 0;
3017 if (oid_array_lookup(&recent_objects, oid) >= 0)
3018 return 0;
3019 return 1;
3020 }
3021
3022 static void loosen_unused_packed_objects(void)
3023 {
3024 struct packed_git *p;
3025 uint32_t i;
3026 struct object_id oid;
3027
3028 for (p = get_all_packs(the_repository); p; p = p->next) {
3029 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
3030 continue;
3031
3032 if (open_pack_index(p))
3033 die(_("cannot open pack index"));
3034
3035 for (i = 0; i < p->num_objects; i++) {
3036 nth_packed_object_oid(&oid, p, i);
3037 if (!packlist_find(&to_pack, &oid, NULL) &&
3038 !has_sha1_pack_kept_or_nonlocal(&oid) &&
3039 !loosened_object_can_be_discarded(&oid, p->mtime))
3040 if (force_object_loose(&oid, p->mtime))
3041 die(_("unable to force loose object"));
3042 }
3043 }
3044 }
3045
3046 /*
3047 * This tracks any options which pack-reuse code expects to be on, or which a
3048 * reader of the pack might not understand, and which would therefore prevent
3049 * blind reuse of what we have on disk.
3050 */
3051 static int pack_options_allow_reuse(void)
3052 {
3053 return pack_to_stdout &&
3054 allow_ofs_delta &&
3055 !ignore_packed_keep_on_disk &&
3056 !ignore_packed_keep_in_core &&
3057 (!local || !have_non_local_packs) &&
3058 !incremental;
3059 }
3060
3061 static int get_object_list_from_bitmap(struct rev_info *revs)
3062 {
3063 if (!(bitmap_git = prepare_bitmap_walk(revs)))
3064 return -1;
3065
3066 if (pack_options_allow_reuse() &&
3067 !reuse_partial_packfile_from_bitmap(
3068 bitmap_git,
3069 &reuse_packfile,
3070 &reuse_packfile_objects,
3071 &reuse_packfile_offset)) {
3072 assert(reuse_packfile_objects);
3073 nr_result += reuse_packfile_objects;
3074 display_progress(progress_state, nr_result);
3075 }
3076
3077 traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
3078 return 0;
3079 }
3080
3081 static void record_recent_object(struct object *obj,
3082 const char *name,
3083 void *data)
3084 {
3085 oid_array_append(&recent_objects, &obj->oid);
3086 }
3087
3088 static void record_recent_commit(struct commit *commit, void *data)
3089 {
3090 oid_array_append(&recent_objects, &commit->object.oid);
3091 }
3092
3093 static void get_object_list(int ac, const char **av)
3094 {
3095 struct rev_info revs;
3096 struct setup_revision_opt s_r_opt = {
3097 .allow_exclude_promisor_objects = 1,
3098 };
3099 char line[1000];
3100 int flags = 0;
3101 int save_warning;
3102
3103 repo_init_revisions(the_repository, &revs, NULL);
3104 save_commit_buffer = 0;
3105 setup_revisions(ac, av, &revs, &s_r_opt);
3106
3107 /* make sure shallows are read */
3108 is_repository_shallow(the_repository);
3109
3110 save_warning = warn_on_object_refname_ambiguity;
3111 warn_on_object_refname_ambiguity = 0;
3112
3113 while (fgets(line, sizeof(line), stdin) != NULL) {
3114 int len = strlen(line);
3115 if (len && line[len - 1] == '\n')
3116 line[--len] = 0;
3117 if (!len)
3118 break;
3119 if (*line == '-') {
3120 if (!strcmp(line, "--not")) {
3121 flags ^= UNINTERESTING;
3122 write_bitmap_index = 0;
3123 continue;
3124 }
3125 if (starts_with(line, "--shallow ")) {
3126 struct object_id oid;
3127 if (get_oid_hex(line + 10, &oid))
3128 die("not an SHA-1 '%s'", line + 10);
3129 register_shallow(the_repository, &oid);
3130 use_bitmap_index = 0;
3131 continue;
3132 }
3133 die(_("not a rev '%s'"), line);
3134 }
3135 if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
3136 die(_("bad revision '%s'"), line);
3137 }
3138
3139 warn_on_object_refname_ambiguity = save_warning;
3140
3141 if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
3142 return;
3143
3144 if (use_delta_islands)
3145 load_delta_islands(the_repository, progress);
3146
3147 if (prepare_revision_walk(&revs))
3148 die(_("revision walk setup failed"));
3149 mark_edges_uninteresting(&revs, show_edge, sparse);
3150
3151 if (!fn_show_object)
3152 fn_show_object = show_object;
3153 traverse_commit_list_filtered(&filter_options, &revs,
3154 show_commit, fn_show_object, NULL,
3155 NULL);
3156
3157 if (unpack_unreachable_expiration) {
3158 revs.ignore_missing_links = 1;
3159 if (add_unseen_recent_objects_to_traversal(&revs,
3160 unpack_unreachable_expiration))
3161 die(_("unable to add recent objects"));
3162 if (prepare_revision_walk(&revs))
3163 die(_("revision walk setup failed"));
3164 traverse_commit_list(&revs, record_recent_commit,
3165 record_recent_object, NULL);
3166 }
3167
3168 if (keep_unreachable)
3169 add_objects_in_unpacked_packs();
3170 if (pack_loose_unreachable)
3171 add_unreachable_loose_objects();
3172 if (unpack_unreachable)
3173 loosen_unused_packed_objects();
3174
3175 oid_array_clear(&recent_objects);
3176 }
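/*
 * Illustrative stdin for get_object_list() ("<oid>" standing in for a
 * full object ID; the revisions are hypothetical):
 *
 *   HEAD
 *   --not
 *   v1.0
 *   --shallow <oid>
 *
 * i.e. ordinary revision arguments, with "--not" flipping the
 * UNINTERESTING flag and "--shallow <oid>" registering a shallow
 * boundary.
 */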
3177
3178 static void add_extra_kept_packs(const struct string_list *names)
3179 {
3180 struct packed_git *p;
3181
3182 if (!names->nr)
3183 return;
3184
3185 for (p = get_all_packs(the_repository); p; p = p->next) {
3186 const char *name = basename(p->pack_name);
3187 int i;
3188
3189 if (!p->pack_local)
3190 continue;
3191
3192 for (i = 0; i < names->nr; i++)
3193 if (!fspathcmp(name, names->items[i].string))
3194 break;
3195
3196 if (i < names->nr) {
3197 p->pack_keep_in_core = 1;
3198 ignore_packed_keep_in_core = 1;
3199 continue;
3200 }
3201 }
3202 }
3203
3204 static int option_parse_index_version(const struct option *opt,
3205 const char *arg, int unset)
3206 {
3207 char *c;
3208 const char *val = arg;
3209
3210 BUG_ON_OPT_NEG(unset);
3211
3212 pack_idx_opts.version = strtoul(val, &c, 10);
3213 if (pack_idx_opts.version > 2)
3214 die(_("unsupported index version %s"), val);
3215 if (*c == ',' && c[1])
3216 pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
3217 if (*c || pack_idx_opts.off32_limit & 0x80000000)
3218 die(_("bad index version '%s'"), val);
3219 return 0;
3220 }
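/*
 * e.g. "--index-version=2,0x10000" (hypothetical values) selects index
 * v2 and sets the 32-bit offset limit to 0x10000; the limit must not
 * have its high bit set.
 */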
3221
3222 static int option_parse_unpack_unreachable(const struct option *opt,
3223 const char *arg, int unset)
3224 {
3225 if (unset) {
3226 unpack_unreachable = 0;
3227 unpack_unreachable_expiration = 0;
3228 }
3229 else {
3230 unpack_unreachable = 1;
3231 if (arg)
3232 unpack_unreachable_expiration = approxidate(arg);
3233 }
3234 return 0;
3235 }
3236
3237 int cmd_pack_objects(int argc, const char **argv, const char *prefix)
3238 {
3239 int use_internal_rev_list = 0;
3240 int shallow = 0;
3241 int all_progress_implied = 0;
3242 struct argv_array rp = ARGV_ARRAY_INIT;
3243 int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
3244 int rev_list_index = 0;
3245 struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
3246 struct option pack_objects_options[] = {
3247 OPT_SET_INT('q', "quiet", &progress,
3248 N_("do not show progress meter"), 0),
3249 OPT_SET_INT(0, "progress", &progress,
3250 N_("show progress meter"), 1),
3251 OPT_SET_INT(0, "all-progress", &progress,
3252 N_("show progress meter during object writing phase"), 2),
3253 OPT_BOOL(0, "all-progress-implied",
3254 &all_progress_implied,
3255 N_("similar to --all-progress when progress meter is shown")),
3256 { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
3257 N_("write the pack index file in the specified idx format version"),
3258 PARSE_OPT_NONEG, option_parse_index_version },
3259 OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
3260 N_("maximum size of each output pack file")),
3261 OPT_BOOL(0, "local", &local,
3262 N_("ignore borrowed objects from alternate object store")),
3263 OPT_BOOL(0, "incremental", &incremental,
3264 N_("ignore packed objects")),
3265 OPT_INTEGER(0, "window", &window,
3266 N_("limit pack window by objects")),
3267 OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
3268 N_("limit pack window by memory in addition to object limit")),
3269 OPT_INTEGER(0, "depth", &depth,
3270 N_("maximum length of delta chain allowed in the resulting pack")),
3271 OPT_BOOL(0, "reuse-delta", &reuse_delta,
3272 N_("reuse existing deltas")),
3273 OPT_BOOL(0, "reuse-object", &reuse_object,
3274 N_("reuse existing objects")),
3275 OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
3276 N_("use OFS_DELTA objects")),
3277 OPT_INTEGER(0, "threads", &delta_search_threads,
3278 N_("use threads when searching for best delta matches")),
3279 OPT_BOOL(0, "non-empty", &non_empty,
3280 N_("do not create an empty pack output")),
3281 OPT_BOOL(0, "revs", &use_internal_rev_list,
3282 N_("read revision arguments from standard input")),
3283 OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
3284 N_("limit the objects to those that are not yet packed"),
3285 1, PARSE_OPT_NONEG),
3286 OPT_SET_INT_F(0, "all", &rev_list_all,
3287 N_("include objects reachable from any reference"),
3288 1, PARSE_OPT_NONEG),
3289 OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
3290 N_("include objects referred by reflog entries"),
3291 1, PARSE_OPT_NONEG),
3292 OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
3293 N_("include objects referred to by the index"),
3294 1, PARSE_OPT_NONEG),
3295 OPT_BOOL(0, "stdout", &pack_to_stdout,
3296 N_("output pack to stdout")),
3297 OPT_BOOL(0, "include-tag", &include_tag,
3298 N_("include tag objects that refer to objects to be packed")),
3299 OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
3300 N_("keep unreachable objects")),
3301 OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
3302 N_("pack loose unreachable objects")),
3303 { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
3304 N_("unpack unreachable objects newer than <time>"),
3305 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
3306 OPT_BOOL(0, "sparse", &sparse,
3307 N_("use the sparse reachability algorithm")),
3308 OPT_BOOL(0, "thin", &thin,
3309 N_("create thin packs")),
3310 OPT_BOOL(0, "shallow", &shallow,
3311 N_("create packs suitable for shallow fetches")),
3312 OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
3313 N_("ignore packs that have companion .keep file")),
3314 OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
3315 N_("ignore this pack")),
3316 OPT_INTEGER(0, "compression", &pack_compression_level,
3317 N_("pack compression level")),
3318 OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
3319 N_("do not hide commits by grafts"), 0),
3320 OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
3321 N_("use a bitmap index if available to speed up counting objects")),
3322 OPT_SET_INT(0, "write-bitmap-index", &write_bitmap_index,
3323 N_("write a bitmap index together with the pack index"),
3324 WRITE_BITMAP_TRUE),
3325 OPT_SET_INT_F(0, "write-bitmap-index-quiet",
3326 &write_bitmap_index,
3327 N_("write a bitmap index if possible"),
3328 WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN),
3329 OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
3330 { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
3331 N_("handling for missing objects"), PARSE_OPT_NONEG,
3332 option_parse_missing_action },
3333 OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
3334 N_("do not pack objects in promisor packfiles")),
3335 OPT_BOOL(0, "delta-islands", &use_delta_islands,
3336 N_("respect islands during delta compression")),
3337 OPT_END(),
3338 };
3339
3340 if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
3341 BUG("too many dfs states, increase OE_DFS_STATE_BITS");
3342
3343 read_replace_refs = 0;
3344
3345 sparse = git_env_bool("GIT_TEST_PACK_SPARSE", 0);
3346 reset_pack_idx_option(&pack_idx_opts);
3347 git_config(git_pack_config, NULL);
3348
3349 progress = isatty(2);
3350 argc = parse_options(argc, argv, prefix, pack_objects_options,
3351 pack_usage, 0);
3352
3353 if (argc) {
3354 base_name = argv[0];
3355 argc--;
3356 }
3357 if (pack_to_stdout != !base_name || argc)
3358 usage_with_options(pack_usage, pack_objects_options);
3359
3360 if (depth >= (1 << OE_DEPTH_BITS)) {
3361 warning(_("delta chain depth %d is too deep, forcing %d"),
3362 depth, (1 << OE_DEPTH_BITS) - 1);
3363 depth = (1 << OE_DEPTH_BITS) - 1;
3364 }
3365 if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
3366 warning(_("pack.deltaCacheLimit is too high, forcing %d"),
3367 (1U << OE_Z_DELTA_BITS) - 1);
3368 cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
3369 }
3370
3371 argv_array_push(&rp, "pack-objects");
3372 if (thin) {
3373 use_internal_rev_list = 1;
3374 argv_array_push(&rp, shallow
3375 ? "--objects-edge-aggressive"
3376 : "--objects-edge");
3377 } else
3378 argv_array_push(&rp, "--objects");
3379
3380 if (rev_list_all) {
3381 use_internal_rev_list = 1;
3382 argv_array_push(&rp, "--all");
3383 }
3384 if (rev_list_reflog) {
3385 use_internal_rev_list = 1;
3386 argv_array_push(&rp, "--reflog");
3387 }
3388 if (rev_list_index) {
3389 use_internal_rev_list = 1;
3390 argv_array_push(&rp, "--indexed-objects");
3391 }
3392 if (rev_list_unpacked) {
3393 use_internal_rev_list = 1;
3394 argv_array_push(&rp, "--unpacked");
3395 }
3396
3397 if (exclude_promisor_objects) {
3398 use_internal_rev_list = 1;
3399 fetch_if_missing = 0;
3400 argv_array_push(&rp, "--exclude-promisor-objects");
3401 }
3402 if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
3403 use_internal_rev_list = 1;
3404
3405 if (!reuse_object)
3406 reuse_delta = 0;
3407 if (pack_compression_level == -1)
3408 pack_compression_level = Z_DEFAULT_COMPRESSION;
3409 else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
3410 die(_("bad pack compression level %d"), pack_compression_level);
3411
3412 if (!delta_search_threads) /* --threads=0 means autodetect */
3413 delta_search_threads = online_cpus();
3414
3415 if (!HAVE_THREADS && delta_search_threads != 1)
3416 warning(_("no threads support, ignoring --threads"));
3417 if (!pack_to_stdout && !pack_size_limit)
3418 pack_size_limit = pack_size_limit_cfg;
3419 if (pack_to_stdout && pack_size_limit)
3420 die(_("--max-pack-size cannot be used to build a pack for transfer"));
3421 if (pack_size_limit && pack_size_limit < 1024*1024) {
3422 warning(_("minimum pack size limit is 1 MiB"));
3423 pack_size_limit = 1024*1024;
3424 }
3425
3426 if (!pack_to_stdout && thin)
3427 die(_("--thin cannot be used to build an indexable pack"));
3428
3429 if (keep_unreachable && unpack_unreachable)
3430 die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
3431 if (!rev_list_all || !rev_list_reflog || !rev_list_index)
3432 unpack_unreachable_expiration = 0;
3433
3434 if (filter_options.choice) {
3435 if (!pack_to_stdout)
3436 die(_("cannot use --filter without --stdout"));
3437 use_bitmap_index = 0;
3438 }
3439
3440 /*
3441 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
3442 *
3443 * - to produce good pack (with bitmap index not-yet-packed objects are
3444 * packed in suboptimal order).
3445 *
3446 * - to use more robust pack-generation codepath (avoiding possible
3447 * bugs in bitmap code and possible bitmap index corruption).
3448 */
3449 if (!pack_to_stdout)
3450 use_bitmap_index_default = 0;
3451
3452 if (use_bitmap_index < 0)
3453 use_bitmap_index = use_bitmap_index_default;
3454
3455 /* "hard" reasons not to use bitmaps; these just won't work at all */
3456 if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
3457 use_bitmap_index = 0;
3458
3459 if (pack_to_stdout || !rev_list_all)
3460 write_bitmap_index = 0;
3461
3462 if (use_delta_islands)
3463 argv_array_push(&rp, "--topo-order");
3464
3465 if (progress && all_progress_implied)
3466 progress = 2;
3467
3468 add_extra_kept_packs(&keep_pack_list);
3469 if (ignore_packed_keep_on_disk) {
3470 struct packed_git *p;
3471 for (p = get_all_packs(the_repository); p; p = p->next)
3472 if (p->pack_local && p->pack_keep)
3473 break;
3474 if (!p) /* no keep-able packs found */
3475 ignore_packed_keep_on_disk = 0;
3476 }
3477 if (local) {
3478 /*
3479 * unlike ignore_packed_keep_on_disk above, we do not
3480 * want to unset "local" based on looking at packs, as
3481 * it also covers non-local objects
3482 */
3483 struct packed_git *p;
3484 for (p = get_all_packs(the_repository); p; p = p->next) {
3485 if (!p->pack_local) {
3486 have_non_local_packs = 1;
3487 break;
3488 }
3489 }
3490 }
3491
3492 trace2_region_enter("pack-objects", "enumerate-objects",
3493 the_repository);
3494 prepare_packing_data(the_repository, &to_pack);
3495
3496 if (progress)
3497 progress_state = start_progress(_("Enumerating objects"), 0);
3498 if (!use_internal_rev_list)
3499 read_object_list_from_stdin();
3500 else {
3501 get_object_list(rp.argc, rp.argv);
3502 argv_array_clear(&rp);
3503 }
3504 cleanup_preferred_base();
3505 if (include_tag && nr_result)
3506 for_each_ref(add_ref_tag, NULL);
3507 stop_progress(&progress_state);
3508 trace2_region_leave("pack-objects", "enumerate-objects",
3509 the_repository);
3510
3511 if (non_empty && !nr_result)
3512 return 0;
3513 if (nr_result) {
3514 trace2_region_enter("pack-objects", "prepare-pack",
3515 the_repository);
3516 prepare_pack(window, depth);
3517 trace2_region_leave("pack-objects", "prepare-pack",
3518 the_repository);
3519 }
3520
3521 trace2_region_enter("pack-objects", "write-pack-file", the_repository);
3522 write_pack_file();
3523 trace2_region_leave("pack-objects", "write-pack-file", the_repository);
3524
3525 if (progress)
3526 fprintf_ln(stderr,
3527 _("Total %"PRIu32" (delta %"PRIu32"),"
3528 " reused %"PRIu32" (delta %"PRIu32")"),
3529 written, written_delta, reused, reused_delta);
3530 return 0;
3531 }