/*
 * builtin/index-pack.c (from git.git)
 *
 * Note: gitweb page-navigation residue removed from this spot;
 * original blob: 2b78ba7fe4d14a8bcb12165395ff02deeb8c047f
 */
1 #define USE_THE_REPOSITORY_VARIABLE
2 #define DISABLE_SIGN_COMPARE_WARNINGS
3
4 #include "builtin.h"
5 #include "config.h"
6 #include "delta.h"
7 #include "environment.h"
8 #include "gettext.h"
9 #include "hex.h"
10 #include "pack.h"
11 #include "csum-file.h"
12 #include "blob.h"
13 #include "commit.h"
14 #include "tag.h"
15 #include "tree.h"
16 #include "progress.h"
17 #include "fsck.h"
18 #include "strbuf.h"
19 #include "streaming.h"
20 #include "thread-utils.h"
21 #include "packfile.h"
22 #include "pack-revindex.h"
23 #include "object-file.h"
24 #include "odb.h"
25 #include "oid-array.h"
26 #include "oidset.h"
27 #include "path.h"
28 #include "replace-object.h"
29 #include "tree-walk.h"
30 #include "promisor-remote.h"
31 #include "run-command.h"
32 #include "setup.h"
33 #include "strvec.h"
34
35 static const char index_pack_usage[] =
36 "git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--[no-]rev-index] [--verify] [--strict[=<msg-id>=<severity>...]] [--fsck-objects[=<msg-id>=<severity>...]] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";
37
/*
 * Per-object bookkeeping for every entry in the pack being indexed.
 */
struct object_entry {
	struct pack_idx_entry idx;	/* oid, pack offset, crc32 for the .idx */
	unsigned long size;		/* inflated (uncompressed) size */
	unsigned char hdr_size;		/* length of the in-pack object header */
	signed char type;		/* type as stored in the pack (may be a delta type) */
	signed char real_type;		/* underlying type once deltas are resolved */
};

/*
 * Extra per-object statistics; the obj_stat array is used only when
 * show_stat is set (see resolve_delta()).
 */
struct object_stat {
	unsigned delta_depth;	/* length of the delta chain ending at this object */
	int base_object_no;	/* index into objects[] of the delta base */
};
50
/*
 * A node in the delta tree: one object whose reconstructed contents may
 * serve as the base for further deltas.
 */
struct base_data {
	/* Initialized by make_base(). */
	struct base_data *base;		/* parent in the delta chain */
	struct object_entry *obj;	/* the pack entry this node describes */
	int ref_first, ref_last;	/* inclusive child range in ref_deltas[] */
	int ofs_first, ofs_last;	/* inclusive child range in ofs_deltas[] */
	/*
	 * Threads should increment retain_data if they are about to call
	 * patch_delta() using this struct's data as a base, and decrement this
	 * when they are done. While retain_data is nonzero, this struct's data
	 * will not be freed even if the delta base cache limit is exceeded.
	 */
	int retain_data;
	/*
	 * The number of direct children that have not been fully processed
	 * (entered work_head, entered done_head, left done_head). When this
	 * number reaches zero, this struct base_data can be freed.
	 */
	int children_remaining;

	/* Not initialized by make_base(). */
	struct list_head list;	/* position on work_head or done_head */
	void *data;		/* cached reconstructed contents, or NULL if pruned */
	unsigned long size;	/* size of data */
};
76
77 /*
78 * Stack of struct base_data that have unprocessed children.
79 * threaded_second_pass() uses this as a source of work (the other being the
80 * objects array).
81 *
82 * Guarded by work_mutex.
83 */
84 static LIST_HEAD(work_head);
85
86 /*
87 * Stack of struct base_data that have children, all of whom have been
88 * processed or are being processed, and at least one child is being processed.
89 * These struct base_data must be kept around until the last child is
90 * processed.
91 *
92 * Guarded by work_mutex.
93 */
94 static LIST_HEAD(done_head);
95
96 /*
97 * All threads share one delta base cache.
98 *
99 * base_cache_used is guarded by work_mutex, and base_cache_limit is read-only
100 * in a thread.
101 */
102 static size_t base_cache_used;
103 static size_t base_cache_limit;
104
/* Per-worker-thread state; see init_thread() and get_thread_data(). */
struct thread_local_data {
	pthread_t thread;
	int pack_fd;	/* private read-only fd on the pack, for xpread() */
};
109
110 /* Remember to update object flag allocation in object.h */
111 #define FLAG_LINK (1u<<20)
112 #define FLAG_CHECKED (1u<<21)
113
/* One OBJ_OFS_DELTA object: its base's pack offset and its own index. */
struct ofs_delta_entry {
	off_t offset;	/* pack offset of the delta base */
	int obj_no;	/* index into objects[] of the delta itself */
};

/* One OBJ_REF_DELTA object: its base's object id and its own index. */
struct ref_delta_entry {
	struct object_id oid;	/* object id of the delta base */
	int obj_no;		/* index into objects[] of the delta itself */
};
123
124 static struct object_entry *objects;
125 static struct object_stat *obj_stat;
126 static struct ofs_delta_entry *ofs_deltas;
127 static struct ref_delta_entry *ref_deltas;
128 static struct thread_local_data nothread_data;
129 static int nr_objects;
130 static int nr_ofs_deltas;
131 static int nr_ref_deltas;
132 static int ref_deltas_alloc;
133 static int nr_resolved_deltas;
134 static int nr_threads;
135
136 static int from_stdin;
137 static int strict;
138 static int do_fsck_object;
139 static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
140 static int verbose;
141 static const char *progress_title;
142 static int show_resolving_progress;
143 static int show_stat;
144 static int check_self_contained_and_connected;
145
146 static struct progress *progress;
147
148 /* We always read in 4kB chunks. */
149 static unsigned char input_buffer[4096];
150 static unsigned int input_offset, input_len;
151 static off_t consumed_bytes;
152 static off_t max_input_size;
153 static unsigned deepest_delta;
154 static struct git_hash_ctx input_ctx;
155 static uint32_t input_crc32;
156 static int input_fd, output_fd;
157 static const char *curr_pack;
158
159 /*
160 * outgoing_links is guarded by read_mutex, and record_outgoing_links is
161 * read-only in a thread.
162 */
163 static struct oidset outgoing_links = OIDSET_INIT;
164 static int record_outgoing_links;
165
166 static struct thread_local_data *thread_data;
167 static int nr_dispatched;
168 static int threads_active;
169
170 static pthread_mutex_t read_mutex;
171 #define read_lock() lock_mutex(&read_mutex)
172 #define read_unlock() unlock_mutex(&read_mutex)
173
174 static pthread_mutex_t counter_mutex;
175 #define counter_lock() lock_mutex(&counter_mutex)
176 #define counter_unlock() unlock_mutex(&counter_mutex)
177
178 static pthread_mutex_t work_mutex;
179 #define work_lock() lock_mutex(&work_mutex)
180 #define work_unlock() unlock_mutex(&work_mutex)
181
182 static pthread_mutex_t deepest_delta_mutex;
183 #define deepest_delta_lock() lock_mutex(&deepest_delta_mutex)
184 #define deepest_delta_unlock() unlock_mutex(&deepest_delta_mutex)
185
186 static pthread_key_t key;
187
188 static inline void lock_mutex(pthread_mutex_t *mutex)
189 {
190 if (threads_active)
191 pthread_mutex_lock(mutex);
192 }
193
194 static inline void unlock_mutex(pthread_mutex_t *mutex)
195 {
196 if (threads_active)
197 pthread_mutex_unlock(mutex);
198 }
199
/*
 * Mutex and conditional variable can't be statically-initialized on Windows.
 *
 * Set up all multi-threading state: mutexes, the thread-local-data key,
 * and one struct thread_local_data per worker, each with its own
 * read-only file descriptor on the pack so threads can pread()
 * independently. Must be called before any worker thread starts.
 */
static void init_thread(void)
{
	int i;
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&counter_mutex, NULL);
	pthread_mutex_init(&work_mutex, NULL);
	/* deepest_delta is only tracked (and thus only locked) under --verify-stat */
	if (show_stat)
		pthread_mutex_init(&deepest_delta_mutex, NULL);
	pthread_key_create(&key, NULL);
	CALLOC_ARRAY(thread_data, nr_threads);
	for (i = 0; i < nr_threads; i++) {
		thread_data[i].pack_fd = xopen(curr_pack, O_RDONLY);
	}

	/* from here on, lock_mutex()/unlock_mutex() actually lock */
	threads_active = 1;
}
219
/*
 * Tear down everything init_thread() created. Safe to call even when
 * threading was never initialized (returns immediately). Must only be
 * called after all worker threads have exited.
 */
static void cleanup_thread(void)
{
	int i;
	if (!threads_active)
		return;
	/* turn the lock helpers back into no-ops before destroying mutexes */
	threads_active = 0;
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&counter_mutex);
	pthread_mutex_destroy(&work_mutex);
	if (show_stat)
		pthread_mutex_destroy(&deepest_delta_mutex);
	for (i = 0; i < nr_threads; i++)
		close(thread_data[i].pack_fd);
	pthread_key_delete(key);
	free(thread_data);
}
236
237 static int mark_link(struct object *obj, enum object_type type,
238 void *data UNUSED,
239 struct fsck_options *options UNUSED)
240 {
241 if (!obj)
242 return -1;
243
244 if (type != OBJ_ANY && obj->type != type)
245 die(_("object type mismatch at %s"), oid_to_hex(&obj->oid));
246
247 obj->flags |= FLAG_LINK;
248 return 0;
249 }
250
/*
 * The content of each linked object must have been checked
 * or it must be already present in the object database.
 *
 * Returns 1 if the object was linked (FLAG_LINK) but not part of the
 * pack (no FLAG_CHECKED) and had to be satisfied from the local object
 * store; returns 0 otherwise. Dies if the object is missing or has an
 * unexpected type.
 */
static unsigned check_object(struct object *obj)
{
	if (!obj)
		return 0;

	if (!(obj->flags & FLAG_LINK))
		return 0;

	if (!(obj->flags & FLAG_CHECKED)) {
		unsigned long size;
		int type = odb_read_object_info(the_repository->objects,
						&obj->oid, &size);
		if (type <= 0)
			die(_("did not receive expected object %s"),
			    oid_to_hex(&obj->oid));
		if (type != obj->type)
			die(_("object %s: expected type %s, found %s"),
			    oid_to_hex(&obj->oid),
			    type_name(obj->type), type_name(type));
		obj->flags |= FLAG_CHECKED;
		return 1;
	}

	return 0;
}
278
279 static unsigned check_objects(void)
280 {
281 unsigned i, max, foreign_nr = 0;
282
283 max = get_max_object_index(the_repository);
284
285 if (verbose)
286 progress = start_delayed_progress(the_repository,
287 _("Checking objects"), max);
288
289 for (i = 0; i < max; i++) {
290 foreign_nr += check_object(get_indexed_object(the_repository, i));
291 display_progress(progress, i + 1);
292 }
293
294 stop_progress(&progress);
295 return foreign_nr;
296 }
297
298
/*
 * Discard current buffer used content: write the consumed prefix of
 * input_buffer to the output pack (if any), fold it into the running
 * pack hash, and slide the unconsumed tail to the front of the buffer.
 */
static void flush(void)
{
	if (input_offset) {
		if (output_fd >= 0)
			write_or_die(output_fd, input_buffer, input_offset);
		git_hash_update(&input_ctx, input_buffer, input_offset);
		memmove(input_buffer, input_buffer + input_offset, input_len);
		input_offset = 0;
	}
}
310
/*
 * Make sure at least "min" bytes are available in the buffer, and
 * return the pointer to the buffer.
 *
 * Dies if "min" exceeds the buffer size or if the input ends early.
 * May call flush(), which rewinds input_offset to 0 - hence the bare
 * "return input_buffer" on the slow path.
 */
static void *fill(int min)
{
	if (min <= input_len)
		return input_buffer + input_offset;
	if (min > sizeof(input_buffer))
		die(Q_("cannot fill %d byte",
		       "cannot fill %d bytes",
		       min),
		    min);
	flush();
	do {
		ssize_t ret = xread(input_fd, input_buffer + input_len,
				    sizeof(input_buffer) - input_len);
		if (ret <= 0) {
			if (!ret)
				die(_("early EOF"));
			die_errno(_("read error on input"));
		}
		input_len += ret;
		if (from_stdin)
			display_throughput(progress, consumed_bytes + input_len);
	} while (input_len < min);
	return input_buffer;
}
339
/*
 * Mark "bytes" bytes of the input buffer as consumed: advance the
 * buffer cursor, fold them into the per-object crc32 and the global
 * byte count. Dies if the pack would overflow off_t or exceed
 * --max-input-size.
 */
static void use(int bytes)
{
	if (bytes > input_len)
		die(_("used more bytes than were available"));
	input_crc32 = crc32(input_crc32, input_buffer + input_offset, bytes);
	input_len -= bytes;
	input_offset += bytes;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(consumed_bytes, bytes))
		die(_("pack too large for current definition of off_t"));
	consumed_bytes += bytes;
	if (max_input_size && consumed_bytes > max_input_size) {
		struct strbuf size_limit = STRBUF_INIT;
		strbuf_humanise_bytes(&size_limit, max_input_size);
		die(_("pack exceeds maximum allowed size (%s)"),
		    size_limit.buf);
	}
}
359
/*
 * Set up input_fd/output_fd/nothread_data.pack_fd for the pack source
 * and initialize the running pack hash. With --stdin the pack is read
 * from fd 0 and spooled to pack_name (or a temporary file under the
 * object database when pack_name is NULL); otherwise the named pack is
 * opened read-only. Returns the pack file name actually used.
 */
static const char *open_pack_file(const char *pack_name)
{
	if (from_stdin) {
		input_fd = 0;
		if (!pack_name) {
			struct strbuf tmp_file = STRBUF_INIT;
			output_fd = odb_mkstemp(the_repository->objects, &tmp_file,
						"pack/tmp_pack_XXXXXX");
			pack_name = strbuf_detach(&tmp_file, NULL);
		} else {
			output_fd = xopen(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600);
		}
		/* the spooled copy doubles as the seekable pack for pread */
		nothread_data.pack_fd = output_fd;
	} else {
		input_fd = xopen(pack_name, O_RDONLY);
		output_fd = -1;
		nothread_data.pack_fd = input_fd;
	}
	the_hash_algo->init_fn(&input_ctx);
	return pack_name;
}
381
/*
 * Read and validate the 12-byte pack header ("PACK", version, object
 * count), setting nr_objects. Dies on a bad signature or an
 * unsupported version.
 */
static void parse_pack_header(void)
{
	unsigned char *hdr = fill(sizeof(struct pack_header));

	/* Header consistency check */
	if (get_be32(hdr) != PACK_SIGNATURE)
		die(_("pack signature mismatch"));
	hdr += 4;
	if (!pack_version_ok_native(get_be32(hdr)))
		die(_("pack version %"PRIu32" unsupported"),
		    get_be32(hdr));
	hdr += 4;

	nr_objects = get_be32(hdr);
	use(sizeof(struct pack_header));
}
398
399 __attribute__((format (printf, 2, 3)))
400 static NORETURN void bad_object(off_t offset, const char *format, ...)
401 {
402 va_list params;
403 char buf[1024];
404
405 va_start(params, format);
406 vsnprintf(buf, sizeof(buf), format, params);
407 va_end(params);
408 die(_("pack has bad object at offset %"PRIuMAX": %s"),
409 (uintmax_t)offset, buf);
410 }
411
412 static inline struct thread_local_data *get_thread_data(void)
413 {
414 if (HAVE_THREADS) {
415 if (threads_active)
416 return pthread_getspecific(key);
417 assert(!threads_active &&
418 "This should only be reached when all threads are gone");
419 }
420 return &nothread_data;
421 }
422
423 static void set_thread_data(struct thread_local_data *data)
424 {
425 if (threads_active)
426 pthread_setspecific(key, data);
427 }
428
/*
 * Drop a node's cached contents and credit their size back to the
 * shared delta base cache accounting. Caller must hold work_mutex
 * (base_cache_used is guarded by it).
 */
static void free_base_data(struct base_data *c)
{
	if (c->data) {
		FREE_AND_NULL(c->data);
		base_cache_used -= c->size;
	}
}
436
/*
 * If the delta base cache is over its limit, evict cached contents,
 * preferring nodes whose children are all done (done_head, oldest
 * first) before nodes that still have pending work (work_head).
 * Nodes with retain_data set, and "retain" itself, are never evicted.
 * Caller must hold work_mutex.
 */
static void prune_base_data(struct base_data *retain)
{
	struct list_head *pos;

	if (base_cache_used <= base_cache_limit)
		return;

	list_for_each_prev(pos, &done_head) {
		struct base_data *b = list_entry(pos, struct base_data, list);
		if (b->retain_data || b == retain)
			continue;
		if (b->data) {
			free_base_data(b);
			if (base_cache_used <= base_cache_limit)
				return;
		}
	}

	list_for_each_prev(pos, &work_head) {
		struct base_data *b = list_entry(pos, struct base_data, list);
		if (b->retain_data || b == retain)
			continue;
		if (b->data) {
			free_base_data(b);
			if (base_cache_used <= base_cache_limit)
				return;
		}
	}
}
466
467 static int is_delta_type(enum object_type type)
468 {
469 return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
470 }
471
/*
 * Inflate one object's data from the input stream.
 *
 * For non-delta objects, also compute its object id into *oid (header
 * plus contents). Large blobs (above core.bigFileThreshold) are
 * streamed through a small fixed buffer and NOT kept in memory; in
 * that case NULL is returned and only the id is produced. Dies via
 * bad_object() if the compressed stream is malformed or the inflated
 * size does not match the header's claim.
 */
static void *unpack_entry_data(off_t offset, unsigned long size,
			       enum object_type type, struct object_id *oid)
{
	static char fixed_buf[8192];
	int status;
	git_zstream stream;
	void *buf;
	struct git_hash_ctx c;
	char hdr[32];
	int hdrlen;

	if (!is_delta_type(type)) {
		/* seed the hash with the loose-object style header */
		hdrlen = format_object_header(hdr, sizeof(hdr), type, size);
		the_hash_algo->init_fn(&c);
		git_hash_update(&c, hdr, hdrlen);
	} else
		/* deltas get their id only after resolution; skip hashing */
		oid = NULL;
	if (type == OBJ_BLOB &&
	    size > repo_settings_get_big_file_threshold(the_repository))
		buf = fixed_buf;
	else
		buf = xmallocz(size);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = buf;
	stream.avail_out = buf == fixed_buf ? sizeof(fixed_buf) : size;

	do {
		unsigned char *last_out = stream.next_out;
		stream.next_in = fill(1);
		stream.avail_in = input_len;
		status = git_inflate(&stream, 0);
		use(input_len - stream.avail_in);
		if (oid)
			git_hash_update(&c, last_out, stream.next_out - last_out);
		if (buf == fixed_buf) {
			/* streaming mode: recycle the window each round */
			stream.next_out = buf;
			stream.avail_out = sizeof(fixed_buf);
		}
	} while (status == Z_OK);
	if (stream.total_out != size || status != Z_STREAM_END)
		bad_object(offset, _("inflate returned %d"), status);
	git_inflate_end(&stream);
	if (oid)
		git_hash_final_oid(oid, &c);
	return buf == fixed_buf ? NULL : buf;
}
520
/*
 * Parse one object record from the input stream: the type/size varint
 * header, any delta-base reference (an oid for OBJ_REF_DELTA into
 * *ref_oid, or an absolute base offset for OBJ_OFS_DELTA into
 * *ofs_offset), and the compressed payload. Fills in obj's offset,
 * type, size, header size and crc32, and returns the inflated data
 * (NULL for streamed large blobs; see unpack_entry_data()).
 */
static void *unpack_raw_entry(struct object_entry *obj,
			      off_t *ofs_offset,
			      struct object_id *ref_oid,
			      struct object_id *oid)
{
	unsigned char *p;
	unsigned long size, c;
	off_t base_offset;
	unsigned shift;
	void *data;

	obj->idx.offset = consumed_bytes;
	/* restart the per-object crc; use() accumulates into input_crc32 */
	input_crc32 = crc32(0, NULL, 0);

	/* type lives in bits 4-6 of the first byte, size starts in bits 0-3 */
	p = fill(1);
	c = *p;
	use(1);
	obj->type = (c >> 4) & 7;
	size = (c & 15);
	shift = 4;
	while (c & 0x80) {
		p = fill(1);
		c = *p;
		use(1);
		size += (c & 0x7f) << shift;
		shift += 7;
	}
	obj->size = size;

	switch (obj->type) {
	case OBJ_REF_DELTA:
		oidread(ref_oid, fill(the_hash_algo->rawsz),
			the_repository->hash_algo);
		use(the_hash_algo->rawsz);
		break;
	case OBJ_OFS_DELTA:
		/* big-endian base-128 with an offset-by-one per continuation */
		p = fill(1);
		c = *p;
		use(1);
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			if (!base_offset || MSB(base_offset, 7))
				bad_object(obj->idx.offset, _("offset value overflow for delta base object"));
			p = fill(1);
			c = *p;
			use(1);
			base_offset = (base_offset << 7) + (c & 127);
		}
		/* the base must lie strictly before this object in the pack */
		*ofs_offset = obj->idx.offset - base_offset;
		if (*ofs_offset <= 0 || *ofs_offset >= obj->idx.offset)
			bad_object(obj->idx.offset, _("delta base offset is out of bound"));
		break;
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		break;
	default:
		bad_object(obj->idx.offset, _("unknown object type %d"), obj->type);
	}
	obj->hdr_size = consumed_bytes - obj->idx.offset;

	data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
	obj->idx.crc32 = input_crc32;
	return data;
}
588
/*
 * Re-read and inflate an object's data from the pack file on disk
 * (second pass; the first pass already validated the stream).
 *
 * The compressed extent is bounded by the NEXT entry's offset, so obj
 * must point into the objects[] array with a valid successor (obj[1]).
 *
 * With consume == NULL the whole inflated object is returned in a
 * malloc'd buffer. With a consume callback the data is fed through a
 * 64kB window instead, NULL is returned, and a nonzero callback
 * return aborts early (also returning NULL).
 */
static void *unpack_data(struct object_entry *obj,
			 int (*consume)(const unsigned char *, unsigned long, void *),
			 void *cb_data)
{
	off_t from = obj[0].idx.offset + obj[0].hdr_size;
	off_t len = obj[1].idx.offset - from;
	unsigned char *data, *inbuf;
	git_zstream stream;
	int status;

	data = xmallocz(consume ? 64*1024 : obj->size);
	inbuf = xmalloc((len < 64*1024) ? (int)len : 64*1024);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = data;
	stream.avail_out = consume ? 64*1024 : obj->size;

	do {
		ssize_t n = (len < 64*1024) ? (ssize_t)len : 64*1024;
		/* pread on the per-thread fd: no shared file position to race on */
		n = xpread(get_thread_data()->pack_fd, inbuf, n, from);
		if (n < 0)
			die_errno(_("cannot pread pack file"));
		if (!n)
			die(Q_("premature end of pack file, %"PRIuMAX" byte missing",
			       "premature end of pack file, %"PRIuMAX" bytes missing",
			       len),
			    (uintmax_t)len);
		from += n;
		len -= n;
		stream.next_in = inbuf;
		stream.avail_in = n;
		if (!consume)
			status = git_inflate(&stream, 0);
		else {
			do {
				status = git_inflate(&stream, 0);
				if (consume(data, stream.next_out - data, cb_data)) {
					free(inbuf);
					free(data);
					return NULL;
				}
				stream.next_out = data;
				stream.avail_out = 64*1024;
			} while (status == Z_OK && stream.avail_in);
		}
	} while (len && status == Z_OK && !stream.avail_in);

	/* This has been inflated OK when first encountered, so... */
	if (status != Z_STREAM_END || stream.total_out != obj->size)
		die(_("serious inflate inconsistency"));

	git_inflate_end(&stream);
	free(inbuf);
	if (consume) {
		/* streaming mode returns NULL; the window is no longer needed */
		FREE_AND_NULL(data);
	}
	return data;
}
648
/* Read and inflate an object's full contents from the pack on disk. */
static void *get_data_from_pack(struct object_entry *obj)
{
	return unpack_data(obj, NULL, NULL);
}
653
654 static int compare_ofs_delta_bases(off_t offset1, off_t offset2,
655 enum object_type type1,
656 enum object_type type2)
657 {
658 int cmp = type1 - type2;
659 if (cmp)
660 return cmp;
661 return offset1 < offset2 ? -1 :
662 offset1 > offset2 ? 1 :
663 0;
664 }
665
/*
 * Binary-search the sorted ofs_deltas[] array for a delta whose base
 * is at "offset". Returns any matching index, or -insertion_point-1
 * (always negative) when no delta uses that base.
 */
static int find_ofs_delta(const off_t offset)
{
	int first = 0, last = nr_ofs_deltas;

	while (first < last) {
		int next = first + (last - first) / 2;
		struct ofs_delta_entry *delta = &ofs_deltas[next];
		int cmp;

		cmp = compare_ofs_delta_bases(offset, delta->offset,
					      OBJ_OFS_DELTA,
					      objects[delta->obj_no].type);
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}
688
/*
 * Find the inclusive range [*first_index, *last_index] of entries in
 * ofs_deltas[] whose base is at "offset" (the array is sorted, so all
 * matches are contiguous). Yields an empty range (0, -1) when there
 * are none.
 */
static void find_ofs_delta_children(off_t offset,
				    int *first_index, int *last_index)
{
	int first = find_ofs_delta(offset);
	int last = first;
	int end = nr_ofs_deltas - 1;

	if (first < 0) {
		*first_index = 0;
		*last_index = -1;
		return;
	}
	/* the binary search landed somewhere in the run; widen to its edges */
	while (first > 0 && ofs_deltas[first - 1].offset == offset)
		--first;
	while (last < end && ofs_deltas[last + 1].offset == offset)
		++last;
	*first_index = first;
	*last_index = last;
}
708
709 static int compare_ref_delta_bases(const struct object_id *oid1,
710 const struct object_id *oid2,
711 enum object_type type1,
712 enum object_type type2)
713 {
714 int cmp = type1 - type2;
715 if (cmp)
716 return cmp;
717 return oidcmp(oid1, oid2);
718 }
719
/*
 * Binary-search the sorted ref_deltas[] array for a delta whose base
 * has the given object id. Returns any matching index, or
 * -insertion_point-1 (always negative) when no delta uses that base.
 */
static int find_ref_delta(const struct object_id *oid)
{
	int first = 0, last = nr_ref_deltas;

	while (first < last) {
		int next = first + (last - first) / 2;
		struct ref_delta_entry *delta = &ref_deltas[next];
		int cmp;

		cmp = compare_ref_delta_bases(oid, &delta->oid,
					      OBJ_REF_DELTA,
					      objects[delta->obj_no].type);
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}
742
/*
 * Find the inclusive range [*first_index, *last_index] of entries in
 * ref_deltas[] whose base id equals "oid" (the array is sorted, so all
 * matches are contiguous). Yields an empty range (0, -1) when there
 * are none.
 */
static void find_ref_delta_children(const struct object_id *oid,
				    int *first_index, int *last_index)
{
	int first = find_ref_delta(oid);
	int last = first;
	int end = nr_ref_deltas - 1;

	if (first < 0) {
		*first_index = 0;
		*last_index = -1;
		return;
	}
	/* the binary search landed somewhere in the run; widen to its edges */
	while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
		--first;
	while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
		++last;
	*first_index = first;
	*last_index = last;
}
762
/* State for compare_objects(): streamed existing object vs. pack data. */
struct compare_data {
	struct object_entry *entry;	/* the pack entry under test */
	struct git_istream *st;		/* stream over the pre-existing object */
	unsigned char *buf;		/* scratch buffer for streamed chunks */
	unsigned long buf_size;		/* current capacity of buf */
};
769
/*
 * unpack_data() callback: compare one chunk of the pack object's data
 * against the same-sized span of the pre-existing object streamed via
 * cb_data->st. Dies loudly on any mismatch (a hash collision) or read
 * failure; returns 0 to keep streaming.
 */
static int compare_objects(const unsigned char *buf, unsigned long size,
			   void *cb_data)
{
	struct compare_data *data = cb_data;

	if (data->buf_size < size) {
		free(data->buf);
		data->buf = xmalloc(size);
		data->buf_size = size;
	}

	while (size) {
		ssize_t len = read_istream(data->st, data->buf, size);
		/* existing object ran out before the pack data did */
		if (len == 0)
			die(_("SHA1 COLLISION FOUND WITH %s !"),
			    oid_to_hex(&data->entry->idx.oid));
		if (len < 0)
			die(_("unable to read %s"),
			    oid_to_hex(&data->entry->idx.oid));
		if (memcmp(buf, data->buf, len))
			die(_("SHA1 COLLISION FOUND WITH %s !"),
			    oid_to_hex(&data->entry->idx.oid));
		size -= len;
		buf += len;
	}
	return 0;
}
797
/*
 * Streaming collision check for large blobs whose data was not kept in
 * memory: compare the pack entry against the identically-named object
 * already in the repository without loading either fully.
 *
 * Returns 0 when the comparison was performed (any mismatch dies), or
 * -1 when it does not apply (small object, non-blob, or the existing
 * object cannot be streamed) and the caller must fall back to the
 * in-core comparison.
 *
 * NOTE(review): "collison" is a historical typo for "collision"; kept
 * because the name is referenced elsewhere in this file.
 */
static int check_collison(struct object_entry *entry)
{
	struct compare_data data;
	enum object_type type;
	unsigned long size;

	if (entry->size <= repo_settings_get_big_file_threshold(the_repository) ||
	    entry->type != OBJ_BLOB)
		return -1;

	memset(&data, 0, sizeof(data));
	data.entry = entry;
	data.st = open_istream(the_repository, &entry->idx.oid, &type, &size,
			       NULL);
	if (!data.st)
		return -1;
	/* differing size or type already proves a collision */
	if (size != entry->size || type != entry->type)
		die(_("SHA1 COLLISION FOUND WITH %s !"),
		    oid_to_hex(&entry->idx.oid));
	unpack_data(entry, compare_objects, &data);
	close_istream(data.st);
	free(data.buf);
	return 0;
}
822
/* Remember an object referenced by this pack (see outgoing_links). */
static void record_outgoing_link(const struct object_id *oid)
{
	oidset_insert(&outgoing_links, oid);
}
827
/* Record a tree entry as an outgoing link, but only for subtrees. */
static void maybe_record_name_entry(const struct name_entry *entry)
{
	/*
	 * Checking only trees here results in a significantly faster packfile
	 * indexing, but the drawback is that if the packfile to be indexed
	 * references a local blob only directly (that is, never through a
	 * local tree), that local blob is in danger of being garbage
	 * collected. Such a situation may arise if we push local commits,
	 * including one with a change to a blob in the root tree, and then the
	 * server incorporates them into its main branch through a "rebase" or
	 * "squash" merge strategy, and then we fetch the new main branch from
	 * the server.
	 *
	 * This situation has not been observed yet - we have only noticed
	 * missing commits, not missing trees or blobs. (In fact, if it were
	 * believed that only missing commits are problematic, one could argue
	 * that we should also exclude trees during the outgoing link check;
	 * but it is safer to include them.)
	 *
	 * Due to the rarity of the situation (it has not been observed to
	 * happen in real life), and because the "penalty" in such a situation
	 * is merely to refetch the missing blob when it's needed (and this
	 * happens only once - when refetched, the blob goes into a promisor
	 * pack, so it won't be GC-ed), the tradeoff seems worth it.
	 */
	if (S_ISDIR(entry->mode))
		record_outgoing_link(&entry->oid);
}
856
/*
 * Record every object directly referenced by "obj": subtrees of a
 * tree, the tree and parents of a commit, the target of a tag. Blobs
 * reference nothing and are ignored.
 */
static void do_record_outgoing_links(struct object *obj)
{
	if (obj->type == OBJ_TREE) {
		struct tree *tree = (struct tree *)obj;
		struct tree_desc desc;
		struct name_entry entry;
		if (init_tree_desc_gently(&desc, &tree->object.oid,
					  tree->buffer, tree->size, 0))
			/*
			 * Error messages are given when packs are
			 * verified, so do not print any here.
			 */
			return;
		while (tree_entry_gently(&desc, &entry))
			maybe_record_name_entry(&entry);
	} else if (obj->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *) obj;
		struct commit_list *parents = commit->parents;

		record_outgoing_link(get_commit_tree_oid(commit));
		for (; parents; parents = parents->next)
			record_outgoing_link(&parents->item->object.oid);
	} else if (obj->type == OBJ_TAG) {
		struct tag *tag = (struct tag *) obj;
		record_outgoing_link(get_tagged_oid(tag));
	}
}
884
/*
 * Validate one reconstructed object.
 *
 * Exactly the contents ("data") or a handle to re-read them from the
 * pack ("obj_entry"; used for large blobs whose data was not kept in
 * memory) must be supplied - at least one is asserted non-NULL.
 *
 * If an object with the same id already exists in the repository, the
 * two are compared byte-for-byte and any difference is a fatal hash
 * collision. Under --strict / --fsck-objects the object is also parsed
 * and fsck'd, and outgoing links are recorded when requested.
 *
 * May be called from worker threads; all object-database access is
 * serialized under read_mutex.
 */
static void sha1_object(const void *data, struct object_entry *obj_entry,
			unsigned long size, enum object_type type,
			const struct object_id *oid)
{
	void *new_data = NULL;
	int collision_test_needed = 0;

	assert(data || obj_entry);

	if (startup_info->have_repository) {
		read_lock();
		collision_test_needed = odb_has_object(the_repository->objects, oid,
						       HAS_OBJECT_FETCH_PROMISOR);
		read_unlock();
	}

	/* large streamed blob: try the streaming comparison first */
	if (collision_test_needed && !data) {
		read_lock();
		if (!check_collison(obj_entry))
			collision_test_needed = 0;
		read_unlock();
	}
	if (collision_test_needed) {
		void *has_data;
		enum object_type has_type;
		unsigned long has_size;
		read_lock();
		has_type = odb_read_object_info(the_repository->objects, oid, &has_size);
		if (has_type < 0)
			die(_("cannot read existing object info %s"), oid_to_hex(oid));
		if (has_type != type || has_size != size)
			die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
		has_data = odb_read_object(the_repository->objects, oid,
					   &has_type, &has_size);
		read_unlock();
		if (!data)
			data = new_data = get_data_from_pack(obj_entry);
		if (!has_data)
			die(_("cannot read existing object %s"), oid_to_hex(oid));
		if (size != has_size || type != has_type ||
		    memcmp(data, has_data, size) != 0)
			die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
		free(has_data);
	}

	if (strict || do_fsck_object || record_outgoing_links) {
		read_lock();
		if (type == OBJ_BLOB) {
			struct blob *blob = lookup_blob(the_repository, oid);
			if (blob)
				blob->object.flags |= FLAG_CHECKED;
			else
				die(_("invalid blob object %s"), oid_to_hex(oid));
			if (do_fsck_object &&
			    fsck_object(&blob->object, (void *)data, size, &fsck_options))
				die(_("fsck error in packed object"));
		} else {
			struct object *obj;
			int eaten;
			void *buf = (void *) data;

			assert(data && "data can only be NULL for large _blobs_");

			/*
			 * we do not need to free the memory here, as the
			 * buf is deleted by the caller.
			 */
			obj = parse_object_buffer(the_repository, oid, type,
						  size, buf,
						  &eaten);
			if (!obj)
				die(_("invalid %s"), type_name(type));
			if (do_fsck_object &&
			    fsck_object(obj, buf, size, &fsck_options))
				die(_("fsck error in packed object"));
			if (strict && fsck_walk(obj, NULL, &fsck_options))
				die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
			if (record_outgoing_links)
				do_record_outgoing_links(obj);

			/* drop parsed buffers: the caller owns (and frees) "data" */
			if (obj->type == OBJ_TREE) {
				struct tree *item = (struct tree *) obj;
				item->buffer = NULL;
				obj->parsed = 0;
			}
			if (obj->type == OBJ_COMMIT) {
				struct commit *commit = (struct commit *) obj;
				if (detach_commit_buffer(commit, NULL) != data)
					BUG("parse_object_buffer transmogrified our buffer");
			}
			obj->flags |= FLAG_CHECKED;
		}
		read_unlock();
	}

	free(new_data);
}
982
/*
 * Ensure that this node has been reconstructed and return its contents.
 *
 * In the typical and best case, this node would already be reconstructed
 * (through the invocation to resolve_delta() in threaded_second_pass()) and it
 * would not be pruned. However, if pruning of this node was necessary due to
 * reaching delta_base_cache_limit, this function will find the closest
 * ancestor with reconstructed data that has not been pruned (or if there is
 * none, the ultimate base object), and reconstruct each node in the delta
 * chain in order to generate the reconstructed data for this node.
 */
static void *get_base_data(struct base_data *c)
{
	if (!c->data) {
		struct object_entry *obj = c->obj;
		struct base_data **delta = NULL;
		int delta_nr = 0, delta_alloc = 0;

		/* walk up the chain, collecting pruned delta nodes to rebuild */
		while (is_delta_type(c->obj->type) && !c->data) {
			ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
			delta[delta_nr++] = c;
			c = c->base;
		}
		if (!delta_nr) {
			/* c itself is a non-delta object; just re-read it */
			c->data = get_data_from_pack(obj);
			c->size = obj->size;
			base_cache_used += c->size;
			prune_base_data(c);
		}
		/* re-apply each delta, deepest ancestor first */
		for (; delta_nr > 0; delta_nr--) {
			void *base, *raw;
			c = delta[delta_nr - 1];
			obj = c->obj;
			base = get_base_data(c->base);
			raw = get_data_from_pack(obj);
			c->data = patch_delta(
				base, c->base->size,
				raw, obj->size,
				&c->size);
			free(raw);
			if (!c->data)
				bad_object(obj->idx.offset, _("failed to apply delta"));
			base_cache_used += c->size;
			prune_base_data(c);
		}
		free(delta);
	}
	return c->data;
}
1032
/*
 * Allocate a delta-tree node for "obj" under "parent" and look up the
 * ranges of ref and ofs deltas that use it as a base.
 */
static struct base_data *make_base(struct object_entry *obj,
				   struct base_data *parent)
{
	struct base_data *base = xcalloc(1, sizeof(struct base_data));
	base->base = parent;
	base->obj = obj;
	find_ref_delta_children(&obj->idx.oid,
				&base->ref_first, &base->ref_last);
	find_ofs_delta_children(obj->idx.offset,
				&base->ofs_first, &base->ofs_last);
	/* +2: both [first, last] ranges are inclusive (last-first+1 each) */
	base->children_remaining = base->ref_last - base->ref_first +
		base->ofs_last - base->ofs_first + 2;
	return base;
}
1047
/*
 * Reconstruct one delta object from its (already reconstructed) base:
 * apply the delta, compute and validate the resulting object's id, and
 * return a new delta-tree node carrying the result data so it can in
 * turn serve as a base. Also maintains delta-depth statistics under
 * --verify-stat and bumps the resolved-deltas progress counter.
 */
static struct base_data *resolve_delta(struct object_entry *delta_obj,
				       struct base_data *base)
{
	void *delta_data, *result_data;
	struct base_data *result;
	unsigned long result_size;

	if (show_stat) {
		int i = delta_obj - objects;
		int j = base->obj - objects;
		obj_stat[i].delta_depth = obj_stat[j].delta_depth + 1;
		deepest_delta_lock();
		if (deepest_delta < obj_stat[i].delta_depth)
			deepest_delta = obj_stat[i].delta_depth;
		deepest_delta_unlock();
		obj_stat[i].base_object_no = j;
	}
	delta_data = get_data_from_pack(delta_obj);
	assert(base->data);
	result_data = patch_delta(base->data, base->size,
				  delta_data, delta_obj->size, &result_size);
	free(delta_data);
	if (!result_data)
		bad_object(delta_obj->idx.offset, _("failed to apply delta"));
	hash_object_file(the_hash_algo, result_data, result_size,
			 delta_obj->real_type, &delta_obj->idx.oid);
	/* run the collision/fsck checks on the reconstructed object */
	sha1_object(result_data, NULL, result_size, delta_obj->real_type,
		    &delta_obj->idx.oid);

	result = make_base(delta_obj, base);
	result->data = result_data;
	result->size = result_size;

	counter_lock();
	nr_resolved_deltas++;
	counter_unlock();

	return result;
}
1087
1088 static int compare_ofs_delta_entry(const void *a, const void *b)
1089 {
1090 const struct ofs_delta_entry *delta_a = a;
1091 const struct ofs_delta_entry *delta_b = b;
1092
1093 return delta_a->offset < delta_b->offset ? -1 :
1094 delta_a->offset > delta_b->offset ? 1 :
1095 0;
1096 }
1097
1098 static int compare_ref_delta_entry(const void *a, const void *b)
1099 {
1100 const struct ref_delta_entry *delta_a = a;
1101 const struct ref_delta_entry *delta_b = b;
1102
1103 return oidcmp(&delta_a->oid, &delta_b->oid);
1104 }
1105
/*
 * Worker body for the delta-resolution phase. Repeatedly takes either a
 * fresh non-delta object from the object array, or a delta child of a
 * base currently on work_head, resolves it, and manages the lifetime of
 * base data under the work mutex. Runs until all objects have been
 * dispatched. "data" is the per-thread state (NULL when called on the
 * main thread, e.g. from fix_unresolved_deltas()).
 */
static void *threaded_second_pass(void *data)
{
	if (data)
		set_thread_data(data);
	for (;;) {
		struct base_data *parent = NULL;
		struct object_entry *child_obj = NULL;
		struct base_data *child = NULL;

		counter_lock();
		display_progress(progress, nr_resolved_deltas);
		counter_unlock();

		work_lock();
		if (list_empty(&work_head)) {
			/*
			 * Take an object from the object array.
			 */
			while (nr_dispatched < nr_objects &&
			       is_delta_type(objects[nr_dispatched].type))
				nr_dispatched++;
			if (nr_dispatched >= nr_objects) {
				work_unlock();
				break;
			}
			child_obj = &objects[nr_dispatched++];
		} else {
			/*
			 * Peek at the top of the stack, and take a child from
			 * it.
			 */
			parent = list_first_entry(&work_head, struct base_data,
						  list);

			/* Prefer ref deltas; skip ones already claimed. */
			while (parent->ref_first <= parent->ref_last) {
				int offset = ref_deltas[parent->ref_first++].obj_no;
				child_obj = objects + offset;
				if (child_obj->real_type != OBJ_REF_DELTA) {
					child_obj = NULL;
					continue;
				}
				child_obj->real_type = parent->obj->real_type;
				break;
			}

			if (!child_obj && parent->ofs_first <= parent->ofs_last) {
				child_obj = objects +
					ofs_deltas[parent->ofs_first++].obj_no;
				assert(child_obj->real_type == OBJ_OFS_DELTA);
				child_obj->real_type = parent->obj->real_type;
			}

			if (parent->ref_first > parent->ref_last &&
			    parent->ofs_first > parent->ofs_last) {
				/*
				 * This parent has run out of children, so move
				 * it to done_head.
				 */
				list_del(&parent->list);
				list_add(&parent->list, &done_head);
			}

			/*
			 * Ensure that the parent has data, since we will need
			 * it later.
			 *
			 * NEEDSWORK: If parent data needs to be reloaded, this
			 * prolongs the time that the current thread spends in
			 * the mutex. A mitigating factor is that parent data
			 * needs to be reloaded only if the delta base cache
			 * limit is exceeded, so in the typical case, this does
			 * not happen.
			 */
			get_base_data(parent);
			parent->retain_data++;
		}
		work_unlock();

		/* Resolve outside the mutex; this is the expensive part. */
		if (child_obj) {
			if (parent) {
				child = resolve_delta(child_obj, parent);
				if (!child->children_remaining)
					FREE_AND_NULL(child->data);
			} else{
				child = make_base(child_obj, NULL);
				if (child->children_remaining) {
					/*
					 * Since this child has its own delta children,
					 * we will need this data in the future.
					 * Inflate now so that future iterations will
					 * have access to this object's data while
					 * outside the work mutex.
					 */
					child->data = get_data_from_pack(child_obj);
					child->size = child_obj->size;
				}
			}
		}

		work_lock();
		if (parent)
			parent->retain_data--;

		if (child && child->data) {
			/*
			 * This child has its own children, so add it to
			 * work_head.
			 */
			list_add(&child->list, &work_head);
			base_cache_used += child->size;
			prune_base_data(NULL);
			free_base_data(child);
		} else if (child) {
			/*
			 * This child does not have its own children. It may be
			 * the last descendant of its ancestors; free those
			 * that we can.
			 */
			struct base_data *p = parent;

			while (p) {
				struct base_data *next_p;

				p->children_remaining--;
				if (p->children_remaining)
					break;

				next_p = p->base;
				free_base_data(p);
				list_del(&p->list);
				free(p);

				p = next_p;
			}
			FREE_AND_NULL(child);
		}
		work_unlock();
	}
	return NULL;
}
1246
/*
 * First pass:
 * - find locations of all objects;
 * - calculate SHA1 of all non-delta objects;
 * - remember base (SHA1 or offset) for all deltas.
 */
static void parse_pack_objects(unsigned char *hash)
{
	int i, nr_delays = 0;
	struct ofs_delta_entry *ofs_delta = ofs_deltas;
	struct object_id ref_delta_oid;
	struct stat st;
	struct git_hash_ctx tmp_ctx;

	if (verbose)
		progress = start_progress(
				the_repository,
				progress_title ? progress_title :
				from_stdin ? _("Receiving objects") : _("Indexing objects"),
				nr_objects);
	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];
		void *data = unpack_raw_entry(obj, &ofs_delta->offset,
					      &ref_delta_oid,
					      &obj->idx.oid);
		obj->real_type = obj->type;
		if (obj->type == OBJ_OFS_DELTA) {
			/* ofs_deltas was pre-sized to nr_objects; just append. */
			nr_ofs_deltas++;
			ofs_delta->obj_no = i;
			ofs_delta++;
		} else if (obj->type == OBJ_REF_DELTA) {
			ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
			oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
			ref_deltas[nr_ref_deltas].obj_no = i;
			nr_ref_deltas++;
		} else if (!data) {
			/* large blobs, check later */
			obj->real_type = OBJ_BAD;
			nr_delays++;
		} else
			sha1_object(data, NULL, obj->size, obj->type,
				    &obj->idx.oid);
		free(data);
		display_progress(progress, i+1);
	}
	/* Sentinel entry: offset just past the last object's data. */
	objects[i].idx.offset = consumed_bytes;
	stop_progress(&progress);

	/* Check pack integrity */
	flush();
	the_hash_algo->init_fn(&tmp_ctx);
	/* Snapshot the running input checksum without disturbing it. */
	git_hash_clone(&tmp_ctx, &input_ctx);
	git_hash_final(hash, &tmp_ctx);
	if (!hasheq(fill(the_hash_algo->rawsz), hash, the_repository->hash_algo))
		die(_("pack is corrupted (SHA1 mismatch)"));
	use(the_hash_algo->rawsz);

	/* If input_fd is a file, we should have reached its end now. */
	if (fstat(input_fd, &st))
		die_errno(_("cannot fstat packfile"));
	if (S_ISREG(st.st_mode) &&
	    lseek(input_fd, 0, SEEK_CUR) - input_len != st.st_size)
		die(_("pack has junk at the end"));

	/* Now hash/check the large objects that were deferred above. */
	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];
		if (obj->real_type != OBJ_BAD)
			continue;
		obj->real_type = obj->type;
		sha1_object(NULL, obj, obj->size, obj->type,
			    &obj->idx.oid);
		nr_delays--;
	}
	if (nr_delays)
		die(_("confusion beyond insanity in parse_pack_objects()"));
}
1323
/*
 * Second pass:
 * - for all non-delta objects, look if it is used as a base for
 *   deltas;
 * - if used as a base, uncompress the object and apply all deltas,
 *   recursively checking if the resulting object is used as a base
 *   for some more deltas.
 */
static void resolve_deltas(struct pack_idx_option *opts)
{
	int i;

	if (!nr_ofs_deltas && !nr_ref_deltas)
		return;

	/* Sort deltas by base SHA1/offset for fast searching */
	QSORT(ofs_deltas, nr_ofs_deltas, compare_ofs_delta_entry);
	QSORT(ref_deltas, nr_ref_deltas, compare_ref_delta_entry);

	if (verbose || show_resolving_progress)
		progress = start_progress(the_repository,
					  _("Resolving deltas"),
					  nr_ref_deltas + nr_ofs_deltas);

	nr_dispatched = 0;
	/* Each thread gets its own share of the delta base cache budget. */
	base_cache_limit = opts->delta_base_cache_limit * nr_threads;
	if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
		init_thread();
		for (i = 0; i < nr_threads; i++) {
			int ret = pthread_create(&thread_data[i].thread, NULL,
						 threaded_second_pass, thread_data + i);
			if (ret)
				die(_("unable to create thread: %s"),
				    strerror(ret));
		}
		for (i = 0; i < nr_threads; i++)
			pthread_join(thread_data[i].thread, NULL);
		cleanup_thread();
		return;
	}
	/* Single-threaded fallback: run the worker body inline. */
	threaded_second_pass(&nothread_data);
}
1366
/*
 * Third pass:
 * - append objects to convert thin pack to full pack if required
 * - write the final pack hash
 */
static void fix_unresolved_deltas(struct hashfile *f);
static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
	if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
		stop_progress(&progress);
		/* Flush remaining pack final hash. */
		flush();
		return;
	}

	if (fix_thin_pack) {
		struct hashfile *f;
		unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
		struct strbuf msg = STRBUF_INIT;
		int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
		int nr_objects_initial = nr_objects;
		if (nr_unresolved <= 0)
			die(_("confusion beyond insanity"));
		/*
		 * Make room for up to nr_unresolved appended base objects
		 * plus the sentinel entry; zero only the newly added slots.
		 */
		REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
		memset(objects + nr_objects + 1, 0,
		       nr_unresolved * sizeof(*objects));
		f = hashfd(the_repository->hash_algo, output_fd, curr_pack);
		fix_unresolved_deltas(f);
		strbuf_addf(&msg, Q_("completed with %d local object",
				     "completed with %d local objects",
				     nr_objects - nr_objects_initial),
			    nr_objects - nr_objects_initial);
		stop_progress_msg(&progress, msg.buf);
		strbuf_release(&msg);
		finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
		/*
		 * Rewrite the header with the new object count and recompute
		 * the trailing pack checksum over the grown pack.
		 */
		hashcpy(read_hash, pack_hash, the_repository->hash_algo);
		fixup_pack_header_footer(the_hash_algo, output_fd, pack_hash,
					 curr_pack, nr_objects,
					 read_hash, consumed_bytes-the_hash_algo->rawsz);
		if (!hasheq(read_hash, tail_hash, the_repository->hash_algo))
			die(_("Unexpected tail checksum for %s "
			      "(disk corruption?)"), curr_pack);
	}
	if (nr_ofs_deltas + nr_ref_deltas != nr_resolved_deltas)
		die(Q_("pack has %d unresolved delta",
		       "pack has %d unresolved deltas",
		       nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas),
		    nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}
1416
1417 static int write_compressed(struct hashfile *f, void *in, unsigned int size)
1418 {
1419 git_zstream stream;
1420 int status;
1421 unsigned char outbuf[4096];
1422
1423 git_deflate_init(&stream, zlib_compression_level);
1424 stream.next_in = in;
1425 stream.avail_in = size;
1426
1427 do {
1428 stream.next_out = outbuf;
1429 stream.avail_out = sizeof(outbuf);
1430 status = git_deflate(&stream, Z_FINISH);
1431 hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
1432 } while (status == Z_OK);
1433
1434 if (status != Z_STREAM_END)
1435 die(_("unable to deflate appended object (%d)"), status);
1436 size = stream.total_out;
1437 git_deflate_end(&stream);
1438 return size;
1439 }
1440
/*
 * Append a local base object to the end of the pack being written via
 * "f" (used to turn a thin pack into a self-contained one), and record
 * it in objects[nr_objects]. Writes the standard varint pack object
 * header followed by the deflated buf, and advances the sentinel
 * entry's idx.offset past the appended data. Returns the new entry.
 */
static struct object_entry *append_obj_to_pack(struct hashfile *f,
			       const unsigned char *sha1, void *buf,
			       unsigned long size, enum object_type type)
{
	struct object_entry *obj = &objects[nr_objects++];
	unsigned char header[10];
	unsigned long s = size;
	int n = 0;
	/* Pack object header: type in bits 4-6, size in varint chunks. */
	unsigned char c = (type << 4) | (s & 15);
	s >>= 4;
	while (s) {
		header[n++] = c | 0x80;
		c = s & 0x7f;
		s >>= 7;
	}
	header[n++] = c;
	crc32_begin(f);
	hashwrite(f, header, n);
	obj[0].size = size;
	obj[0].hdr_size = n;
	obj[0].type = type;
	obj[0].real_type = type;
	/* obj[1] is the sentinel; keep it pointing at the new pack end. */
	obj[1].idx.offset = obj[0].idx.offset + n;
	obj[1].idx.offset += write_compressed(f, buf, size);
	obj[0].idx.crc32 = crc32_end(f);
	hashflush(f);
	oidread(&obj->idx.oid, sha1, the_repository->hash_algo);
	return obj;
}
1470
1471 static int delta_pos_compare(const void *_a, const void *_b)
1472 {
1473 struct ref_delta_entry *a = *(struct ref_delta_entry **)_a;
1474 struct ref_delta_entry *b = *(struct ref_delta_entry **)_b;
1475 return a->obj_no - b->obj_no;
1476 }
1477
/*
 * For every ref delta whose base was not found in the pack, fetch the
 * base object from the local repository (prefetching from promisor
 * remotes when possible), append it to the pack via "f", and resolve
 * the deltas that depend on it.
 */
static void fix_unresolved_deltas(struct hashfile *f)
{
	struct ref_delta_entry **sorted_by_pos;
	int i;

	/*
	 * Since many unresolved deltas may well be themselves base objects
	 * for more unresolved deltas, we really want to include the
	 * smallest number of base objects that would cover as many deltas
	 * as possible, allowing for other deltas to resolve without
	 * additional base objects. Since most base objects are to be found
	 * before deltas depending on them, a good heuristic is to start
	 * resolving deltas in the same order as their position in the pack.
	 */
	ALLOC_ARRAY(sorted_by_pos, nr_ref_deltas);
	for (i = 0; i < nr_ref_deltas; i++)
		sorted_by_pos[i] = &ref_deltas[i];
	QSORT(sorted_by_pos, nr_ref_deltas, delta_pos_compare);

	if (repo_has_promisor_remote(the_repository)) {
		/*
		 * Prefetch the delta bases.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		for (i = 0; i < nr_ref_deltas; i++) {
			struct ref_delta_entry *d = sorted_by_pos[i];
			if (!odb_read_object_info_extended(the_repository->objects,
							   &d->oid, NULL,
							   OBJECT_INFO_FOR_PREFETCH))
				continue;
			oid_array_append(&to_fetch, &d->oid);
		}
		promisor_remote_get_direct(the_repository,
					   to_fetch.oid, to_fetch.nr);
		oid_array_clear(&to_fetch);
	}

	for (i = 0; i < nr_ref_deltas; i++) {
		struct ref_delta_entry *d = sorted_by_pos[i];
		enum object_type type;
		void *data;
		unsigned long size;

		/* Already resolved by an earlier iteration? */
		if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
			continue;
		data = odb_read_object(the_repository->objects, &d->oid,
				       &type, &size);
		if (!data)
			continue;

		if (check_object_signature(the_repository, &d->oid, data, size,
					   type) < 0)
			die(_("local object %s is corrupt"), oid_to_hex(&d->oid));

		/*
		 * Add this as an object to the objects array and call
		 * threaded_second_pass() (which will pick up the added
		 * object).
		 */
		append_obj_to_pack(f, d->oid.hash, data, size, type);
		free(data);
		threaded_second_pass(NULL);

		display_progress(progress, nr_resolved_deltas);
	}
	free(sorted_by_pos);
}
1546
1547 static const char *derive_filename(const char *pack_name, const char *strip,
1548 const char *suffix, struct strbuf *buf)
1549 {
1550 size_t len;
1551 if (!strip_suffix(pack_name, strip, &len) || !len ||
1552 pack_name[len - 1] != '.')
1553 die(_("packfile name '%s' does not end with '.%s'"),
1554 pack_name, strip);
1555 strbuf_add(buf, pack_name, len);
1556 strbuf_addstr(buf, suffix);
1557 return buf->buf;
1558 }
1559
1560 static void write_special_file(const char *suffix, const char *msg,
1561 const char *pack_name, const unsigned char *hash,
1562 const char **report)
1563 {
1564 struct strbuf name_buf = STRBUF_INIT;
1565 const char *filename;
1566 int fd;
1567 int msg_len = strlen(msg);
1568
1569 if (pack_name)
1570 filename = derive_filename(pack_name, "pack", suffix, &name_buf);
1571 else
1572 filename = odb_pack_name(the_repository, &name_buf, hash, suffix);
1573
1574 fd = safe_create_file_with_leading_directories(the_repository, filename);
1575 if (fd < 0) {
1576 if (errno != EEXIST)
1577 die_errno(_("cannot write %s file '%s'"),
1578 suffix, filename);
1579 } else {
1580 if (msg_len > 0) {
1581 write_or_die(fd, msg, msg_len);
1582 write_or_die(fd, "\n", 1);
1583 }
1584 if (close(fd) != 0)
1585 die_errno(_("cannot close written %s file '%s'"),
1586 suffix, filename);
1587 if (report)
1588 *report = suffix;
1589 }
1590 strbuf_release(&name_buf);
1591 }
1592
1593 static void rename_tmp_packfile(const char **final_name,
1594 const char *curr_name,
1595 struct strbuf *name, unsigned char *hash,
1596 const char *ext, int make_read_only_if_same)
1597 {
1598 if (!*final_name || strcmp(*final_name, curr_name)) {
1599 if (!*final_name)
1600 *final_name = odb_pack_name(the_repository, name, hash, ext);
1601 if (finalize_object_file(the_repository, curr_name, *final_name))
1602 die(_("unable to rename temporary '*.%s' file to '%s'"),
1603 ext, *final_name);
1604 } else if (make_read_only_if_same) {
1605 chmod(*final_name, 0444);
1606 }
1607 }
1608
/*
 * Finalize the index-pack run: flush and close the pack, write any
 * .keep/.promisor sidecar files, move the temporary pack/.idx/.rev
 * files into their final locations, and report the result (pack hash on
 * stdout; in --stdin mode, followed by any leftover input bytes).
 */
static void final(const char *final_pack_name, const char *curr_pack_name,
		  const char *final_index_name, const char *curr_index_name,
		  const char *final_rev_index_name, const char *curr_rev_index_name,
		  const char *keep_msg, const char *promisor_msg,
		  unsigned char *hash)
{
	const char *report = "pack";
	struct strbuf pack_name = STRBUF_INIT;
	struct strbuf index_name = STRBUF_INIT;
	struct strbuf rev_index_name = STRBUF_INIT;

	if (!from_stdin) {
		close(input_fd);
	} else {
		/* We wrote the pack ourselves; make sure it hits disk. */
		fsync_component_or_die(FSYNC_COMPONENT_PACK, output_fd, curr_pack_name);
		if (close(output_fd))
			die_errno(_("error while closing pack file"));
	}

	if (keep_msg)
		write_special_file("keep", keep_msg, final_pack_name, hash,
				   &report);
	if (promisor_msg)
		write_special_file("promisor", promisor_msg, final_pack_name,
				   hash, NULL);

	/* Rename the pack first so the .idx never points at a missing pack. */
	rename_tmp_packfile(&final_pack_name, curr_pack_name, &pack_name,
			    hash, "pack", from_stdin);
	if (curr_rev_index_name)
		rename_tmp_packfile(&final_rev_index_name, curr_rev_index_name,
				    &rev_index_name, hash, "rev", 1);
	rename_tmp_packfile(&final_index_name, curr_index_name, &index_name,
			    hash, "idx", 1);

	if (do_fsck_object)
		packfile_store_load_pack(the_repository->objects->packfiles,
					 final_index_name, 0);

	if (!from_stdin) {
		printf("%s\n", hash_to_hex(hash));
	} else {
		struct strbuf buf = STRBUF_INIT;

		strbuf_addf(&buf, "%s\t%s\n", report, hash_to_hex(hash));
		write_or_die(1, buf.buf, buf.len);
		strbuf_release(&buf);

		/* Write the last part of the buffer to stdout */
		write_in_full(1, input_buffer + input_offset, input_len);
	}

	strbuf_release(&rev_index_name);
	strbuf_release(&index_name);
	strbuf_release(&pack_name);
}
1664
1665 static int git_index_pack_config(const char *k, const char *v,
1666 const struct config_context *ctx, void *cb)
1667 {
1668 struct pack_idx_option *opts = cb;
1669
1670 if (!strcmp(k, "pack.indexversion")) {
1671 opts->version = git_config_int(k, v, ctx->kvi);
1672 if (opts->version > 2)
1673 die(_("bad pack.indexVersion=%"PRIu32), opts->version);
1674 return 0;
1675 }
1676 if (!strcmp(k, "pack.threads")) {
1677 nr_threads = git_config_int(k, v, ctx->kvi);
1678 if (nr_threads < 0)
1679 die(_("invalid number of threads specified (%d)"),
1680 nr_threads);
1681 if (!HAVE_THREADS && nr_threads != 1) {
1682 warning(_("no threads support, ignoring %s"), k);
1683 nr_threads = 1;
1684 }
1685 return 0;
1686 }
1687 if (!strcmp(k, "pack.writereverseindex")) {
1688 if (git_config_bool(k, v))
1689 opts->flags |= WRITE_REV;
1690 else
1691 opts->flags &= ~WRITE_REV;
1692 }
1693 if (!strcmp(k, "core.deltabasecachelimit")) {
1694 opts->delta_base_cache_limit = git_config_ulong(k, v, ctx->kvi);
1695 return 0;
1696 }
1697 return git_default_config(k, v, ctx, cb);
1698 }
1699
/*
 * qsort() callback: order uint32_t values ascending, returning exactly
 * -1, 0, or 1.
 */
static int cmp_uint32(const void *a_, const void *b_)
{
	uint32_t lhs = *((const uint32_t *)a_);
	uint32_t rhs = *((const uint32_t *)b_);

	return (lhs > rhs) - (lhs < rhs);
}
1707
/*
 * Scan an existing v2 .idx for "anomalous" offsets: entries that use
 * the 8-byte (large) offset table even though their real offset fits in
 * 31 bits (high word zero). Such offsets are recorded in opts->anomaly
 * so that a verifying rewrite can reproduce the same encoding.
 */
static void read_v2_anomalous_offsets(struct packed_git *p,
				      struct pack_idx_option *opts)
{
	const uint32_t *idx1, *idx2;
	uint32_t i;

	/* The address of the 4-byte offset table */
	idx1 = (((const uint32_t *)((const uint8_t *)p->index_data + p->crc_offset))
		+ (size_t)p->num_objects /* CRC32 table */
		);

	/* The address of the 8-byte offset table */
	idx2 = idx1 + p->num_objects;

	for (i = 0; i < p->num_objects; i++) {
		uint32_t off = ntohl(idx1[i]);
		/* MSB set means "look up in the 8-byte table instead". */
		if (!(off & 0x80000000))
			continue;
		off = off & 0x7fffffff;
		check_pack_index_ptr(p, &idx2[off * 2]);
		if (idx2[off * 2])
			continue;
		/*
		 * The real offset is ntohl(idx2[off * 2]) in high 4
		 * octets, and ntohl(idx2[off * 2 + 1]) in low 4
		 * octets. But idx2[off * 2] is Zero!!!
		 */
		ALLOC_GROW(opts->anomaly, opts->anomaly_nr + 1, opts->anomaly_alloc);
		opts->anomaly[opts->anomaly_nr++] = ntohl(idx2[off * 2 + 1]);
	}

	QSORT(opts->anomaly, opts->anomaly_nr, cmp_uint32);
}
1741
/*
 * For --verify: read index-writing options (version, anomalous
 * offsets) back from the existing .idx file of pack_name, so that a
 * re-generated index can be compared byte-for-byte.
 */
static void read_idx_option(struct pack_idx_option *opts, const char *pack_name)
{
	struct packed_git *p = add_packed_git(the_repository, pack_name,
					      strlen(pack_name), 1);

	if (!p)
		die(_("Cannot open existing pack file '%s'"), pack_name);
	if (open_pack_index(p))
		die(_("Cannot open existing pack idx file for '%s'"), pack_name);

	/* Read the attributes from the existing idx file */
	opts->version = p->index_version;

	if (opts->version == 2)
		read_v2_anomalous_offsets(p, opts);

	/*
	 * Get rid of the idx file as we do not need it anymore.
	 * NEEDSWORK: extract this bit from free_pack_by_name() in
	 * object-file.c, perhaps?  It shouldn't matter very much as we
	 * know we haven't installed this pack (hence we never have
	 * read anything from it).
	 */
	close_pack_index(p);
	free(p);
}
1768
/*
 * Print per-object details (unless stat_only) and delta-chain-depth
 * statistics gathered during resolution (--verify-stat[-only]).
 */
static void show_pack_info(int stat_only)
{
	int i, baseobjects = nr_objects - nr_ref_deltas - nr_ofs_deltas;
	unsigned long *chain_histogram = NULL;

	/*
	 * NOTE(review): relies on deepest_delta being nonzero whenever any
	 * delta object exists, so chain_histogram is allocated before the
	 * increment below -- appears to hold after resolve_deltas().
	 */
	if (deepest_delta)
		CALLOC_ARRAY(chain_histogram, deepest_delta);

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];

		if (is_delta_type(obj->type))
			chain_histogram[obj_stat[i].delta_depth - 1]++;
		if (stat_only)
			continue;
		/* oid, type, size, on-disk size, offset [, depth, base oid] */
		printf("%s %-6s %"PRIuMAX" %"PRIuMAX" %"PRIuMAX,
		       oid_to_hex(&obj->idx.oid),
		       type_name(obj->real_type), (uintmax_t)obj->size,
		       (uintmax_t)(obj[1].idx.offset - obj->idx.offset),
		       (uintmax_t)obj->idx.offset);
		if (is_delta_type(obj->type)) {
			struct object_entry *bobj = &objects[obj_stat[i].base_object_no];
			printf(" %u %s", obj_stat[i].delta_depth,
			       oid_to_hex(&bobj->idx.oid));
		}
		putchar('\n');
	}

	if (baseobjects)
		printf_ln(Q_("non delta: %d object",
			     "non delta: %d objects",
			     baseobjects),
			  baseobjects);
	for (i = 0; i < deepest_delta; i++) {
		if (!chain_histogram[i])
			continue;
		printf_ln(Q_("chain length = %d: %lu object",
			     "chain length = %d: %lu objects",
			     chain_histogram[i]),
			  i + 1,
			  chain_histogram[i]);
	}
	free(chain_histogram);
}
1813
/*
 * For --promisor: take the objects recorded in outgoing_links that
 * exist locally in non-promisor packs, feed them to a "git pack-objects"
 * child to repack them, and mark each resulting pack as a promisor pack
 * by writing an empty .promisor file next to it.
 */
static void repack_local_links(void)
{
	struct child_process cmd = CHILD_PROCESS_INIT;
	FILE *out;
	struct strbuf line = STRBUF_INIT;
	struct oidset_iter iter;
	struct object_id *oid;
	char *base_name = NULL;

	if (!oidset_size(&outgoing_links))
		return;

	oidset_iter_init(&outgoing_links, &iter);
	while ((oid = oidset_iter_next(&iter))) {
		struct object_info info = OBJECT_INFO_INIT;
		if (odb_read_object_info_extended(the_repository->objects, oid, &info, 0))
			/* Missing; assume it is a promisor object */
			continue;
		if (info.whence == OI_PACKED && info.u.packed.pack->pack_promisor)
			continue;

		/* Lazily start the child on the first object that needs it. */
		if (!cmd.args.nr) {
			base_name = mkpathdup(
				"%s/pack/pack",
				repo_get_object_directory(the_repository));
			strvec_push(&cmd.args, "pack-objects");
			strvec_push(&cmd.args,
				    "--exclude-promisor-objects-best-effort");
			strvec_push(&cmd.args, base_name);
			cmd.git_cmd = 1;
			cmd.in = -1;
			cmd.out = -1;
			if (start_command(&cmd))
				die(_("could not start pack-objects to repack local links"));
		}

		/* One hex object ID per line on the child's stdin. */
		if (write_in_full(cmd.in, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
		    write_in_full(cmd.in, "\n", 1) < 0)
			die(_("failed to feed local object to pack-objects"));
	}

	if (!cmd.args.nr)
		return;

	close(cmd.in);

	/* pack-objects prints the hash of each pack it wrote. */
	out = xfdopen(cmd.out, "r");
	while (strbuf_getline_lf(&line, out) != EOF) {
		unsigned char binary[GIT_MAX_RAWSZ];
		if (line.len != the_hash_algo->hexsz ||
		    !hex_to_bytes(binary, line.buf, line.len))
			die(_("index-pack: Expecting full hex object ID lines only from pack-objects."));

		/*
		 * pack-objects creates the .pack and .idx files, but not the
		 * .promisor file. Create the .promisor file, which is empty.
		 */
		write_special_file("promisor", "", NULL, binary, NULL);
	}

	fclose(out);
	if (finish_command(&cmd))
		die(_("could not finish pack-objects to repack local links"));
	strbuf_release(&line);
	free(base_name);
}
1880
/*
 * Entry point: parse options, index the pack (first/second/third
 * passes), optionally fix a thin pack, write the .idx/.rev files, and
 * move everything into place.
 */
int cmd_index_pack(int argc,
		   const char **argv,
		   const char *prefix,
		   struct repository *repo UNUSED)
{
	int i, fix_thin_pack = 0, verify = 0, stat_only = 0, rev_index;
	const char *curr_index;
	char *curr_rev_index = NULL;
	const char *index_name = NULL, *pack_name = NULL, *rev_index_name = NULL;
	const char *keep_msg = NULL;
	const char *promisor_msg = NULL;
	struct strbuf index_name_buf = STRBUF_INIT;
	struct strbuf rev_index_name_buf = STRBUF_INIT;
	struct pack_idx_entry **idx_objects;
	struct pack_idx_option opts;
	unsigned char pack_hash[GIT_MAX_RAWSZ];
	unsigned foreign_nr = 1;	/* zero is a "good" value, assume bad */
	int report_end_of_input = 0;
	int hash_algo = 0;

	/*
	 * index-pack never needs to fetch missing objects except when
	 * REF_DELTA bases are missing (which are explicitly handled). It only
	 * accesses the repo to do hash collision checks and to check which
	 * REF_DELTA bases need to be fetched.
	 */
	fetch_if_missing = 0;

	show_usage_if_asked(argc, argv, index_pack_usage);

	disable_replace_refs();
	fsck_options.walk = mark_link;

	reset_pack_idx_option(&opts);
	/* Reverse indexes default to on; config/options may disable them. */
	opts.flags |= WRITE_REV;
	repo_config(the_repository, git_index_pack_config, &opts);
	if (prefix && chdir(prefix))
		die(_("Cannot come back to cwd"));

	if (git_env_bool(GIT_TEST_NO_WRITE_REV_INDEX, 0))
		rev_index = 0;
	else
		rev_index = !!(opts.flags & (WRITE_REV_VERIFY | WRITE_REV));

	/* Hand-rolled option parsing; the last bare argument is the pack. */
	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		if (*arg == '-') {
			if (!strcmp(arg, "--stdin")) {
				from_stdin = 1;
			} else if (!strcmp(arg, "--fix-thin")) {
				fix_thin_pack = 1;
			} else if (skip_to_optional_arg(arg, "--strict", &arg)) {
				strict = 1;
				do_fsck_object = 1;
				fsck_set_msg_types(&fsck_options, arg);
			} else if (!strcmp(arg, "--check-self-contained-and-connected")) {
				strict = 1;
				check_self_contained_and_connected = 1;
			} else if (skip_to_optional_arg(arg, "--fsck-objects", &arg)) {
				do_fsck_object = 1;
				fsck_set_msg_types(&fsck_options, arg);
			} else if (!strcmp(arg, "--verify")) {
				verify = 1;
			} else if (!strcmp(arg, "--verify-stat")) {
				verify = 1;
				show_stat = 1;
			} else if (!strcmp(arg, "--verify-stat-only")) {
				verify = 1;
				show_stat = 1;
				stat_only = 1;
			} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
				; /* nothing to do */
			} else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
				record_outgoing_links = 1;
			} else if (starts_with(arg, "--threads=")) {
				char *end;
				nr_threads = strtoul(arg+10, &end, 0);
				if (!arg[10] || *end || nr_threads < 0)
					usage(index_pack_usage);
				if (!HAVE_THREADS && nr_threads != 1) {
					warning(_("no threads support, ignoring %s"), arg);
					nr_threads = 1;
				}
			} else if (skip_prefix(arg, "--pack_header=", &arg)) {
				/* Header already consumed by the caller. */
				if (parse_pack_header_option(arg,
							     input_buffer,
							     &input_len) < 0)
					die(_("bad --pack_header: %s"), arg);
			} else if (!strcmp(arg, "-v")) {
				verbose = 1;
			} else if (!strcmp(arg, "--progress-title")) {
				if (progress_title || (i+1) >= argc)
					usage(index_pack_usage);
				progress_title = argv[++i];
			} else if (!strcmp(arg, "--show-resolving-progress")) {
				show_resolving_progress = 1;
			} else if (!strcmp(arg, "--report-end-of-input")) {
				report_end_of_input = 1;
			} else if (!strcmp(arg, "-o")) {
				if (index_name || (i+1) >= argc)
					usage(index_pack_usage);
				index_name = argv[++i];
			} else if (starts_with(arg, "--index-version=")) {
				char *c;
				opts.version = strtoul(arg + 16, &c, 10);
				if (opts.version > 2)
					die(_("bad %s"), arg);
				if (*c == ',')
					opts.off32_limit = strtoul(c+1, &c, 0);
				if (*c || opts.off32_limit & 0x80000000)
					die(_("bad %s"), arg);
			} else if (skip_prefix(arg, "--max-input-size=", &arg)) {
				max_input_size = strtoumax(arg, NULL, 10);
			} else if (skip_prefix(arg, "--object-format=", &arg)) {
				hash_algo = hash_algo_by_name(arg);
				if (hash_algo == GIT_HASH_UNKNOWN)
					die(_("unknown hash algorithm '%s'"), arg);
				repo_set_hash_algo(the_repository, hash_algo);
			} else if (!strcmp(arg, "--rev-index")) {
				rev_index = 1;
			} else if (!strcmp(arg, "--no-rev-index")) {
				rev_index = 0;
			} else
				usage(index_pack_usage);
			continue;
		}

		if (pack_name)
			usage(index_pack_usage);
		pack_name = arg;
	}

	/* Sanity-check option combinations. */
	if (!pack_name && !from_stdin)
		usage(index_pack_usage);
	if (fix_thin_pack && !from_stdin)
		die(_("the option '%s' requires '%s'"), "--fix-thin", "--stdin");
	if (promisor_msg && pack_name)
		die(_("--promisor cannot be used with a pack name"));
	if (from_stdin && !startup_info->have_repository)
		die(_("--stdin requires a git repository"));
	if (from_stdin && hash_algo)
		die(_("options '%s' and '%s' cannot be used together"), "--object-format", "--stdin");
	if (!index_name && pack_name)
		index_name = derive_filename(pack_name, "pack", "idx", &index_name_buf);

	/*
	 * Packfiles and indices do not carry enough information to be able to
	 * identify their object hash. So when we are neither in a repository
	 * nor has the user told us which object hash to use we have no other
	 * choice but to guess the object hash.
	 */
	if (!the_repository->hash_algo)
		repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);

	opts.flags &= ~(WRITE_REV | WRITE_REV_VERIFY);
	if (rev_index) {
		opts.flags |= verify ? WRITE_REV_VERIFY : WRITE_REV;
		if (index_name)
			rev_index_name = derive_filename(index_name,
							 "idx", "rev",
							 &rev_index_name_buf);
	}

	if (verify) {
		if (!index_name)
			die(_("--verify with no packfile name given"));
		/* Mirror the existing idx's options so output is comparable. */
		read_idx_option(&opts, index_name);
		opts.flags |= WRITE_IDX_VERIFY | WRITE_IDX_STRICT;
	}
	if (strict)
		opts.flags |= WRITE_IDX_STRICT;

	if (HAVE_THREADS && !nr_threads) {
		nr_threads = online_cpus();
		/*
		 * Experiments show that going above 20 threads doesn't help,
		 * no matter how many cores you have. Below that, we tend to
		 * max at half the number of online_cpus(), presumably because
		 * half of those are hyperthreads rather than full cores. We'll
		 * never reduce the level below "3", though, to match a
		 * historical value that nobody complained about.
		 */
		if (nr_threads < 4)
			; /* too few cores to consider capping */
		else if (nr_threads < 6)
			nr_threads = 3; /* historic cap */
		else if (nr_threads < 40)
			nr_threads /= 2;
		else
			nr_threads = 20; /* hard cap */
	}

	curr_pack = open_pack_file(pack_name);
	parse_pack_header();
	/* One extra entry serves as the end-of-pack offset sentinel. */
	CALLOC_ARRAY(objects, st_add(nr_objects, 1));
	if (show_stat)
		CALLOC_ARRAY(obj_stat, st_add(nr_objects, 1));
	CALLOC_ARRAY(ofs_deltas, nr_objects);
	parse_pack_objects(pack_hash);
	if (report_end_of_input)
		write_in_full(2, "\0", 1);
	resolve_deltas(&opts);
	conclude_pack(fix_thin_pack, curr_pack, pack_hash);
	free(ofs_deltas);
	free(ref_deltas);
	if (strict)
		foreign_nr = check_objects();

	if (show_stat)
		show_pack_info(stat_only);

	/* Write (or verify) the .idx and optional .rev files. */
	ALLOC_ARRAY(idx_objects, nr_objects);
	for (i = 0; i < nr_objects; i++)
		idx_objects[i] = &objects[i].idx;
	curr_index = write_idx_file(the_repository, index_name, idx_objects,
				    nr_objects, &opts, pack_hash);
	if (rev_index)
		curr_rev_index = write_rev_file(the_repository, rev_index_name,
						idx_objects, nr_objects,
						pack_hash, opts.flags);
	free(idx_objects);

	if (!verify)
		final(pack_name, curr_pack,
		      index_name, curr_index,
		      rev_index_name, curr_rev_index,
		      keep_msg, promisor_msg,
		      pack_hash);
	else
		close(input_fd);

	if (do_fsck_object && fsck_finish(&fsck_options))
		die(_("fsck error in pack objects"));

	free(opts.anomaly);
	free(objects);
	strbuf_release(&index_name_buf);
	strbuf_release(&rev_index_name_buf);
	/* These were allocated by open_pack_file()/write_idx_file() if unnamed. */
	if (!pack_name)
		free((void *) curr_pack);
	if (!index_name)
		free((void *) curr_index);
	free(curr_rev_index);

	repack_local_links();

	/*
	 * Let the caller know this pack is not self contained
	 */
	if (check_self_contained_and_connected && foreign_nr)
		return 1;

	return 0;
}