#define USE_THE_REPOSITORY_VARIABLE
#define DISABLE_SIGN_COMPARE_WARNINGS

#include "builtin.h"
#include "config.h"
#include "delta.h"
#include "environment.h"
#include "gettext.h"
#include "hex.h"
#include "pack.h"
#include "csum-file.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "progress.h"
#include "fsck.h"
#include "strbuf.h"
#include "streaming.h"
#include "thread-utils.h"
#include "packfile.h"
#include "pack-revindex.h"
#include "object-file.h"
#include "object-store.h"
#include "oid-array.h"
#include "oidset.h"
#include "path.h"
#include "replace-object.h"
#include "tree-walk.h"
#include "promisor-remote.h"
#include "run-command.h"
#include "setup.h"
#include "strvec.h"

static const char index_pack_usage[] =
"git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--[no-]rev-index] [--verify] [--strict[=<msg-id>=<severity>...]] [--fsck-objects[=<msg-id>=<severity>...]] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";

struct object_entry {
	struct pack_idx_entry idx;
	unsigned long size;
	unsigned char hdr_size;
	signed char type;
	signed char real_type;
};

struct object_stat {
	unsigned delta_depth;
	int base_object_no;
};

struct base_data {
	/* Initialized by make_base(). */
	struct base_data *base;
	struct object_entry *obj;
	int ref_first, ref_last;
	int ofs_first, ofs_last;
	/*
	 * Threads should increment retain_data if they are about to call
	 * patch_delta() using this struct's data as a base, and decrement this
	 * when they are done. While retain_data is nonzero, this struct's data
	 * will not be freed even if the delta base cache limit is exceeded.
	 */
	int retain_data;
	/*
	 * The number of direct children that have not been fully processed
	 * (entered work_head, entered done_head, left done_head). When this
	 * number reaches zero, this struct base_data can be freed.
	 */
	int children_remaining;

	/* Not initialized by make_base(). */
	struct list_head list;
	void *data;
	unsigned long size;
};

/*
 * Stack of struct base_data that have unprocessed children.
 * threaded_second_pass() uses this as a source of work (the other being the
 * objects array).
 *
 * Guarded by work_mutex.
 */
static LIST_HEAD(work_head);

/*
 * Stack of struct base_data that have children, all of whom have been
 * processed or are being processed, and at least one child is being processed.
 * These struct base_data must be kept around until the last child is
 * processed.
 *
 * Guarded by work_mutex.
 */
static LIST_HEAD(done_head);

/*
 * All threads share one delta base cache.
 *
 * base_cache_used is guarded by work_mutex, and base_cache_limit is read-only
 * in a thread.
 */
static size_t base_cache_used;
static size_t base_cache_limit;

struct thread_local_data {
	pthread_t thread;
	int pack_fd;
};

/* Remember to update object flag allocation in object.h */
#define FLAG_LINK (1u<<20)
#define FLAG_CHECKED (1u<<21)

struct ofs_delta_entry {
	off_t offset;
	int obj_no;
};

struct ref_delta_entry {
	struct object_id oid;
	int obj_no;
};

static struct object_entry *objects;
static struct object_stat *obj_stat;
static struct ofs_delta_entry *ofs_deltas;
static struct ref_delta_entry *ref_deltas;
static struct thread_local_data nothread_data;
static int nr_objects;
static int nr_ofs_deltas;
static int nr_ref_deltas;
static int ref_deltas_alloc;
static int nr_resolved_deltas;
static int nr_threads;

static int from_stdin;
static int strict;
static int do_fsck_object;
static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static int verbose;
static const char *progress_title;
static int show_resolving_progress;
static int show_stat;
static int check_self_contained_and_connected;

static struct progress *progress;

/* We always read in 4kB chunks. */
static unsigned char input_buffer[4096];
static unsigned int input_offset, input_len;
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
static struct git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;

/*
 * outgoing_links is guarded by read_mutex, and record_outgoing_links is
 * read-only in a thread.
 */
static struct oidset outgoing_links = OIDSET_INIT;
static int record_outgoing_links;

static struct thread_local_data *thread_data;
static int nr_dispatched;
static int threads_active;

static pthread_mutex_t read_mutex;
#define read_lock()		lock_mutex(&read_mutex)
#define read_unlock()		unlock_mutex(&read_mutex)

static pthread_mutex_t counter_mutex;
#define counter_lock()		lock_mutex(&counter_mutex)
#define counter_unlock()	unlock_mutex(&counter_mutex)

static pthread_mutex_t work_mutex;
#define work_lock()		lock_mutex(&work_mutex)
#define work_unlock()		unlock_mutex(&work_mutex)

static pthread_mutex_t deepest_delta_mutex;
#define deepest_delta_lock()	lock_mutex(&deepest_delta_mutex)
#define deepest_delta_unlock()	unlock_mutex(&deepest_delta_mutex)

static pthread_key_t key;

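/*
 * The lock/unlock helpers below are deliberately no-ops while threads_active
 * is zero, so the single-threaded code path pays no locking cost and the
 * same call sites work in both modes.
 */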
static inline void lock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_lock(mutex);
}

static inline void unlock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_unlock(mutex);
}

/*
 * Mutexes and condition variables can't be statically initialized on Windows.
 */
static void init_thread(void)
{
	int i;
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&counter_mutex, NULL);
	pthread_mutex_init(&work_mutex, NULL);
	if (show_stat)
		pthread_mutex_init(&deepest_delta_mutex, NULL);
	pthread_key_create(&key, NULL);
	CALLOC_ARRAY(thread_data, nr_threads);
	for (i = 0; i < nr_threads; i++) {
		thread_data[i].pack_fd = xopen(curr_pack, O_RDONLY);
	}

	threads_active = 1;
}

static void cleanup_thread(void)
{
	int i;
	if (!threads_active)
		return;
	threads_active = 0;
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&counter_mutex);
	pthread_mutex_destroy(&work_mutex);
	if (show_stat)
		pthread_mutex_destroy(&deepest_delta_mutex);
	for (i = 0; i < nr_threads; i++)
		close(thread_data[i].pack_fd);
	pthread_key_delete(key);
	free(thread_data);
}

static int mark_link(struct object *obj, enum object_type type,
		     void *data UNUSED,
		     struct fsck_options *options UNUSED)
{
	if (!obj)
		return -1;

	if (type != OBJ_ANY && obj->type != type)
		die(_("object type mismatch at %s"), oid_to_hex(&obj->oid));

	obj->flags |= FLAG_LINK;
	return 0;
}

/*
 * The content of each linked object must have been checked,
 * or it must already be present in the object database.
 */
static unsigned check_object(struct object *obj)
{
	if (!obj)
		return 0;

	if (!(obj->flags & FLAG_LINK))
		return 0;

	if (!(obj->flags & FLAG_CHECKED)) {
		unsigned long size;
		int type = oid_object_info(the_repository, &obj->oid, &size);
		if (type <= 0)
			die(_("did not receive expected object %s"),
			    oid_to_hex(&obj->oid));
		if (type != obj->type)
			die(_("object %s: expected type %s, found %s"),
			    oid_to_hex(&obj->oid),
			    type_name(obj->type), type_name(type));
		obj->flags |= FLAG_CHECKED;
		return 1;
	}

	return 0;
}

static unsigned check_objects(void)
{
	unsigned i, max, foreign_nr = 0;

	max = get_max_object_index(the_repository);

	if (verbose)
		progress = start_delayed_progress(the_repository,
						  _("Checking objects"), max);

	for (i = 0; i < max; i++) {
		foreign_nr += check_object(get_indexed_object(the_repository, i));
		display_progress(progress, i + 1);
	}

	stop_progress(&progress);
	return foreign_nr;
}


/* Discard the current buffer's already-used content. */
static void flush(void)
{
	if (input_offset) {
		if (output_fd >= 0)
			write_or_die(output_fd, input_buffer, input_offset);
		git_hash_update(&input_ctx, input_buffer, input_offset);
		memmove(input_buffer, input_buffer + input_offset, input_len);
		input_offset = 0;
	}
}

/*
 * Make sure at least "min" bytes are available in the buffer, and
 * return the pointer to the buffer.
 */
static void *fill(int min)
{
	if (min <= input_len)
		return input_buffer + input_offset;
	if (min > sizeof(input_buffer))
		die(Q_("cannot fill %d byte",
		       "cannot fill %d bytes",
		       min),
		    min);
	flush();
	do {
		ssize_t ret = xread(input_fd, input_buffer + input_len,
				    sizeof(input_buffer) - input_len);
		if (ret <= 0) {
			if (!ret)
				die(_("early EOF"));
			die_errno(_("read error on input"));
		}
		input_len += ret;
		if (from_stdin)
			display_throughput(progress, consumed_bytes + input_len);
	} while (input_len < min);
	return input_buffer;
}

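/*
 * Mark "bytes" from the front of the input buffer as consumed: fold them
 * into the CRC32 of the entry currently being read, advance the buffer
 * offset, and enforce the --max-input-size limit on the total pack size.
 */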
static void use(int bytes)
{
	if (bytes > input_len)
		die(_("used more bytes than were available"));
	input_crc32 = crc32(input_crc32, input_buffer + input_offset, bytes);
	input_len -= bytes;
	input_offset += bytes;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(consumed_bytes, bytes))
		die(_("pack too large for current definition of off_t"));
	consumed_bytes += bytes;
	if (max_input_size && consumed_bytes > max_input_size) {
		struct strbuf size_limit = STRBUF_INIT;
		strbuf_humanise_bytes(&size_limit, max_input_size);
		die(_("pack exceeds maximum allowed size (%s)"),
		    size_limit.buf);
	}
}

static const char *open_pack_file(const char *pack_name)
{
	if (from_stdin) {
		input_fd = 0;
		if (!pack_name) {
			struct strbuf tmp_file = STRBUF_INIT;
			output_fd = odb_mkstemp(&tmp_file,
						"pack/tmp_pack_XXXXXX");
			pack_name = strbuf_detach(&tmp_file, NULL);
		} else {
			output_fd = xopen(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600);
		}
		nothread_data.pack_fd = output_fd;
	} else {
		input_fd = xopen(pack_name, O_RDONLY);
		output_fd = -1;
		nothread_data.pack_fd = input_fd;
	}
	the_hash_algo->init_fn(&input_ctx);
	return pack_name;
}

static void parse_pack_header(void)
{
	unsigned char *hdr = fill(sizeof(struct pack_header));

	/* Header consistency check */
	if (get_be32(hdr) != PACK_SIGNATURE)
		die(_("pack signature mismatch"));
	hdr += 4;
	if (!pack_version_ok_native(get_be32(hdr)))
		die(_("pack version %"PRIu32" unsupported"),
		    get_be32(hdr));
	hdr += 4;

	nr_objects = get_be32(hdr);
	use(sizeof(struct pack_header));
}

__attribute__((format (printf, 2, 3)))
static NORETURN void bad_object(off_t offset, const char *format, ...)
{
	va_list params;
	char buf[1024];

	va_start(params, format);
	vsnprintf(buf, sizeof(buf), format, params);
	va_end(params);
	die(_("pack has bad object at offset %"PRIuMAX": %s"),
	    (uintmax_t)offset, buf);
}

static inline struct thread_local_data *get_thread_data(void)
{
	if (HAVE_THREADS) {
		if (threads_active)
			return pthread_getspecific(key);
		assert(!threads_active &&
		       "This should only be reached when all threads are gone");
	}
	return &nothread_data;
}

static void set_thread_data(struct thread_local_data *data)
{
	if (threads_active)
		pthread_setspecific(key, data);
}

static void free_base_data(struct base_data *c)
{
	if (c->data) {
		FREE_AND_NULL(c->data);
		base_cache_used -= c->size;
	}
}

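/*
 * Evict cached base data until the shared cache fits within
 * base_cache_limit again. Bases whose children have all been dispatched
 * (done_head) are evicted before bases that still have pending children
 * (work_head); entries currently retained by a thread, or the entry passed
 * in as "retain", are skipped.
 */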
static void prune_base_data(struct base_data *retain)
{
	struct list_head *pos;

	if (base_cache_used <= base_cache_limit)
		return;

	list_for_each_prev(pos, &done_head) {
		struct base_data *b = list_entry(pos, struct base_data, list);
		if (b->retain_data || b == retain)
			continue;
		if (b->data) {
			free_base_data(b);
			if (base_cache_used <= base_cache_limit)
				return;
		}
	}

	list_for_each_prev(pos, &work_head) {
		struct base_data *b = list_entry(pos, struct base_data, list);
		if (b->retain_data || b == retain)
			continue;
		if (b->data) {
			free_base_data(b);
			if (base_cache_used <= base_cache_limit)
				return;
		}
	}
}

static int is_delta_type(enum object_type type)
{
	return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
}

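/*
 * Inflate one entry's data from the input stream. For non-delta objects the
 * loose-object header is hashed along with the inflated bytes, so that *oid
 * receives the object name. Blobs larger than the big-file threshold are
 * inflated into a small fixed scratch buffer purely to hash and CRC them;
 * in that case NULL is returned and the data must later be streamed back
 * out of the pack (see unpack_data()).
 */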
static void *unpack_entry_data(off_t offset, unsigned long size,
			       enum object_type type, struct object_id *oid)
{
	static char fixed_buf[8192];
	int status;
	git_zstream stream;
	void *buf;
	struct git_hash_ctx c;
	char hdr[32];
	int hdrlen;

	if (!is_delta_type(type)) {
		hdrlen = format_object_header(hdr, sizeof(hdr), type, size);
		the_hash_algo->init_fn(&c);
		git_hash_update(&c, hdr, hdrlen);
	} else
		oid = NULL;
	if (type == OBJ_BLOB &&
	    size > repo_settings_get_big_file_threshold(the_repository))
		buf = fixed_buf;
	else
		buf = xmallocz(size);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = buf;
	stream.avail_out = buf == fixed_buf ? sizeof(fixed_buf) : size;

	do {
		unsigned char *last_out = stream.next_out;
		stream.next_in = fill(1);
		stream.avail_in = input_len;
		status = git_inflate(&stream, 0);
		use(input_len - stream.avail_in);
		if (oid)
			git_hash_update(&c, last_out, stream.next_out - last_out);
		if (buf == fixed_buf) {
			stream.next_out = buf;
			stream.avail_out = sizeof(fixed_buf);
		}
	} while (status == Z_OK);
	if (stream.total_out != size || status != Z_STREAM_END)
		bad_object(offset, _("inflate returned %d"), status);
	git_inflate_end(&stream);
	if (oid)
		git_hash_final_oid(oid, &c);
	return buf == fixed_buf ? NULL : buf;
}

static void *unpack_raw_entry(struct object_entry *obj,
			      off_t *ofs_offset,
			      struct object_id *ref_oid,
			      struct object_id *oid)
{
	unsigned char *p;
	unsigned long size, c;
	off_t base_offset;
	unsigned shift;
	void *data;

	obj->idx.offset = consumed_bytes;
	input_crc32 = crc32(0, NULL, 0);

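	/*
	 * Each entry begins with a variable-length header: bits 4-6 of the
	 * first byte hold the object type, its low nibble holds the least
	 * significant four bits of the inflated size, and while the MSB is
	 * set each following byte contributes seven more size bits.  For
	 * example, the bytes 0x95 0x0a decode to type 1 (OBJ_COMMIT) with
	 * size 5 + (10 << 4) = 165.
	 */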
	p = fill(1);
	c = *p;
	use(1);
	obj->type = (c >> 4) & 7;
	size = (c & 15);
	shift = 4;
	while (c & 0x80) {
		p = fill(1);
		c = *p;
		use(1);
		size += (c & 0x7f) << shift;
		shift += 7;
	}
	obj->size = size;

	switch (obj->type) {
	case OBJ_REF_DELTA:
		oidread(ref_oid, fill(the_hash_algo->rawsz),
			the_repository->hash_algo);
		use(the_hash_algo->rawsz);
		break;
	case OBJ_OFS_DELTA:
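		/*
		 * The delta base is encoded as a negative offset: a
		 * big-endian base-128 number where each continuation byte
		 * adds 1 to the accumulated value, so every offset has
		 * exactly one encoding. The result is subtracted from this
		 * entry's own offset to locate the base earlier in the pack.
		 */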
		p = fill(1);
		c = *p;
		use(1);
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			if (!base_offset || MSB(base_offset, 7))
				bad_object(obj->idx.offset, _("offset value overflow for delta base object"));
			p = fill(1);
			c = *p;
			use(1);
			base_offset = (base_offset << 7) + (c & 127);
		}
		*ofs_offset = obj->idx.offset - base_offset;
		if (*ofs_offset <= 0 || *ofs_offset >= obj->idx.offset)
			bad_object(obj->idx.offset, _("delta base offset is out of bound"));
		break;
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		break;
	default:
		bad_object(obj->idx.offset, _("unknown object type %d"), obj->type);
	}
	obj->hdr_size = consumed_bytes - obj->idx.offset;

	data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
	obj->idx.crc32 = input_crc32;
	return data;
}

static void *unpack_data(struct object_entry *obj,
			 int (*consume)(const unsigned char *, unsigned long, void *),
			 void *cb_data)
{
	off_t from = obj[0].idx.offset + obj[0].hdr_size;
	off_t len = obj[1].idx.offset - from;
	unsigned char *data, *inbuf;
	git_zstream stream;
	int status;

	data = xmallocz(consume ? 64*1024 : obj->size);
	inbuf = xmalloc((len < 64*1024) ? (int)len : 64*1024);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = data;
	stream.avail_out = consume ? 64*1024 : obj->size;

	do {
		ssize_t n = (len < 64*1024) ? (ssize_t)len : 64*1024;
		n = xpread(get_thread_data()->pack_fd, inbuf, n, from);
		if (n < 0)
			die_errno(_("cannot pread pack file"));
		if (!n)
			die(Q_("premature end of pack file, %"PRIuMAX" byte missing",
			       "premature end of pack file, %"PRIuMAX" bytes missing",
			       len),
			    (uintmax_t)len);
		from += n;
		len -= n;
		stream.next_in = inbuf;
		stream.avail_in = n;
		if (!consume)
			status = git_inflate(&stream, 0);
		else {
			do {
				status = git_inflate(&stream, 0);
				if (consume(data, stream.next_out - data, cb_data)) {
					free(inbuf);
					free(data);
					return NULL;
				}
				stream.next_out = data;
				stream.avail_out = 64*1024;
			} while (status == Z_OK && stream.avail_in);
		}
	} while (len && status == Z_OK && !stream.avail_in);

	/* This has been inflated OK when first encountered, so... */
	if (status != Z_STREAM_END || stream.total_out != obj->size)
		die(_("serious inflate inconsistency"));

	git_inflate_end(&stream);
	free(inbuf);
	if (consume) {
		FREE_AND_NULL(data);
	}
	return data;
}

static void *get_data_from_pack(struct object_entry *obj)
{
	return unpack_data(obj, NULL, NULL);
}

static int compare_ofs_delta_bases(off_t offset1, off_t offset2,
				   enum object_type type1,
				   enum object_type type2)
{
	int cmp = type1 - type2;
	if (cmp)
		return cmp;
	return offset1 < offset2 ? -1 :
	       offset1 > offset2 ?  1 :
	       0;
}

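/*
 * Binary-search the sorted ofs_deltas array for a delta whose base lives at
 * "offset". Returns the index of a match, or -first-1 (where "first" is the
 * insertion point) when no delta uses that offset as its base, mirroring
 * the convention of find_ref_delta() below.
 */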
static int find_ofs_delta(const off_t offset)
{
	int first = 0, last = nr_ofs_deltas;

	while (first < last) {
		int next = first + (last - first) / 2;
		struct ofs_delta_entry *delta = &ofs_deltas[next];
		int cmp;

		cmp = compare_ofs_delta_bases(offset, delta->offset,
					      OBJ_OFS_DELTA,
					      objects[delta->obj_no].type);
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

static void find_ofs_delta_children(off_t offset,
				    int *first_index, int *last_index)
{
	int first = find_ofs_delta(offset);
	int last = first;
	int end = nr_ofs_deltas - 1;

	if (first < 0) {
		*first_index = 0;
		*last_index = -1;
		return;
	}
	while (first > 0 && ofs_deltas[first - 1].offset == offset)
		--first;
	while (last < end && ofs_deltas[last + 1].offset == offset)
		++last;
	*first_index = first;
	*last_index = last;
}

static int compare_ref_delta_bases(const struct object_id *oid1,
				   const struct object_id *oid2,
				   enum object_type type1,
				   enum object_type type2)
{
	int cmp = type1 - type2;
	if (cmp)
		return cmp;
	return oidcmp(oid1, oid2);
}

static int find_ref_delta(const struct object_id *oid)
{
	int first = 0, last = nr_ref_deltas;

	while (first < last) {
		int next = first + (last - first) / 2;
		struct ref_delta_entry *delta = &ref_deltas[next];
		int cmp;

		cmp = compare_ref_delta_bases(oid, &delta->oid,
					      OBJ_REF_DELTA,
					      objects[delta->obj_no].type);
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

static void find_ref_delta_children(const struct object_id *oid,
				    int *first_index, int *last_index)
{
	int first = find_ref_delta(oid);
	int last = first;
	int end = nr_ref_deltas - 1;

	if (first < 0) {
		*first_index = 0;
		*last_index = -1;
		return;
	}
	while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
		--first;
	while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
		++last;
	*first_index = first;
	*last_index = last;
}

struct compare_data {
	struct object_entry *entry;
	struct git_istream *st;
	unsigned char *buf;
	unsigned long buf_size;
};

static int compare_objects(const unsigned char *buf, unsigned long size,
			   void *cb_data)
{
	struct compare_data *data = cb_data;

	if (data->buf_size < size) {
		free(data->buf);
		data->buf = xmalloc(size);
		data->buf_size = size;
	}

	while (size) {
		ssize_t len = read_istream(data->st, data->buf, size);
		if (len == 0)
			die(_("SHA1 COLLISION FOUND WITH %s !"),
			    oid_to_hex(&data->entry->idx.oid));
		if (len < 0)
			die(_("unable to read %s"),
			    oid_to_hex(&data->entry->idx.oid));
		if (memcmp(buf, data->buf, len))
			die(_("SHA1 COLLISION FOUND WITH %s !"),
			    oid_to_hex(&data->entry->idx.oid));
		size -= len;
		buf += len;
	}
	return 0;
}

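/*
 * Streaming collision check for large blobs: re-inflate the pack copy in
 * 64kB chunks (via unpack_data()) and compare it against the existing
 * object of the same name streamed from the object database. Returns -1
 * when streaming does not apply (the entry is small or not a blob, or the
 * existing object cannot be streamed), in which case the caller falls back
 * to the in-memory comparison.
 */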
static int check_collison(struct object_entry *entry)
{
	struct compare_data data;
	enum object_type type;
	unsigned long size;

	if (entry->size <= repo_settings_get_big_file_threshold(the_repository) ||
	    entry->type != OBJ_BLOB)
		return -1;

	memset(&data, 0, sizeof(data));
	data.entry = entry;
	data.st = open_istream(the_repository, &entry->idx.oid, &type, &size,
			       NULL);
	if (!data.st)
		return -1;
	if (size != entry->size || type != entry->type)
		die(_("SHA1 COLLISION FOUND WITH %s !"),
		    oid_to_hex(&entry->idx.oid));
	unpack_data(entry, compare_objects, &data);
	close_istream(data.st);
	free(data.buf);
	return 0;
}

static void record_outgoing_link(const struct object_id *oid)
{
	oidset_insert(&outgoing_links, oid);
}

static void maybe_record_name_entry(const struct name_entry *entry)
{
	/*
	 * Checking only trees here results in a significantly faster packfile
	 * indexing, but the drawback is that if the packfile to be indexed
	 * references a local blob only directly (that is, never through a
	 * local tree), that local blob is in danger of being garbage
	 * collected. Such a situation may arise if we push local commits,
	 * including one with a change to a blob in the root tree, and then the
	 * server incorporates them into its main branch through a "rebase" or
	 * "squash" merge strategy, and then we fetch the new main branch from
	 * the server.
	 *
	 * This situation has not been observed yet - we have only noticed
	 * missing commits, not missing trees or blobs. (In fact, if it were
	 * believed that only missing commits are problematic, one could argue
	 * that we should also exclude trees during the outgoing link check;
	 * but it is safer to include them.)
	 *
	 * Due to the rarity of the situation (it has not been observed to
	 * happen in real life), and because the "penalty" in such a situation
	 * is merely to refetch the missing blob when it's needed (and this
	 * happens only once - when refetched, the blob goes into a promisor
	 * pack, so it won't be GC-ed), the tradeoff seems worth it.
	 */
	if (S_ISDIR(entry->mode))
		record_outgoing_link(&entry->oid);
}

static void do_record_outgoing_links(struct object *obj)
{
	if (obj->type == OBJ_TREE) {
		struct tree *tree = (struct tree *)obj;
		struct tree_desc desc;
		struct name_entry entry;
		if (init_tree_desc_gently(&desc, &tree->object.oid,
					  tree->buffer, tree->size, 0))
			/*
			 * Error messages are given when packs are
			 * verified, so do not print any here.
			 */
			return;
		while (tree_entry_gently(&desc, &entry))
			maybe_record_name_entry(&entry);
	} else if (obj->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *) obj;
		struct commit_list *parents = commit->parents;

		record_outgoing_link(get_commit_tree_oid(commit));
		for (; parents; parents = parents->next)
			record_outgoing_link(&parents->item->object.oid);
	} else if (obj->type == OBJ_TAG) {
		struct tag *tag = (struct tag *) obj;
		record_outgoing_link(get_tagged_oid(tag));
	}
}

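/*
 * Process one object whose contents are fully known: "data" holds the
 * inflated object, or is NULL for large blobs, in which case "obj_entry"
 * identifies the pack entry to stream from. The object is checked against
 * any existing object of the same name (collision detection), optionally
 * fsck'd, and has its outgoing links recorded when record_outgoing_links
 * is set.
 */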
static void sha1_object(const void *data, struct object_entry *obj_entry,
			unsigned long size, enum object_type type,
			const struct object_id *oid)
{
	void *new_data = NULL;
	int collision_test_needed = 0;

	assert(data || obj_entry);

	if (startup_info->have_repository) {
		read_lock();
		collision_test_needed = has_object(the_repository, oid,
						   HAS_OBJECT_FETCH_PROMISOR);
		read_unlock();
	}

	if (collision_test_needed && !data) {
		read_lock();
		if (!check_collison(obj_entry))
			collision_test_needed = 0;
		read_unlock();
	}
	if (collision_test_needed) {
		void *has_data;
		enum object_type has_type;
		unsigned long has_size;
		read_lock();
		has_type = oid_object_info(the_repository, oid, &has_size);
		if (has_type < 0)
			die(_("cannot read existing object info %s"), oid_to_hex(oid));
		if (has_type != type || has_size != size)
			die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
		has_data = repo_read_object_file(the_repository, oid,
						 &has_type, &has_size);
		read_unlock();
		if (!data)
			data = new_data = get_data_from_pack(obj_entry);
		if (!has_data)
			die(_("cannot read existing object %s"), oid_to_hex(oid));
		if (size != has_size || type != has_type ||
		    memcmp(data, has_data, size) != 0)
			die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
		free(has_data);
	}

	if (strict || do_fsck_object || record_outgoing_links) {
		read_lock();
		if (type == OBJ_BLOB) {
			struct blob *blob = lookup_blob(the_repository, oid);
			if (blob)
				blob->object.flags |= FLAG_CHECKED;
			else
				die(_("invalid blob object %s"), oid_to_hex(oid));
			if (do_fsck_object &&
			    fsck_object(&blob->object, (void *)data, size, &fsck_options))
				die(_("fsck error in packed object"));
		} else {
			struct object *obj;
			int eaten;
			void *buf = (void *) data;

			assert(data && "data can only be NULL for large _blobs_");

			/*
			 * we do not need to free the memory here, as the
			 * buf is deleted by the caller.
			 */
			obj = parse_object_buffer(the_repository, oid, type,
						  size, buf,
						  &eaten);
			if (!obj)
				die(_("invalid %s"), type_name(type));
			if (do_fsck_object &&
			    fsck_object(obj, buf, size, &fsck_options))
				die(_("fsck error in packed object"));
			if (strict && fsck_walk(obj, NULL, &fsck_options))
				die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
			if (record_outgoing_links)
				do_record_outgoing_links(obj);

			if (obj->type == OBJ_TREE) {
				struct tree *item = (struct tree *) obj;
				item->buffer = NULL;
				obj->parsed = 0;
			}
			if (obj->type == OBJ_COMMIT) {
				struct commit *commit = (struct commit *) obj;
				if (detach_commit_buffer(commit, NULL) != data)
					BUG("parse_object_buffer transmogrified our buffer");
			}
			obj->flags |= FLAG_CHECKED;
		}
		read_unlock();
	}

	free(new_data);
}

/*
 * Ensure that this node has been reconstructed and return its contents.
 *
 * In the typical and best case, this node would already be reconstructed
 * (through the invocation to resolve_delta() in threaded_second_pass()) and it
 * would not be pruned. However, if pruning of this node was necessary due to
 * reaching delta_base_cache_limit, this function will find the closest
 * ancestor with reconstructed data that has not been pruned (or if there is
 * none, the ultimate base object), and reconstruct each node in the delta
 * chain in order to generate the reconstructed data for this node.
 */
static void *get_base_data(struct base_data *c)
{
	if (!c->data) {
		struct object_entry *obj = c->obj;
		struct base_data **delta = NULL;
		int delta_nr = 0, delta_alloc = 0;

		while (is_delta_type(c->obj->type) && !c->data) {
			ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
			delta[delta_nr++] = c;
			c = c->base;
		}
		if (!delta_nr) {
			c->data = get_data_from_pack(obj);
			c->size = obj->size;
			base_cache_used += c->size;
			prune_base_data(c);
		}
		for (; delta_nr > 0; delta_nr--) {
			void *base, *raw;
			c = delta[delta_nr - 1];
			obj = c->obj;
			base = get_base_data(c->base);
			raw = get_data_from_pack(obj);
			c->data = patch_delta(
				base, c->base->size,
				raw, obj->size,
				&c->size);
			free(raw);
			if (!c->data)
				bad_object(obj->idx.offset, _("failed to apply delta"));
			base_cache_used += c->size;
			prune_base_data(c);
		}
		free(delta);
	}
	return c->data;
}

static struct base_data *make_base(struct object_entry *obj,
				   struct base_data *parent)
{
	struct base_data *base = xcalloc(1, sizeof(struct base_data));
	base->base = parent;
	base->obj = obj;
	find_ref_delta_children(&obj->idx.oid,
				&base->ref_first, &base->ref_last);
	find_ofs_delta_children(obj->idx.offset,
				&base->ofs_first, &base->ofs_last);
	base->children_remaining = base->ref_last - base->ref_first +
		base->ofs_last - base->ofs_first + 2;
	return base;
}

static struct base_data *resolve_delta(struct object_entry *delta_obj,
				       struct base_data *base)
{
	void *delta_data, *result_data;
	struct base_data *result;
	unsigned long result_size;

	if (show_stat) {
		int i = delta_obj - objects;
		int j = base->obj - objects;
		obj_stat[i].delta_depth = obj_stat[j].delta_depth + 1;
		deepest_delta_lock();
		if (deepest_delta < obj_stat[i].delta_depth)
			deepest_delta = obj_stat[i].delta_depth;
		deepest_delta_unlock();
		obj_stat[i].base_object_no = j;
	}
	delta_data = get_data_from_pack(delta_obj);
	assert(base->data);
	result_data = patch_delta(base->data, base->size,
				  delta_data, delta_obj->size, &result_size);
	free(delta_data);
	if (!result_data)
		bad_object(delta_obj->idx.offset, _("failed to apply delta"));
	hash_object_file(the_hash_algo, result_data, result_size,
			 delta_obj->real_type, &delta_obj->idx.oid);
	sha1_object(result_data, NULL, result_size, delta_obj->real_type,
		    &delta_obj->idx.oid);

	result = make_base(delta_obj, base);
	result->data = result_data;
	result->size = result_size;

	counter_lock();
	nr_resolved_deltas++;
	counter_unlock();

	return result;
}

static int compare_ofs_delta_entry(const void *a, const void *b)
{
	const struct ofs_delta_entry *delta_a = a;
	const struct ofs_delta_entry *delta_b = b;

	return delta_a->offset < delta_b->offset ? -1 :
	       delta_a->offset > delta_b->offset ?  1 :
	       0;
}

static int compare_ref_delta_entry(const void *a, const void *b)
{
	const struct ref_delta_entry *delta_a = a;
	const struct ref_delta_entry *delta_b = b;

	return oidcmp(&delta_a->oid, &delta_b->oid);
}

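/*
 * Worker loop for the delta-resolving phase. On each iteration, under
 * work_mutex, a thread either dispatches the next non-delta object from the
 * objects array, or picks one unresolved child of the base at the top of
 * work_head. Resolved children that themselves have children are pushed
 * onto work_head; parents that have handed out their last child move to
 * done_head and are freed once every child has been processed. Cached base
 * data is kept within base_cache_limit via prune_base_data().
 */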
static void *threaded_second_pass(void *data)
{
	if (data)
		set_thread_data(data);
	for (;;) {
		struct base_data *parent = NULL;
		struct object_entry *child_obj = NULL;
		struct base_data *child = NULL;

		counter_lock();
		display_progress(progress, nr_resolved_deltas);
		counter_unlock();

		work_lock();
		if (list_empty(&work_head)) {
			/*
			 * Take an object from the object array.
			 */
			while (nr_dispatched < nr_objects &&
			       is_delta_type(objects[nr_dispatched].type))
				nr_dispatched++;
			if (nr_dispatched >= nr_objects) {
				work_unlock();
				break;
			}
			child_obj = &objects[nr_dispatched++];
		} else {
			/*
			 * Peek at the top of the stack, and take a child from
			 * it.
			 */
			parent = list_first_entry(&work_head, struct base_data,
						  list);

			while (parent->ref_first <= parent->ref_last) {
				int offset = ref_deltas[parent->ref_first++].obj_no;
				child_obj = objects + offset;
				if (child_obj->real_type != OBJ_REF_DELTA) {
					child_obj = NULL;
					continue;
				}
				child_obj->real_type = parent->obj->real_type;
				break;
			}

			if (!child_obj && parent->ofs_first <= parent->ofs_last) {
				child_obj = objects +
					ofs_deltas[parent->ofs_first++].obj_no;
				assert(child_obj->real_type == OBJ_OFS_DELTA);
				child_obj->real_type = parent->obj->real_type;
			}

			if (parent->ref_first > parent->ref_last &&
			    parent->ofs_first > parent->ofs_last) {
				/*
				 * This parent has run out of children, so move
				 * it to done_head.
				 */
				list_del(&parent->list);
				list_add(&parent->list, &done_head);
			}

			/*
			 * Ensure that the parent has data, since we will need
			 * it later.
			 *
			 * NEEDSWORK: If parent data needs to be reloaded, this
			 * prolongs the time that the current thread spends in
			 * the mutex. A mitigating factor is that parent data
			 * needs to be reloaded only if the delta base cache
			 * limit is exceeded, so in the typical case, this does
			 * not happen.
			 */
			get_base_data(parent);
			parent->retain_data++;
		}
		work_unlock();

		if (child_obj) {
			if (parent) {
				child = resolve_delta(child_obj, parent);
				if (!child->children_remaining)
					FREE_AND_NULL(child->data);
			} else {
				child = make_base(child_obj, NULL);
				if (child->children_remaining) {
					/*
					 * Since this child has its own delta children,
					 * we will need this data in the future.
					 * Inflate now so that future iterations will
					 * have access to this object's data while
					 * outside the work mutex.
					 */
					child->data = get_data_from_pack(child_obj);
					child->size = child_obj->size;
				}
			}
		}

		work_lock();
		if (parent)
			parent->retain_data--;

		if (child && child->data) {
			/*
			 * This child has its own children, so add it to
			 * work_head.
			 */
			list_add(&child->list, &work_head);
			base_cache_used += child->size;
			prune_base_data(NULL);
			free_base_data(child);
		} else if (child) {
			/*
			 * This child does not have its own children. It may be
			 * the last descendant of its ancestors; free those
			 * that we can.
			 */
			struct base_data *p = parent;

			while (p) {
				struct base_data *next_p;

				p->children_remaining--;
				if (p->children_remaining)
					break;

				next_p = p->base;
				free_base_data(p);
				list_del(&p->list);
				free(p);

				p = next_p;
			}
			FREE_AND_NULL(child);
		}
		work_unlock();
	}
	return NULL;
}

/*
 * First pass:
 * - find locations of all objects;
 * - calculate SHA1 of all non-delta objects;
 * - remember base (SHA1 or offset) for all deltas.
 */
static void parse_pack_objects(unsigned char *hash)
{
	int i, nr_delays = 0;
	struct ofs_delta_entry *ofs_delta = ofs_deltas;
	struct object_id ref_delta_oid;
	struct stat st;
	struct git_hash_ctx tmp_ctx;

	if (verbose)
		progress = start_progress(
				the_repository,
				progress_title ? progress_title :
				from_stdin ? _("Receiving objects") : _("Indexing objects"),
				nr_objects);
	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];
		void *data = unpack_raw_entry(obj, &ofs_delta->offset,
					      &ref_delta_oid,
					      &obj->idx.oid);
		obj->real_type = obj->type;
		if (obj->type == OBJ_OFS_DELTA) {
			nr_ofs_deltas++;
			ofs_delta->obj_no = i;
			ofs_delta++;
		} else if (obj->type == OBJ_REF_DELTA) {
			ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
			oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
			ref_deltas[nr_ref_deltas].obj_no = i;
			nr_ref_deltas++;
		} else if (!data) {
			/* large blobs, check later */
			obj->real_type = OBJ_BAD;
			nr_delays++;
		} else
			sha1_object(data, NULL, obj->size, obj->type,
				    &obj->idx.oid);
		free(data);
		display_progress(progress, i+1);
	}
	objects[i].idx.offset = consumed_bytes;
	stop_progress(&progress);

	/* Check pack integrity */
	flush();
	the_hash_algo->init_fn(&tmp_ctx);
	git_hash_clone(&tmp_ctx, &input_ctx);
	git_hash_final(hash, &tmp_ctx);
	if (!hasheq(fill(the_hash_algo->rawsz), hash, the_repository->hash_algo))
		die(_("pack is corrupted (SHA1 mismatch)"));
	use(the_hash_algo->rawsz);

	/* If input_fd is a file, we should have reached its end now. */
	if (fstat(input_fd, &st))
		die_errno(_("cannot fstat packfile"));
	if (S_ISREG(st.st_mode) &&
	    lseek(input_fd, 0, SEEK_CUR) - input_len != st.st_size)
		die(_("pack has junk at the end"));

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];
		if (obj->real_type != OBJ_BAD)
			continue;
		obj->real_type = obj->type;
		sha1_object(NULL, obj, obj->size, obj->type,
			    &obj->idx.oid);
		nr_delays--;
	}
	if (nr_delays)
		die(_("confusion beyond insanity in parse_pack_objects()"));
}

/*
 * Second pass:
 * - for each non-delta object, check whether it is used as a base for
 *   deltas;
 * - if used as a base, uncompress the object and apply all deltas,
 *   recursively checking if the resulting object is used as a base
 *   for some more deltas.
 */
static void resolve_deltas(struct pack_idx_option *opts)
{
	int i;

	if (!nr_ofs_deltas && !nr_ref_deltas)
		return;

	/* Sort deltas by base SHA1/offset for fast searching */
	QSORT(ofs_deltas, nr_ofs_deltas, compare_ofs_delta_entry);
	QSORT(ref_deltas, nr_ref_deltas, compare_ref_delta_entry);

	if (verbose || show_resolving_progress)
		progress = start_progress(the_repository,
					  _("Resolving deltas"),
					  nr_ref_deltas + nr_ofs_deltas);

	nr_dispatched = 0;
	base_cache_limit = opts->delta_base_cache_limit * nr_threads;
	if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
		init_thread();
		for (i = 0; i < nr_threads; i++) {
			int ret = pthread_create(&thread_data[i].thread, NULL,
						 threaded_second_pass, thread_data + i);
			if (ret)
				die(_("unable to create thread: %s"),
				    strerror(ret));
		}
		for (i = 0; i < nr_threads; i++)
			pthread_join(thread_data[i].thread, NULL);
		cleanup_thread();
		return;
	}
	threaded_second_pass(&nothread_data);
}

/*
 * Third pass:
 * - append objects to convert thin pack to full pack if required
 * - write the final pack hash
 */
static void fix_unresolved_deltas(struct hashfile *f);
static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
	if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
		stop_progress(&progress);
		/* Flush remaining pack final hash. */
		flush();
		return;
	}

	if (fix_thin_pack) {
		struct hashfile *f;
		unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
		struct strbuf msg = STRBUF_INIT;
		int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
		int nr_objects_initial = nr_objects;
		if (nr_unresolved <= 0)
			die(_("confusion beyond insanity"));
		REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
		memset(objects + nr_objects + 1, 0,
		       nr_unresolved * sizeof(*objects));
		f = hashfd(the_repository->hash_algo, output_fd, curr_pack);
		fix_unresolved_deltas(f);
		strbuf_addf(&msg, Q_("completed with %d local object",
				     "completed with %d local objects",
				     nr_objects - nr_objects_initial),
			    nr_objects - nr_objects_initial);
		stop_progress_msg(&progress, msg.buf);
		strbuf_release(&msg);
		finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
		hashcpy(read_hash, pack_hash, the_repository->hash_algo);
		fixup_pack_header_footer(the_hash_algo, output_fd, pack_hash,
					 curr_pack, nr_objects,
					 read_hash, consumed_bytes-the_hash_algo->rawsz);
		if (!hasheq(read_hash, tail_hash, the_repository->hash_algo))
			die(_("Unexpected tail checksum for %s "
			      "(disk corruption?)"), curr_pack);
	}
	if (nr_ofs_deltas + nr_ref_deltas != nr_resolved_deltas)
		die(Q_("pack has %d unresolved delta",
		       "pack has %d unresolved deltas",
		       nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas),
		    nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}

static int write_compressed(struct hashfile *f, void *in, unsigned int size)
{
	git_zstream stream;
	int status;
	unsigned char outbuf[4096];

	git_deflate_init(&stream, zlib_compression_level);
	stream.next_in = in;
	stream.avail_in = size;

	do {
		stream.next_out = outbuf;
		stream.avail_out = sizeof(outbuf);
		status = git_deflate(&stream, Z_FINISH);
		hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
	} while (status == Z_OK);

	if (status != Z_STREAM_END)
		die(_("unable to deflate appended object (%d)"), status);
	size = stream.total_out;
	git_deflate_end(&stream);
	return size;
}

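/*
 * Append a locally available object to the end of the pack being indexed,
 * so that thin-pack deltas referring to it can be resolved (--fix-thin).
 * The standard pack entry header is written followed by the deflated
 * contents, and the next entry's offset is recorded in obj[1], relying on
 * the extra slot that conclude_pack() allocated past the live entries.
 */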
1440 | static struct object_entry *append_obj_to_pack(struct hashfile *f, | |
1441 | const unsigned char *sha1, void *buf, | |
1442 | unsigned long size, enum object_type type) | |
1443 | { | |
1444 | struct object_entry *obj = &objects[nr_objects++]; | |
1445 | unsigned char header[10]; | |
1446 | unsigned long s = size; | |
1447 | int n = 0; | |
1448 | unsigned char c = (type << 4) | (s & 15); | |
1449 | s >>= 4; | |
1450 | while (s) { | |
1451 | header[n++] = c | 0x80; | |
1452 | c = s & 0x7f; | |
1453 | s >>= 7; | |
1454 | } | |
1455 | header[n++] = c; | |
1456 | crc32_begin(f); | |
1457 | hashwrite(f, header, n); | |
1458 | obj[0].size = size; | |
1459 | obj[0].hdr_size = n; | |
1460 | obj[0].type = type; | |
1461 | obj[0].real_type = type; | |
1462 | obj[1].idx.offset = obj[0].idx.offset + n; | |
1463 | obj[1].idx.offset += write_compressed(f, buf, size); | |
1464 | obj[0].idx.crc32 = crc32_end(f); | |
1465 | hashflush(f); | |
1466 | oidread(&obj->idx.oid, sha1, the_repository->hash_algo); | |
1467 | return obj; | |
1468 | } | |
1469 | ||
1470 | static int delta_pos_compare(const void *_a, const void *_b) | |
1471 | { | |
1472 | struct ref_delta_entry *a = *(struct ref_delta_entry **)_a; | |
1473 | struct ref_delta_entry *b = *(struct ref_delta_entry **)_b; | |
1474 | return a->obj_no - b->obj_no; | |
1475 | } | |
1476 | ||
1477 | static void fix_unresolved_deltas(struct hashfile *f) | |
1478 | { | |
1479 | struct ref_delta_entry **sorted_by_pos; | |
1480 | int i; | |
1481 | ||
1482 | /* | |
1483 | * Since many unresolved deltas may well be themselves base objects | |
1484 | * for more unresolved deltas, we really want to include the | |
1485 | * smallest number of base objects that would cover as much delta | |
1486 | * as possible by picking the | |
1487 | * trunc deltas first, allowing for other deltas to resolve without | |
1488 | * additional base objects. Since most base objects are to be found | |
1489 | * before deltas depending on them, a good heuristic is to start | |
1490 | * resolving deltas in the same order as their position in the pack. | |
1491 | */ | |
1492 | ALLOC_ARRAY(sorted_by_pos, nr_ref_deltas); | |
1493 | for (i = 0; i < nr_ref_deltas; i++) | |
1494 | sorted_by_pos[i] = &ref_deltas[i]; | |
1495 | QSORT(sorted_by_pos, nr_ref_deltas, delta_pos_compare); | |
1496 | ||
1497 | if (repo_has_promisor_remote(the_repository)) { | |
1498 | /* | |
1499 | * Prefetch the delta bases. | |
1500 | */ | |
1501 | struct oid_array to_fetch = OID_ARRAY_INIT; | |
1502 | for (i = 0; i < nr_ref_deltas; i++) { | |
1503 | struct ref_delta_entry *d = sorted_by_pos[i]; | |
1504 | if (!oid_object_info_extended(the_repository, &d->oid, | |
1505 | NULL, | |
1506 | OBJECT_INFO_FOR_PREFETCH)) | |
1507 | continue; | |
1508 | oid_array_append(&to_fetch, &d->oid); | |
1509 | } | |
1510 | promisor_remote_get_direct(the_repository, | |
1511 | to_fetch.oid, to_fetch.nr); | |
1512 | oid_array_clear(&to_fetch); | |
1513 | } | |
1514 | ||
1515 | for (i = 0; i < nr_ref_deltas; i++) { | |
1516 | struct ref_delta_entry *d = sorted_by_pos[i]; | |
1517 | enum object_type type; | |
1518 | void *data; | |
1519 | unsigned long size; | |
1520 | ||
1521 | if (objects[d->obj_no].real_type != OBJ_REF_DELTA) | |
1522 | continue; | |
1523 | data = repo_read_object_file(the_repository, &d->oid, &type, | |
1524 | &size); | |
1525 | if (!data) | |
1526 | continue; | |
1527 | ||
1528 | if (check_object_signature(the_repository, &d->oid, data, size, | |
1529 | type) < 0) | |
1530 | die(_("local object %s is corrupt"), oid_to_hex(&d->oid)); | |
1531 | ||
1532 | /* | |
1533 | * Add this as an object to the objects array and call | |
1534 | * threaded_second_pass() (which will pick up the added | |
1535 | * object). | |
1536 | */ | |
1537 | append_obj_to_pack(f, d->oid.hash, data, size, type); | |
1538 | free(data); | |
1539 | threaded_second_pass(NULL); | |
1540 | ||
1541 | display_progress(progress, nr_resolved_deltas); | |
1542 | } | |
1543 | free(sorted_by_pos); | |
1544 | } | |
1545 | ||
1546 | static const char *derive_filename(const char *pack_name, const char *strip, | |
1547 | const char *suffix, struct strbuf *buf) | |
1548 | { | |
1549 | size_t len; | |
1550 | if (!strip_suffix(pack_name, strip, &len) || !len || | |
1551 | pack_name[len - 1] != '.') | |
1552 | die(_("packfile name '%s' does not end with '.%s'"), | |
1553 | pack_name, strip); | |
1554 | strbuf_add(buf, pack_name, len); | |
1555 | strbuf_addstr(buf, suffix); | |
1556 | return buf->buf; | |
1557 | } | |
1558 | ||
1559 | static void write_special_file(const char *suffix, const char *msg, | |
1560 | const char *pack_name, const unsigned char *hash, | |
1561 | const char **report) | |
1562 | { | |
1563 | struct strbuf name_buf = STRBUF_INIT; | |
1564 | const char *filename; | |
1565 | int fd; | |
1566 | int msg_len = strlen(msg); | |
1567 | ||
1568 | if (pack_name) | |
1569 | filename = derive_filename(pack_name, "pack", suffix, &name_buf); | |
1570 | else | |
1571 | filename = odb_pack_name(the_repository, &name_buf, hash, suffix); | |
1572 | ||
1573 | fd = safe_create_file_with_leading_directories(the_repository, filename); | |
1574 | if (fd < 0) { | |
1575 | if (errno != EEXIST) | |
1576 | die_errno(_("cannot write %s file '%s'"), | |
1577 | suffix, filename); | |
1578 | } else { | |
1579 | if (msg_len > 0) { | |
1580 | write_or_die(fd, msg, msg_len); | |
1581 | write_or_die(fd, "\n", 1); | |
1582 | } | |
1583 | if (close(fd) != 0) | |
1584 | die_errno(_("cannot close written %s file '%s'"), | |
1585 | suffix, filename); | |
1586 | if (report) | |
1587 | *report = suffix; | |
1588 | } | |
1589 | strbuf_release(&name_buf); | |
1590 | } | |
1591 | ||
1592 | static void rename_tmp_packfile(const char **final_name, | |
1593 | const char *curr_name, | |
1594 | struct strbuf *name, unsigned char *hash, | |
1595 | const char *ext, int make_read_only_if_same) | |
1596 | { | |
1597 | if (!*final_name || strcmp(*final_name, curr_name)) { | |
1598 | if (!*final_name) | |
1599 | *final_name = odb_pack_name(the_repository, name, hash, ext); | |
1600 | if (finalize_object_file(curr_name, *final_name)) | |
1601 | die(_("unable to rename temporary '*.%s' file to '%s'"), | |
1602 | ext, *final_name); | |
1603 | } else if (make_read_only_if_same) { | |
1604 | chmod(*final_name, 0444); | |
1605 | } | |
1606 | } | |
1607 | ||
1608 | static void final(const char *final_pack_name, const char *curr_pack_name, | |
1609 | const char *final_index_name, const char *curr_index_name, | |
1610 | const char *final_rev_index_name, const char *curr_rev_index_name, | |
1611 | const char *keep_msg, const char *promisor_msg, | |
1612 | unsigned char *hash) | |
1613 | { | |
1614 | const char *report = "pack"; | |
1615 | struct strbuf pack_name = STRBUF_INIT; | |
1616 | struct strbuf index_name = STRBUF_INIT; | |
1617 | struct strbuf rev_index_name = STRBUF_INIT; | |
1618 | ||
1619 | if (!from_stdin) { | |
1620 | close(input_fd); | |
1621 | } else { | |
1622 | fsync_component_or_die(FSYNC_COMPONENT_PACK, output_fd, curr_pack_name); | |
1623 | if (close(output_fd)) | |
1624 | die_errno(_("error while closing pack file")); | |
1625 | } | |
1626 | ||
1627 | if (keep_msg) | |
1628 | write_special_file("keep", keep_msg, final_pack_name, hash, | |
1629 | &report); | |
1630 | if (promisor_msg) | |
1631 | write_special_file("promisor", promisor_msg, final_pack_name, | |
1632 | hash, NULL); | |
1633 | ||
1634 | rename_tmp_packfile(&final_pack_name, curr_pack_name, &pack_name, | |
1635 | hash, "pack", from_stdin); | |
1636 | if (curr_rev_index_name) | |
1637 | rename_tmp_packfile(&final_rev_index_name, curr_rev_index_name, | |
1638 | &rev_index_name, hash, "rev", 1); | |
1639 | rename_tmp_packfile(&final_index_name, curr_index_name, &index_name, | |
1640 | hash, "idx", 1); | |
1641 | ||
1642 | if (do_fsck_object) { | |
1643 | struct packed_git *p; | |
1644 | p = add_packed_git(the_repository, final_index_name, | |
1645 | strlen(final_index_name), 0); | |
1646 | if (p) | |
1647 | install_packed_git(the_repository, p); | |
1648 | } | |
1649 | ||
1650 | if (!from_stdin) { | |
1651 | printf("%s\n", hash_to_hex(hash)); | |
1652 | } else { | |
1653 | struct strbuf buf = STRBUF_INIT; | |
1654 | ||
1655 | strbuf_addf(&buf, "%s\t%s\n", report, hash_to_hex(hash)); | |
1656 | write_or_die(1, buf.buf, buf.len); | |
1657 | strbuf_release(&buf); | |
1658 | ||
1659 | /* Write the last part of the buffer to stdout */ | |
1660 | write_in_full(1, input_buffer + input_offset, input_len); | |
1661 | } | |
1662 | ||
1663 | strbuf_release(&rev_index_name); | |
1664 | strbuf_release(&index_name); | |
1665 | strbuf_release(&pack_name); | |
1666 | } | |
1667 | ||
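/*
 * Example configuration consulted by the handler below (the values shown are
 * only examples):
 *
 *	[pack]
 *		indexVersion = 2
 *		threads = 4
 *		writeReverseIndex = true
 *	[core]
 *		deltaBaseCacheLimit = 96m
 *
 * Everything else falls through to git_default_config().
 */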
1668 | static int git_index_pack_config(const char *k, const char *v, | |
1669 | const struct config_context *ctx, void *cb) | |
1670 | { | |
1671 | struct pack_idx_option *opts = cb; | |
1672 | ||
1673 | if (!strcmp(k, "pack.indexversion")) { | |
1674 | opts->version = git_config_int(k, v, ctx->kvi); | |
1675 | if (opts->version > 2) | |
1676 | die(_("bad pack.indexVersion=%"PRIu32), opts->version); | |
1677 | return 0; | |
1678 | } | |
1679 | if (!strcmp(k, "pack.threads")) { | |
1680 | nr_threads = git_config_int(k, v, ctx->kvi); | |
1681 | if (nr_threads < 0) | |
1682 | die(_("invalid number of threads specified (%d)"), | |
1683 | nr_threads); | |
1684 | if (!HAVE_THREADS && nr_threads != 1) { | |
1685 | warning(_("no threads support, ignoring %s"), k); | |
1686 | nr_threads = 1; | |
1687 | } | |
1688 | return 0; | |
1689 | } | |
1690 | if (!strcmp(k, "pack.writereverseindex")) { | |
1691 | if (git_config_bool(k, v)) | |
1692 | opts->flags |= WRITE_REV; | |
1693 | else | |
1694 | opts->flags &= ~WRITE_REV; | |
1695 | } | |
1696 | if (!strcmp(k, "core.deltabasecachelimit")) { | |
1697 | opts->delta_base_cache_limit = git_config_ulong(k, v, ctx->kvi); | |
1698 | return 0; | |
1699 | } | |
1700 | return git_default_config(k, v, ctx, cb); | |
1701 | } | |
1702 | ||
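/*
 * qsort()-style comparator: -1 when a < b, 0 when equal, 1 otherwise (the
 * expression (a != b) evaluates to 1 in that last case).  Written this way
 * rather than as "a - b" because the difference of two uint32_t values is
 * unsigned and can never be negative.
 */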
1703 | static int cmp_uint32(const void *a_, const void *b_) | |
1704 | { | |
1705 | uint32_t a = *((uint32_t *)a_); | |
1706 | uint32_t b = *((uint32_t *)b_); | |
1707 | ||
1708 | return (a < b) ? -1 : (a != b); | |
1709 | } | |
1710 | ||
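/*
 * Rough layout of a version-2 .idx file, as assumed by the pointer arithmetic
 * below (see the pack format documentation for the authoritative description):
 *
 *	header + fan-out table
 *	object names
 *	CRC32 table           <- index_data + p->crc_offset
 *	4-byte offset table   <- idx1
 *	8-byte offset table   <- idx2
 *	trailer
 *
 * A 4-byte entry with its most significant bit set is an index into the
 * 8-byte table.  An "anomalous" entry is one whose 8-byte slot has a zero
 * high word: the offset would have fit in 31 bits but was routed through the
 * large-offset table anyway.  Such offsets are collected so that a rewritten
 * index can reproduce them exactly (as --verify requires).
 */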
1711 | static void read_v2_anomalous_offsets(struct packed_git *p, | |
1712 | struct pack_idx_option *opts) | |
1713 | { | |
1714 | const uint32_t *idx1, *idx2; | |
1715 | uint32_t i; | |
1716 | ||
1717 | /* The address of the 4-byte offset table */ | |
1718 | idx1 = (((const uint32_t *)((const uint8_t *)p->index_data + p->crc_offset)) | |
1719 | + (size_t)p->num_objects /* CRC32 table */ | |
1720 | ); | |
1721 | ||
1722 | /* The address of the 8-byte offset table */ | |
1723 | idx2 = idx1 + p->num_objects; | |
1724 | ||
1725 | for (i = 0; i < p->num_objects; i++) { | |
1726 | uint32_t off = ntohl(idx1[i]); | |
1727 | if (!(off & 0x80000000)) | |
1728 | continue; | |
1729 | off = off & 0x7fffffff; | |
1730 | check_pack_index_ptr(p, &idx2[off * 2]); | |
1731 | if (idx2[off * 2]) | |
1732 | continue; | |
1733 | /* | |
1734 |  * The real offset is stored as two halves: ntohl(idx2[off * 2]) holds | |
1735 |  * the high 4 octets and ntohl(idx2[off * 2 + 1]) the low 4 octets. The | |
1736 |  * high half is zero here, so the offset would have fit in 31 bits. | |
1737 | */ | |
1738 | ALLOC_GROW(opts->anomaly, opts->anomaly_nr + 1, opts->anomaly_alloc); | |
1739 | opts->anomaly[opts->anomaly_nr++] = ntohl(idx2[off * 2 + 1]); | |
1740 | } | |
1741 | ||
1742 | QSORT(opts->anomaly, opts->anomaly_nr, cmp_uint32); | |
1743 | } | |
1744 | ||
1745 | static void read_idx_option(struct pack_idx_option *opts, const char *pack_name) | |
1746 | { | |
1747 | struct packed_git *p = add_packed_git(the_repository, pack_name, | |
1748 | strlen(pack_name), 1); | |
1749 | ||
1750 | if (!p) | |
1751 | die(_("Cannot open existing pack file '%s'"), pack_name); | |
1752 | if (open_pack_index(p)) | |
1753 | die(_("Cannot open existing pack idx file for '%s'"), pack_name); | |
1754 | ||
1755 | /* Read the attributes from the existing idx file */ | |
1756 | opts->version = p->index_version; | |
1757 | ||
1758 | if (opts->version == 2) | |
1759 | read_v2_anomalous_offsets(p, opts); | |
1760 | ||
1761 | /* | |
1762 | * Get rid of the idx file as we do not need it anymore. | |
1763 | * NEEDSWORK: extract this bit from free_pack_by_name() in | |
1764 | * object-file.c, perhaps? It shouldn't matter very much as we | |
1765 | * know we haven't installed this pack (hence we never have | |
1766 | * read anything from it). | |
1767 | */ | |
1768 | close_pack_index(p); | |
1769 | free(p); | |
1770 | } | |
1771 | ||
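/*
 * With --verify-stat each object is reported roughly as
 *
 *	<oid> <type> <size> <size-in-packfile> <offset> [<depth> <base-oid>]
 *
 * where <size-in-packfile> is simply the distance to the next object's
 * offset and the last two fields appear only for deltified objects.  A
 * histogram of delta-chain lengths follows.
 */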
1772 | static void show_pack_info(int stat_only) | |
1773 | { | |
1774 | int i, baseobjects = nr_objects - nr_ref_deltas - nr_ofs_deltas; | |
1775 | unsigned long *chain_histogram = NULL; | |
1776 | ||
1777 | if (deepest_delta) | |
1778 | CALLOC_ARRAY(chain_histogram, deepest_delta); | |
1779 | ||
1780 | for (i = 0; i < nr_objects; i++) { | |
1781 | struct object_entry *obj = &objects[i]; | |
1782 | ||
1783 | if (is_delta_type(obj->type)) | |
1784 | chain_histogram[obj_stat[i].delta_depth - 1]++; | |
1785 | if (stat_only) | |
1786 | continue; | |
1787 | printf("%s %-6s %"PRIuMAX" %"PRIuMAX" %"PRIuMAX, | |
1788 | oid_to_hex(&obj->idx.oid), | |
1789 | type_name(obj->real_type), (uintmax_t)obj->size, | |
1790 | (uintmax_t)(obj[1].idx.offset - obj->idx.offset), | |
1791 | (uintmax_t)obj->idx.offset); | |
1792 | if (is_delta_type(obj->type)) { | |
1793 | struct object_entry *bobj = &objects[obj_stat[i].base_object_no]; | |
1794 | printf(" %u %s", obj_stat[i].delta_depth, | |
1795 | oid_to_hex(&bobj->idx.oid)); | |
1796 | } | |
1797 | putchar('\n'); | |
1798 | } | |
1799 | ||
1800 | if (baseobjects) | |
1801 | printf_ln(Q_("non delta: %d object", | |
1802 | "non delta: %d objects", | |
1803 | baseobjects), | |
1804 | baseobjects); | |
1805 | for (i = 0; i < deepest_delta; i++) { | |
1806 | if (!chain_histogram[i]) | |
1807 | continue; | |
1808 | printf_ln(Q_("chain length = %d: %lu object", | |
1809 | "chain length = %d: %lu objects", | |
1810 | chain_histogram[i]), | |
1811 | i + 1, | |
1812 | chain_histogram[i]); | |
1813 | } | |
1814 | free(chain_histogram); | |
1815 | } | |
1816 | ||
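/*
 * A sketch of the flow below: every object recorded in outgoing_links that
 * exists locally outside a promisor pack is fed, one hex object ID per line,
 * to
 *
 *	git pack-objects --exclude-promisor-objects-best-effort \
 *		<objdir>/pack/pack
 *
 * pack-objects prints the hash of each pack it writes; for each such hash an
 * empty .promisor file is created alongside the new pack, marking it as a
 * promisor pack.
 */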
1817 | static void repack_local_links(void) | |
1818 | { | |
1819 | struct child_process cmd = CHILD_PROCESS_INIT; | |
1820 | FILE *out; | |
1821 | struct strbuf line = STRBUF_INIT; | |
1822 | struct oidset_iter iter; | |
1823 | struct object_id *oid; | |
1824 | char *base_name = NULL; | |
1825 | ||
1826 | if (!oidset_size(&outgoing_links)) | |
1827 | return; | |
1828 | ||
1829 | oidset_iter_init(&outgoing_links, &iter); | |
1830 | while ((oid = oidset_iter_next(&iter))) { | |
1831 | struct object_info info = OBJECT_INFO_INIT; | |
1832 | if (oid_object_info_extended(the_repository, oid, &info, 0)) | |
1833 | /* Missing; assume it is a promisor object */ | |
1834 | continue; | |
1835 | if (info.whence == OI_PACKED && info.u.packed.pack->pack_promisor) | |
1836 | continue; | |
1837 | ||
1838 | if (!cmd.args.nr) { | |
1839 | base_name = mkpathdup( | |
1840 | "%s/pack/pack", | |
1841 | repo_get_object_directory(the_repository)); | |
1842 | strvec_push(&cmd.args, "pack-objects"); | |
1843 | strvec_push(&cmd.args, | |
1844 | "--exclude-promisor-objects-best-effort"); | |
1845 | strvec_push(&cmd.args, base_name); | |
1846 | cmd.git_cmd = 1; | |
1847 | cmd.in = -1; | |
1848 | cmd.out = -1; | |
1849 | if (start_command(&cmd)) | |
1850 | die(_("could not start pack-objects to repack local links")); | |
1851 | } | |
1852 | ||
1853 | if (write_in_full(cmd.in, oid_to_hex(oid), the_hash_algo->hexsz) < 0 || | |
1854 | write_in_full(cmd.in, "\n", 1) < 0) | |
1855 | die(_("failed to feed local object to pack-objects")); | |
1856 | } | |
1857 | ||
1858 | if (!cmd.args.nr) | |
1859 | return; | |
1860 | ||
1861 | close(cmd.in); | |
1862 | ||
1863 | out = xfdopen(cmd.out, "r"); | |
1864 | while (strbuf_getline_lf(&line, out) != EOF) { | |
1865 | unsigned char binary[GIT_MAX_RAWSZ]; | |
1866 | if (line.len != the_hash_algo->hexsz || | |
1867 | !hex_to_bytes(binary, line.buf, line.len)) | |
1868 | die(_("index-pack: Expecting full hex object ID lines only from pack-objects.")); | |
1869 | ||
1870 | /* | |
1871 | * pack-objects creates the .pack and .idx files, but not the | |
1872 | * .promisor file. Create the .promisor file, which is empty. | |
1873 | */ | |
1874 | write_special_file("promisor", "", NULL, binary, NULL); | |
1875 | } | |
1876 | ||
1877 | fclose(out); | |
1878 | if (finish_command(&cmd)) | |
1879 | die(_("could not finish pack-objects to repack local links")); | |
1880 | strbuf_release(&line); | |
1881 | free(base_name); | |
1882 | } | |
1883 | ||
1884 | int cmd_index_pack(int argc, | |
1885 | const char **argv, | |
1886 | const char *prefix, | |
1887 | struct repository *repo UNUSED) | |
1888 | { | |
1889 | int i, fix_thin_pack = 0, verify = 0, stat_only = 0, rev_index; | |
1890 | const char *curr_index; | |
1891 | char *curr_rev_index = NULL; | |
1892 | const char *index_name = NULL, *pack_name = NULL, *rev_index_name = NULL; | |
1893 | const char *keep_msg = NULL; | |
1894 | const char *promisor_msg = NULL; | |
1895 | struct strbuf index_name_buf = STRBUF_INIT; | |
1896 | struct strbuf rev_index_name_buf = STRBUF_INIT; | |
1897 | struct pack_idx_entry **idx_objects; | |
1898 | struct pack_idx_option opts; | |
1899 | unsigned char pack_hash[GIT_MAX_RAWSZ]; | |
1900 | unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */ | |
1901 | int report_end_of_input = 0; | |
1902 | int hash_algo = 0; | |
1903 | ||
1904 | /* | |
1905 | * index-pack never needs to fetch missing objects except when | |
1906 | * REF_DELTA bases are missing (which are explicitly handled). It only | |
1907 | * accesses the repo to do hash collision checks and to check which | |
1908 | * REF_DELTA bases need to be fetched. | |
1909 | */ | |
1910 | fetch_if_missing = 0; | |
1911 | ||
1912 | show_usage_if_asked(argc, argv, index_pack_usage); | |
1913 | ||
1914 | disable_replace_refs(); | |
1915 | fsck_options.walk = mark_link; | |
1916 | ||
1917 | reset_pack_idx_option(&opts); | |
1918 | opts.flags |= WRITE_REV; | |
1919 | git_config(git_index_pack_config, &opts); | |
1920 | if (prefix && chdir(prefix)) | |
1921 | die(_("Cannot come back to cwd")); | |
1922 | ||
1923 | if (git_env_bool(GIT_TEST_NO_WRITE_REV_INDEX, 0)) | |
1924 | rev_index = 0; | |
1925 | else | |
1926 | rev_index = !!(opts.flags & (WRITE_REV_VERIFY | WRITE_REV)); | |
1927 | ||
1928 | for (i = 1; i < argc; i++) { | |
1929 | const char *arg = argv[i]; | |
1930 | ||
1931 | if (*arg == '-') { | |
1932 | if (!strcmp(arg, "--stdin")) { | |
1933 | from_stdin = 1; | |
1934 | } else if (!strcmp(arg, "--fix-thin")) { | |
1935 | fix_thin_pack = 1; | |
1936 | } else if (skip_to_optional_arg(arg, "--strict", &arg)) { | |
1937 | strict = 1; | |
1938 | do_fsck_object = 1; | |
1939 | fsck_set_msg_types(&fsck_options, arg); | |
1940 | } else if (!strcmp(arg, "--check-self-contained-and-connected")) { | |
1941 | strict = 1; | |
1942 | check_self_contained_and_connected = 1; | |
1943 | } else if (skip_to_optional_arg(arg, "--fsck-objects", &arg)) { | |
1944 | do_fsck_object = 1; | |
1945 | fsck_set_msg_types(&fsck_options, arg); | |
1946 | } else if (!strcmp(arg, "--verify")) { | |
1947 | verify = 1; | |
1948 | } else if (!strcmp(arg, "--verify-stat")) { | |
1949 | verify = 1; | |
1950 | show_stat = 1; | |
1951 | } else if (!strcmp(arg, "--verify-stat-only")) { | |
1952 | verify = 1; | |
1953 | show_stat = 1; | |
1954 | stat_only = 1; | |
1955 | } else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) { | |
1956 | ; /* nothing to do */ | |
1957 | } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) { | |
1958 | record_outgoing_links = 1; | |
1959 | } else if (starts_with(arg, "--threads=")) { | |
1960 | char *end; | |
1961 | nr_threads = strtoul(arg+10, &end, 0); | |
1962 | if (!arg[10] || *end || nr_threads < 0) | |
1963 | usage(index_pack_usage); | |
1964 | if (!HAVE_THREADS && nr_threads != 1) { | |
1965 | warning(_("no threads support, ignoring %s"), arg); | |
1966 | nr_threads = 1; | |
1967 | } | |
1968 | } else if (skip_prefix(arg, "--pack_header=", &arg)) { | |
1969 | if (parse_pack_header_option(arg, | |
1970 | input_buffer, | |
1971 | &input_len) < 0) | |
1972 | die(_("bad --pack_header: %s"), arg); | |
1973 | } else if (!strcmp(arg, "-v")) { | |
1974 | verbose = 1; | |
1975 | } else if (!strcmp(arg, "--progress-title")) { | |
1976 | if (progress_title || (i+1) >= argc) | |
1977 | usage(index_pack_usage); | |
1978 | progress_title = argv[++i]; | |
1979 | } else if (!strcmp(arg, "--show-resolving-progress")) { | |
1980 | show_resolving_progress = 1; | |
1981 | } else if (!strcmp(arg, "--report-end-of-input")) { | |
1982 | report_end_of_input = 1; | |
1983 | } else if (!strcmp(arg, "-o")) { | |
1984 | if (index_name || (i+1) >= argc) | |
1985 | usage(index_pack_usage); | |
1986 | index_name = argv[++i]; | |
1987 | } else if (starts_with(arg, "--index-version=")) { | |
1988 | char *c; | |
1989 | opts.version = strtoul(arg + 16, &c, 10); | |
1990 | if (opts.version > 2) | |
1991 | die(_("bad %s"), arg); | |
1992 | if (*c == ',') | |
1993 | opts.off32_limit = strtoul(c+1, &c, 0); | |
1994 | if (*c || opts.off32_limit & 0x80000000) | |
1995 | die(_("bad %s"), arg); | |
1996 | } else if (skip_prefix(arg, "--max-input-size=", &arg)) { | |
1997 | max_input_size = strtoumax(arg, NULL, 10); | |
1998 | } else if (skip_prefix(arg, "--object-format=", &arg)) { | |
1999 | hash_algo = hash_algo_by_name(arg); | |
2000 | if (hash_algo == GIT_HASH_UNKNOWN) | |
2001 | die(_("unknown hash algorithm '%s'"), arg); | |
2002 | repo_set_hash_algo(the_repository, hash_algo); | |
2003 | } else if (!strcmp(arg, "--rev-index")) { | |
2004 | rev_index = 1; | |
2005 | } else if (!strcmp(arg, "--no-rev-index")) { | |
2006 | rev_index = 0; | |
2007 | } else | |
2008 | usage(index_pack_usage); | |
2009 | continue; | |
2010 | } | |
2011 | ||
2012 | if (pack_name) | |
2013 | usage(index_pack_usage); | |
2014 | pack_name = arg; | |
2015 | } | |
2016 | ||
2017 | if (!pack_name && !from_stdin) | |
2018 | usage(index_pack_usage); | |
2019 | if (fix_thin_pack && !from_stdin) | |
2020 | die(_("the option '%s' requires '%s'"), "--fix-thin", "--stdin"); | |
2021 | if (promisor_msg && pack_name) | |
2022 | die(_("--promisor cannot be used with a pack name")); | |
2023 | if (from_stdin && !startup_info->have_repository) | |
2024 | die(_("--stdin requires a git repository")); | |
2025 | if (from_stdin && hash_algo) | |
2026 | die(_("options '%s' and '%s' cannot be used together"), "--object-format", "--stdin"); | |
2027 | if (!index_name && pack_name) | |
2028 | index_name = derive_filename(pack_name, "pack", "idx", &index_name_buf); | |
2029 | ||
2030 | /* | |
2031 | * Packfiles and indices do not carry enough information to be able to | |
2032 |  * identify their object hash. So when we are not in a repository and | |
2033 |  * the user has not told us which object hash to use, we have no choice | |
2034 |  * but to guess the object hash. | |
2035 | */ | |
2036 | if (!the_repository->hash_algo) | |
2037 | repo_set_hash_algo(the_repository, GIT_HASH_SHA1); | |
2038 | ||
2039 | opts.flags &= ~(WRITE_REV | WRITE_REV_VERIFY); | |
2040 | if (rev_index) { | |
2041 | opts.flags |= verify ? WRITE_REV_VERIFY : WRITE_REV; | |
2042 | if (index_name) | |
2043 | rev_index_name = derive_filename(index_name, | |
2044 | "idx", "rev", | |
2045 | &rev_index_name_buf); | |
2046 | } | |
2047 | ||
2048 | if (verify) { | |
2049 | if (!index_name) | |
2050 | die(_("--verify with no packfile name given")); | |
2051 | read_idx_option(&opts, index_name); | |
2052 | opts.flags |= WRITE_IDX_VERIFY | WRITE_IDX_STRICT; | |
2053 | } | |
2054 | if (strict) | |
2055 | opts.flags |= WRITE_IDX_STRICT; | |
2056 | ||
2057 | if (HAVE_THREADS && !nr_threads) { | |
2058 | nr_threads = online_cpus(); | |
2059 | /* | |
2060 | * Experiments show that going above 20 threads doesn't help, | |
2061 | * no matter how many cores you have. Below that, we tend to | |
2062 | * max at half the number of online_cpus(), presumably because | |
2063 | * half of those are hyperthreads rather than full cores. We'll | |
2064 | * never reduce the level below "3", though, to match a | |
2065 | * historical value that nobody complained about. | |
2066 | */ | |
2067 | if (nr_threads < 4) | |
2068 | ; /* too few cores to consider capping */ | |
2069 | else if (nr_threads < 6) | |
2070 | nr_threads = 3; /* historic cap */ | |
2071 | else if (nr_threads < 40) | |
2072 | nr_threads /= 2; | |
2073 | else | |
2074 | nr_threads = 20; /* hard cap */ | |
2075 | } | |
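/*
 * Worked examples of the capping above (online cpus -> threads):
 * 2 -> 2, 4 -> 3, 5 -> 3, 8 -> 4, 16 -> 8, 39 -> 19, 64 -> 20.
 */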
2076 | ||
2077 | curr_pack = open_pack_file(pack_name); | |
2078 | parse_pack_header(); | |
2079 | CALLOC_ARRAY(objects, st_add(nr_objects, 1)); | |
2080 | if (show_stat) | |
2081 | CALLOC_ARRAY(obj_stat, st_add(nr_objects, 1)); | |
2082 | CALLOC_ARRAY(ofs_deltas, nr_objects); | |
2083 | parse_pack_objects(pack_hash); | |
2084 | if (report_end_of_input) | |
2085 | write_in_full(2, "\0", 1); | |
2086 | resolve_deltas(&opts); | |
2087 | conclude_pack(fix_thin_pack, curr_pack, pack_hash); | |
2088 | free(ofs_deltas); | |
2089 | free(ref_deltas); | |
2090 | if (strict) | |
2091 | foreign_nr = check_objects(); | |
2092 | ||
2093 | if (show_stat) | |
2094 | show_pack_info(stat_only); | |
2095 | ||
2096 | ALLOC_ARRAY(idx_objects, nr_objects); | |
2097 | for (i = 0; i < nr_objects; i++) | |
2098 | idx_objects[i] = &objects[i].idx; | |
2099 | curr_index = write_idx_file(the_repository, index_name, idx_objects, | |
2100 | nr_objects, &opts, pack_hash); | |
2101 | if (rev_index) | |
2102 | curr_rev_index = write_rev_file(the_repository, rev_index_name, | |
2103 | idx_objects, nr_objects, | |
2104 | pack_hash, opts.flags); | |
2105 | free(idx_objects); | |
2106 | ||
2107 | if (!verify) | |
2108 | final(pack_name, curr_pack, | |
2109 | index_name, curr_index, | |
2110 | rev_index_name, curr_rev_index, | |
2111 | keep_msg, promisor_msg, | |
2112 | pack_hash); | |
2113 | else | |
2114 | close(input_fd); | |
2115 | ||
2116 | if (do_fsck_object && fsck_finish(&fsck_options)) | |
2117 | die(_("fsck error in pack objects")); | |
2118 | ||
2119 | free(opts.anomaly); | |
2120 | free(objects); | |
2121 | strbuf_release(&index_name_buf); | |
2122 | strbuf_release(&rev_index_name_buf); | |
2123 | if (!pack_name) | |
2124 | free((void *) curr_pack); | |
2125 | if (!index_name) | |
2126 | free((void *) curr_index); | |
2127 | free(curr_rev_index); | |
2128 | ||
2129 | repack_local_links(); | |
2130 | ||
2131 | /* | |
2132 |  * Let the caller know this pack is not self-contained. | |
2133 | */ | |
2134 | if (check_self_contained_and_connected && foreign_nr) | |
2135 | return 1; | |
2136 | ||
2137 | return 0; | |
2138 | } |