#include "builtin.h"
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "delta-islands.h"
#include "reachable.h"
#include "oid-array.h"
#include "argv-array.h"
#include "list.h"
#include "packfile.h"
#include "object-store.h"
#include "dir.h"
#include "midx.h"
#include "trace2.h"
#include "shallow.h"

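/*
 * Convenience wrappers around the oe_* accessors from pack-objects.h,
 * all operating on the single global "to_pack" packing_data below.
 */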
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
#define SET_SIZE(obj, size) oe_set_size(&to_pack, obj, size)
#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
#define DELTA(obj) oe_delta(&to_pack, obj)
#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)

static const char *pack_usage[] = {
	N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
	N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
	NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written, nr_seen;
static struct bitmap_index *bitmap_git;
static uint32_t write_layer;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep_on_disk;
static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int sparse;
static int thin;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static struct bitmap *reuse_packfile_bitmap;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int allow_pack_reuse = 1;
static enum {
	WRITE_BITMAP_FALSE = 0,
	WRITE_BITMAP_QUIET,
	WRITE_BITMAP_TRUE,
} write_bitmap_index;
static uint16_t write_bitmap_options = BITMAP_OPT_HASH_CACHE;

static int exclude_promisor_objects;

static int use_delta_islands;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

static struct list_objects_filter_options filter_options;

enum missing_action {
	MA_ERROR = 0,      /* fail if any missing objects are encountered */
	MA_ALLOW_ANY,      /* silently allow ALL missing objects */
	MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{
	if (indexed_commits_nr >= indexed_commits_alloc) {
		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
		REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
	}

	indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_object_file(&entry->idx.oid, &type, &size);
	if (!buf)
		die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
	base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
				    &base_size);
	if (!base_buf)
		die("unable to read %s",
		    oid_to_hex(&DELTA(entry)->idx.oid));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	/*
	 * We successfully computed this delta once but dropped it for
	 * memory reasons. Something is very wrong if this time we
	 * recompute and create a different delta.
	 */
	if (!delta_buf || delta_size != DELTA_SIZE(entry))
		BUG("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

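/*
 * Deflate the buffer at *pptr in one shot: replace *pptr with a newly
 * allocated buffer holding the compressed bytes, free the original
 * buffer, and return the compressed size.
 */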
static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

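/*
 * Stream a blob that is too large to hold in memory through zlib in
 * 16kB chunks, writing the deflated bytes straight into the pack via
 * the hashfile. Returns the number of compressed bytes written.
 */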
static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
					   const struct object_id *oid)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), oid_to_hex(oid));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			hashwrite(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * We are going to reuse the existing object data as-is. Make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

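/*
 * Copy "len" bytes starting at "offset" from the source pack straight
 * into the output hashfile, windowing through the pack mmap as we go.
 */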
static void copy_pack_data(struct hashfile *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		hashwrite(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;
	const unsigned hashsz = the_hash_algo->rawsz;

	if (!usable_delta) {
		if (oe_type(entry) == OBJ_BLOB &&
		    oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
		    (st = open_istream(the_repository, &entry->idx.oid, &type,
				       &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_object_file(&entry->idx.oid, &type, &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = DELTA_SIZE(entry);
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = DELTA_SIZE(entry);
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
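		/*
		 * The offset is stored as a modified varint, most
		 * significant group first, with each continuation step
		 * implicitly adding 1: e.g. the two bytes 134, 104
		 * decode as ((134 & 127) + 1) * 128 + 104 = 1000.
		 */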
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * additional bytes for the base object ID.
		 */
		if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
		hdrlen += hashsz;
	} else {
		if (limit && hdrlen + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, &entry->idx.oid);
		close_istream(st);
	} else {
		hashwrite(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = IN_PACK(entry);
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = oe_type(entry);
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	const unsigned hashsz = the_hash_algo->rawsz;
	unsigned long entry_size = SIZE(entry);

	if (DELTA(entry))
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry_size);

	offset = entry->in_pack_offset;
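	/*
	 * The revindex is sorted by pack offset, so the on-disk size of
	 * this entry is simply the distance to the next entry's offset.
	 */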
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error(_("bad packed object CRC for %s"),
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
		error(_("corrupt packed object for %s"),
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
		hdrlen += hashsz;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct hashfile *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!DELTA(entry))
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (DELTA(entry)->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (DELTA(entry)->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!IN_PACK(entry))
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (oe_type(entry) == OBJ_REF_DELTA ||
		 oe_type(entry) == OBJ_OFS_DELTA)
				/* check_object() decided it for us ... */
		to_reuse = usable_delta;
				/* ... but pack split may override that */
	else if (oe_type(entry) != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (DELTA(entry))
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct hashfile *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning(_("recursive delta detected for object %s"),
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (DELTA(e)) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, DELTA(e), offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			SET_DELTA(e, NULL);
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die(_("pack too large for current definition of off_t"));
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	struct object_id peeled;
	struct object_entry *entry = packlist_find(&to_pack, oid);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, &peeled)) {
		entry = packlist_find(&to_pack, &peeled);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled || oe_layer(&to_pack, e) != write_layer)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (DELTA_CHILD(e)) {
			add_to_order = 1;
			e = DELTA_CHILD(e);
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (DELTA_SIBLING(e)) {
				e = DELTA_SIBLING(e);
				continue;
			}
			/* go back to our parent node */
			e = DELTA(e);
			while (e && !DELTA_SIBLING(e)) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = DELTA(e);
			}
			if (!e) {
				/* done - we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = DELTA_SIBLING(e);
		}
	}
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; DELTA(root); root = DELTA(root))
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
{
	unsigned int i, last_untagged;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_COMMIT &&
		    oe_type(&objects[i]) != OBJ_TAG)
			continue;
		add_to_write_order(wo, wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_TREE)
			continue;
		add_to_write_order(wo, wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
			add_family_to_write_order(wo, wo_end, &objects[i]);
	}
}

static struct object_entry **compute_write_order(void)
{
	uint32_t max_layers = 1;
	unsigned int i, wo_end;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		SET_DELTA_CHILD(&objects[i], NULL);
		SET_DELTA_SIBLING(&objects[i], NULL);
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!DELTA(e))
			continue;
		/* Mark me as the first child */
		e->delta_sibling_idx = DELTA(e)->delta_child_idx;
		SET_DELTA_CHILD(DELTA(e), e);
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	if (use_delta_islands)
		max_layers = compute_pack_layers(&to_pack);

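	/*
	 * Order each delta-island layer separately; without delta
	 * islands there is a single layer covering all objects.
	 */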
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	wo_end = 0;

	for (; write_layer < max_layers; ++write_layer)
		compute_layer_order(wo, &wo_end);

	if (wo_end != to_pack.nr_objects)
		die(_("ordered %u objects, expected %"PRIu32),
		    wo_end, to_pack.nr_objects);

	return wo;
}


/*
 * A reused set of objects. All objects in a chunk have the same
 * relative position in the original packfile and the generated
 * packfile.
 */

static struct reused_chunk {
	/* The offset of the first object of this chunk in the original
	 * packfile. */
	off_t original;
	/* The offset of the first object of this chunk in the generated
	 * packfile minus "original". */
	off_t difference;
} *reused_chunks;
static int reused_chunks_nr;
static int reused_chunks_alloc;

static void record_reused_object(off_t where, off_t offset)
{
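	/*
	 * Objects that keep the same distance between original and
	 * generated offsets belong to the same chunk, so only start a
	 * new chunk when that difference changes.
	 */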
	if (reused_chunks_nr && reused_chunks[reused_chunks_nr-1].difference == offset)
		return;

	ALLOC_GROW(reused_chunks, reused_chunks_nr + 1,
		   reused_chunks_alloc);
	reused_chunks[reused_chunks_nr].original = where;
	reused_chunks[reused_chunks_nr].difference = offset;
	reused_chunks_nr++;
}

/*
 * Binary search to find the chunk that "where" is in. Note
 * that we're not looking for an exact match, just the first
 * chunk that contains it (which implicitly ends at the start
 * of the next chunk).
 */
static off_t find_reused_offset(off_t where)
{
	int lo = 0, hi = reused_chunks_nr;
	while (lo < hi) {
		int mi = lo + ((hi - lo) / 2);
		if (where == reused_chunks[mi].original)
			return reused_chunks[mi].difference;
		if (where < reused_chunks[mi].original)
			hi = mi;
		else
			lo = mi + 1;
	}

	/*
	 * The first chunk starts at zero, so we can't have gone below
	 * there.
	 */
	assert(lo);
	return reused_chunks[lo-1].difference;
}

static void write_reused_pack_one(size_t pos, struct hashfile *out,
				  struct pack_window **w_curs)
{
	off_t offset, next, cur;
	enum object_type type;
	unsigned long size;

	offset = reuse_packfile->revindex[pos].offset;
	next = reuse_packfile->revindex[pos + 1].offset;

	record_reused_object(offset, offset - hashfile_total(out));

	cur = offset;
	type = unpack_object_header(reuse_packfile, w_curs, &cur, &size);
	assert(type >= 0);

	if (type == OBJ_OFS_DELTA) {
		off_t base_offset;
		off_t fixup;

		unsigned char header[MAX_PACK_OBJECT_HEADER];
		unsigned len;

		base_offset = get_delta_base(reuse_packfile, w_curs, &cur, type, offset);
		assert(base_offset != 0);

		/* Convert to REF_DELTA if we must... */
		if (!allow_ofs_delta) {
			int base_pos = find_revindex_position(reuse_packfile, base_offset);
			struct object_id base_oid;

			nth_packed_object_id(&base_oid, reuse_packfile,
					     reuse_packfile->revindex[base_pos].nr);

			len = encode_in_pack_object_header(header, sizeof(header),
							   OBJ_REF_DELTA, size);
			hashwrite(out, header, len);
			hashwrite(out, base_oid.hash, the_hash_algo->rawsz);
			copy_pack_data(out, reuse_packfile, w_curs, cur, next - cur);
			return;
		}

		/* Otherwise see if we need to rewrite the offset... */
		fixup = find_reused_offset(offset) -
			find_reused_offset(base_offset);
		if (fixup) {
			unsigned char ofs_header[10];
			unsigned i, ofs_len;
			off_t ofs = offset - base_offset - fixup;
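			/*
			 * The fixup is the difference between how far the
			 * object and its base each moved, so subtracting it
			 * from the original distance yields the delta-base
			 * distance in the generated pack.
			 */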
896 | ||
897 | len = encode_in_pack_object_header(header, sizeof(header), | |
898 | OBJ_OFS_DELTA, size); | |
899 | ||
900 | i = sizeof(ofs_header) - 1; | |
901 | ofs_header[i] = ofs & 127; | |
902 | while (ofs >>= 7) | |
903 | ofs_header[--i] = 128 | (--ofs & 127); | |
904 | ||
905 | ofs_len = sizeof(ofs_header) - i; | |
906 | ||
907 | hashwrite(out, header, len); | |
908 | hashwrite(out, ofs_header + sizeof(ofs_header) - ofs_len, ofs_len); | |
909 | copy_pack_data(out, reuse_packfile, w_curs, cur, next - cur); | |
910 | return; | |
911 | } | |
912 | ||
913 | /* ...otherwise we have no fixup, and can write it verbatim */ | |
914 | } | |
915 | ||
916 | copy_pack_data(out, reuse_packfile, w_curs, offset, next - offset); | |
917 | } | |
918 | ||
919 | static size_t write_reused_pack_verbatim(struct hashfile *out, | |
920 | struct pack_window **w_curs) | |
921 | { | |
922 | size_t pos = 0; | |
923 | ||
924 | while (pos < reuse_packfile_bitmap->word_alloc && | |
925 | reuse_packfile_bitmap->words[pos] == (eword_t)~0) | |
926 | pos++; | |
927 | ||
928 | if (pos) { | |
929 | off_t to_write; | |
930 | ||
931 | written = (pos * BITS_IN_EWORD); | |
932 | to_write = reuse_packfile->revindex[written].offset | |
933 | - sizeof(struct pack_header); | |
934 | ||
935 | /* We're recording one chunk, not one object. */ | |
936 | record_reused_object(sizeof(struct pack_header), 0); | |
937 | hashflush(out); | |
938 | copy_pack_data(out, reuse_packfile, w_curs, | |
939 | sizeof(struct pack_header), to_write); | |
940 | ||
941 | display_progress(progress_state, written); | |
942 | } | |
943 | return pos; | |
944 | } | |
945 | ||
946 | static void write_reused_pack(struct hashfile *f) | |
947 | { | |
948 | size_t i = 0; | |
949 | uint32_t offset; | |
950 | struct pack_window *w_curs = NULL; | |
951 | ||
952 | if (allow_ofs_delta) | |
953 | i = write_reused_pack_verbatim(f, &w_curs); | |
954 | ||
955 | for (; i < reuse_packfile_bitmap->word_alloc; ++i) { | |
956 | eword_t word = reuse_packfile_bitmap->words[i]; | |
957 | size_t pos = (i * BITS_IN_EWORD); | |
958 | ||
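		/*
		 * Walk the set bits of this word; each one marks an
		 * object from the original pack that we can reuse.
		 */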
		for (offset = 0; offset < BITS_IN_EWORD; ++offset) {
			if ((word >> offset) == 0)
				break;

			offset += ewah_bit_ctz64(word >> offset);
			write_reused_pack_one(pos + offset, f, &w_curs);
			display_progress(progress_state, ++written);
		}
	}

	unuse_pack(&w_curs);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct hashfile *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		struct object_id oid;
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = hashfd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			assert(pack_to_stdout);
			write_reused_pack(f);
			offset = hashfile_total(f);
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import.
		 */
		if (pack_to_stdout) {
			finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
		} else {
			int fd = finalize_hashfile(f, oid.hash, 0);
			fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
						 nr_written, oid.hash, offset);
			close(fd);
			if (write_bitmap_index) {
				if (write_bitmap_index != WRITE_BITMAP_QUIET)
					warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno(_("failed to stat %s"), pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno(_("failed utime() on %s"), pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(oid.hash);
				bitmap_writer_build_type_index(
					&to_pack, written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, oid.hash);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(oid_to_hex(&oid));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
		    written, nr_result);
	trace2_data_intmax("pack-objects", the_repository,
			   "write_pack_file/wrote", nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	git_check_attr(the_repository->index, path, check);
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 */
static int have_duplicate_entry(const struct object_id *oid,
				int exclude)
{
	struct object_entry *entry;

	if (reuse_packfile_bitmap &&
	    bitmap_walk_contains(bitmap_git, reuse_packfile_bitmap, oid))
		return 1;

	entry = packlist_find(&to_pack, oid);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * make us omit the object, so we need to check all the packs.
	 *
	 * We can, however, first check whether these options can possibly
	 * matter; if they do not matter, we know we want the object in the
	 * generated pack. Otherwise, we signal "-1" at the end to tell the
	 * caller that we do not know either way, and it needs to check more
	 * packs.
	 */
	if (!ignore_packed_keep_on_disk &&
	    !ignore_packed_keep_in_core &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (p->pack_local &&
	    ((ignore_packed_keep_on_disk && p->pack_keep) ||
	     (ignore_packed_keep_in_core && p->pack_keep_in_core)))
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const struct object_id *oid,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	int want;
	struct list_head *pos;
	struct multi_pack_index *m;

	if (!exclude && local && has_loose_object_nonlocal(oid))
		return 0;

	/*
	 * If we already know the pack object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	for (m = get_multi_pack_index(the_repository); m; m = m->next) {
		struct pack_entry e;
		if (fill_midx_entry(the_repository, oid, &e, m)) {
			struct packed_git *p = e.p;
			off_t offset;

			if (p == *found_pack)
				offset = *found_offset;
			else
				offset = find_pack_entry_one(oid->hash, p);

			if (offset) {
				if (!*found_pack) {
					if (!is_pack_valid(p))
						continue;
					*found_offset = offset;
					*found_pack = p;
				}
				want = want_found_object(exclude, p);
				if (want != -1)
					return want;
			}
		}
	}

	list_for_each(pos, get_packed_git_mru(the_repository)) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(oid->hash, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				list_move(&p->mru,
					  get_packed_git_mru(the_repository));
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const struct object_id *oid,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, oid);
	entry->hash = hash;
	oe_set_type(entry, type);
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		oe_set_in_pack(&to_pack, entry, found_pack);
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const struct object_id *oid, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;

	display_progress(progress_state, ++nr_seen);

	if (have_duplicate_entry(oid, exclude))
		return 0;

	if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			if (write_bitmap_index != WRITE_BITMAP_QUIET)
				warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(oid, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    found_pack, found_offset);
	return 1;
}

static int add_object_entry_from_bitmap(const struct object_id *oid,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	display_progress(progress_state, ++nr_seen);

	if (have_duplicate_entry(oid, 0))
		return 0;

	if (!want_object_in_pack(oid, 0, &pack, &offset))
		return 0;

	create_object_entry(oid, type, name_hash, 0, 0, pack, offset);
	return 1;
}

struct pbase_tree_cache {
	struct object_id oid;
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

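/*
 * A small open-addressed cache of recently used preferred-base trees,
 * indexed by the first byte of the tree's object ID.
 */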
static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const struct object_id *oid)
{
	return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(oid);
	int available_ix = -1;

	/* pbase_tree_cache acts as a limited hashtable.
	 * Your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && oideq(&ent->oid, oid)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_object_file(oid, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	oidcpy(&nent->oid, oid);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(&entry.oid,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(&entry.oid);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
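/*
 * done_pbase_paths is kept sorted in descending hash order; the search
 * below returns the index of a match, or -(insertion point)-1 when the
 * hash is not yet present.
 */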
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
			   done_pbase_paths_num - pos - 1);
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(struct object_id *oid)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	struct object_id tree_oid;

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(the_repository, oid,
					  tree_type, &size, &tree_oid);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (oideq(&it->pcache.oid, &tree_oid)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	oidcpy(&it->pcache.oid, &tree_oid);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *tmp = it;
		it = tmp->next;
		free(tmp->pcache.tree_data);
		free(tmp);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		FREE_AND_NULL(pbase_tree_cache[i]);
	}

	FREE_AND_NULL(done_pbase_paths);
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

/*
 * Return 1 iff the object specified by "delta" can be sent
 * literally as a delta against the base in "base_oid". If
 * so, then *base_out will point to the entry in our packing
 * list, or NULL if we must use the external-base list.
 *
 * Depth value does not matter - find_deltas() will
 * never consider a reused delta as the base object to
 * deltify other objects against, in order to avoid
 * circular deltas.
 */
1623 | static int can_reuse_delta(const struct object_id *base_oid, | |
1624 | struct object_entry *delta, | |
1625 | struct object_entry **base_out) | |
1626 | { | |
1627 | struct object_entry *base; | |
1628 | ||
1629 | /* | |
1630 | * First see if we're already sending the base (or it's explicitly in | |
1631 | * our "excluded" list). | |
1632 | */ | |
1633 | base = packlist_find(&to_pack, base_oid); | |
1634 | if (base) { | |
1635 | if (!in_same_island(&delta->idx.oid, &base->idx.oid)) | |
1636 | return 0; | |
1637 | *base_out = base; | |
1638 | return 1; | |
1639 | } | |
1640 | ||
1641 | /* | |
1642 | * Otherwise, reachability bitmaps may tell us if the receiver has it, | |
1643 | * even if it was buried too deep in history to make it into the | |
1644 | * packing list. | |
1645 | */ | |
1646 | if (thin && bitmap_has_oid_in_uninteresting(bitmap_git, base_oid)) { | |
1647 | if (use_delta_islands) { | |
1648 | if (!in_same_island(&delta->idx.oid, base_oid)) | |
1649 | return 0; | |
1650 | } | |
1651 | *base_out = NULL; | |
1652 | return 1; | |
1653 | } | |
1654 | ||
1655 | return 0; | |
1656 | } | |
1657 | ||
1658 | static void check_object(struct object_entry *entry) | |
1659 | { | |
1660 | unsigned long canonical_size; | |
1661 | ||
1662 | if (IN_PACK(entry)) { | |
1663 | struct packed_git *p = IN_PACK(entry); | |
1664 | struct pack_window *w_curs = NULL; | |
1665 | int have_base = 0; | |
1666 | struct object_id base_ref; | |
1667 | struct object_entry *base_entry; | |
1668 | unsigned long used, used_0; | |
1669 | unsigned long avail; | |
1670 | off_t ofs; | |
1671 | unsigned char *buf, c; | |
1672 | enum object_type type; | |
1673 | unsigned long in_pack_size; | |
1674 | ||
1675 | buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail); | |
1676 | ||
1677 | /* | |
1678 | * We want in_pack_type even if we do not reuse the delta | |
1679 | * since non-delta representations could still be reused. | |
1680 | */ | |
1681 | used = unpack_object_header_buffer(buf, avail, | |
1682 | &type, | |
1683 | &in_pack_size); | |
1684 | if (used == 0) | |
1685 | goto give_up; | |
1686 | ||
1687 | if (type < 0) | |
1688 | BUG("invalid type %d", type); | |
1689 | entry->in_pack_type = type; | |
1690 | ||
1691 | /* | |
1692 | * Determine if this is a delta and if so whether we can | |
1693 | * reuse it or not. Otherwise let's find out as cheaply as | |
1694 | * possible what the actual type and size of this object are. | |
1695 | */ | |
1696 | switch (entry->in_pack_type) { | |
1697 | default: | |
1698 | /* Not a delta, hence we've already got all we need. */ | |
1699 | oe_set_type(entry, entry->in_pack_type); | |
1700 | SET_SIZE(entry, in_pack_size); | |
1701 | entry->in_pack_header_size = used; | |
1702 | if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB) | |
1703 | goto give_up; | |
1704 | unuse_pack(&w_curs); | |
1705 | return; | |
1706 | case OBJ_REF_DELTA: | |
1707 | if (reuse_delta && !entry->preferred_base) { | |
1708 | oidread(&base_ref, | |
1709 | use_pack(p, &w_curs, | |
1710 | entry->in_pack_offset + used, | |
1711 | NULL)); | |
1712 | have_base = 1; | |
1713 | } | |
1714 | entry->in_pack_header_size = used + the_hash_algo->rawsz; | |
1715 | break; | |
1716 | case OBJ_OFS_DELTA: | |
1717 | buf = use_pack(p, &w_curs, | |
1718 | entry->in_pack_offset + used, NULL); | |
1719 | used_0 = 0; | |
1720 | c = buf[used_0++]; | |
1721 | ofs = c & 127; | |
1722 | while (c & 128) { | |
1723 | ofs += 1; | |
1724 | if (!ofs || MSB(ofs, 7)) { | |
1725 | error(_("delta base offset overflow in pack for %s"), | |
1726 | oid_to_hex(&entry->idx.oid)); | |
1727 | goto give_up; | |
1728 | } | |
1729 | c = buf[used_0++]; | |
1730 | ofs = (ofs << 7) + (c & 127); | |
1731 | } | |
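| /* | |
|  * Worked example of the OFS_DELTA offset encoding decoded above | |
|  * (hypothetical bytes): 7 data bits per byte, most significant group | |
|  * first, with a +1 bias on each continuation byte so that every | |
|  * encoded length covers a disjoint range. For the two bytes 0x91 0x2e: | |
|  * | |
|  *     ofs = 0x91 & 127 = 17 | |
|  *     continuation: ofs = ((17 + 1) << 7) + (0x2e & 127) = 2350 | |
|  * | |
|  * so the delta base lives 2350 bytes before this entry's offset. | |
|  */ | |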
1732 | ofs = entry->in_pack_offset - ofs; | |
1733 | if (ofs <= 0 || ofs >= entry->in_pack_offset) { | |
1734 | error(_("delta base offset out of bound for %s"), | |
1735 | oid_to_hex(&entry->idx.oid)); | |
1736 | goto give_up; | |
1737 | } | |
1738 | if (reuse_delta && !entry->preferred_base) { | |
1739 | struct revindex_entry *revidx; | |
1740 | revidx = find_pack_revindex(p, ofs); | |
1741 | if (!revidx) | |
1742 | goto give_up; | |
1743 | if (!nth_packed_object_id(&base_ref, p, revidx->nr)) | |
1744 | have_base = 1; | |
1745 | } | |
1746 | entry->in_pack_header_size = used + used_0; | |
1747 | break; | |
1748 | } | |
1749 | ||
1750 | if (have_base && | |
1751 | can_reuse_delta(&base_ref, entry, &base_entry)) { | |
1752 | oe_set_type(entry, entry->in_pack_type); | |
1753 | SET_SIZE(entry, in_pack_size); /* delta size */ | |
1754 | SET_DELTA_SIZE(entry, in_pack_size); | |
1755 | ||
1756 | if (base_entry) { | |
1757 | SET_DELTA(entry, base_entry); | |
1758 | entry->delta_sibling_idx = base_entry->delta_child_idx; | |
1759 | SET_DELTA_CHILD(base_entry, entry); | |
1760 | } else { | |
1761 | SET_DELTA_EXT(entry, &base_ref); | |
1762 | } | |
1763 | ||
1764 | unuse_pack(&w_curs); | |
1765 | return; | |
1766 | } | |
1767 | ||
1768 | if (oe_type(entry)) { | |
1769 | off_t delta_pos; | |
1770 | ||
1771 | /* | |
1772 | * This must be a delta and we already know what the | |
1773 | * final object type is. Let's extract the actual | |
1774 | * object size from the delta header. | |
1775 | */ | |
1776 | delta_pos = entry->in_pack_offset + entry->in_pack_header_size; | |
1777 | canonical_size = get_size_from_delta(p, &w_curs, delta_pos); | |
1778 | if (canonical_size == 0) | |
1779 | goto give_up; | |
1780 | SET_SIZE(entry, canonical_size); | |
1781 | unuse_pack(&w_curs); | |
1782 | return; | |
1783 | } | |
1784 | ||
1785 | /* | |
1786 | * No choice but to fall back to the recursive delta walk | |
1787 | * with oid_object_info() to find out the object type | |
1788 | * at this point... | |
1789 | */ | |
1790 | give_up: | |
1791 | unuse_pack(&w_curs); | |
1792 | } | |
1793 | ||
1794 | oe_set_type(entry, | |
1795 | oid_object_info(the_repository, &entry->idx.oid, &canonical_size)); | |
1796 | if (entry->type_valid) { | |
1797 | SET_SIZE(entry, canonical_size); | |
1798 | } else { | |
1799 | /* | |
1800 | * A bad object type is checked in prepare_pack(). This is | |
1801 | * done to permit a missing preferred base object to be | |
1802 | * quietly ignored. Doing so can result in a larger | |
1803 | * pack file, but the transfer will still take place. | |
1804 | */ | |
1805 | } | |
1806 | } | |
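| /* | |
|  * To summarize the decision flow in check_object() above: for an | |
|  * entry found in a pack we try, in order, | |
|  * | |
|  *   1. a non-delta representation: take type and size straight from | |
|  *      the pack header and return; | |
|  *   2. a delta whose base is usable: keep the on-disk delta as-is; | |
|  *   3. a delta whose canonical type is already known: read only the | |
|  *      result size from the delta header; | |
|  *   4. otherwise give up and let oid_object_info() reconstruct the | |
|  *      type and size, possibly walking the whole delta chain. | |
|  */ | |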
1807 | ||
1808 | static int pack_offset_sort(const void *_a, const void *_b) | |
1809 | { | |
1810 | const struct object_entry *a = *(struct object_entry **)_a; | |
1811 | const struct object_entry *b = *(struct object_entry **)_b; | |
1812 | const struct packed_git *a_in_pack = IN_PACK(a); | |
1813 | const struct packed_git *b_in_pack = IN_PACK(b); | |
1814 | ||
1815 | /* avoid filesystem thrashing with loose objects */ | |
1816 | if (!a_in_pack && !b_in_pack) | |
1817 | return oidcmp(&a->idx.oid, &b->idx.oid); | |
1818 | ||
1819 | if (a_in_pack < b_in_pack) | |
1820 | return -1; | |
1821 | if (a_in_pack > b_in_pack) | |
1822 | return 1; | |
1823 | return a->in_pack_offset < b->in_pack_offset ? -1 : | |
1824 | (a->in_pack_offset > b->in_pack_offset); | |
1825 | } | |
1826 | ||
1827 | /* | |
1828 | * Drop an on-disk delta we were planning to reuse. Naively, this would | |
1829 | * just involve blanking out the "delta" field, but we have to deal | |
1830 | * with some extra book-keeping: | |
1831 | * | |
1832 | * 1. Removing ourselves from the delta_sibling linked list. | |
1833 | * | |
1834 | * 2. Updating our size/type to the non-delta representation. These were | |
1835 | * either not recorded initially (size) or overwritten with the delta type | |
1836 | * (type) when check_object() decided to reuse the delta. | |
1837 | * | |
1838 | * 3. Resetting our delta depth, as we are now a base object. | |
1839 | */ | |
1840 | static void drop_reused_delta(struct object_entry *entry) | |
1841 | { | |
1842 | unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx; | |
1843 | struct object_info oi = OBJECT_INFO_INIT; | |
1844 | enum object_type type; | |
1845 | unsigned long size; | |
1846 | ||
1847 | while (*idx) { | |
1848 | struct object_entry *oe = &to_pack.objects[*idx - 1]; | |
1849 | ||
1850 | if (oe == entry) | |
1851 | *idx = oe->delta_sibling_idx; | |
1852 | else | |
1853 | idx = &oe->delta_sibling_idx; | |
1854 | } | |
1855 | SET_DELTA(entry, NULL); | |
1856 | entry->depth = 0; | |
1857 | ||
1858 | oi.sizep = &size; | |
1859 | oi.typep = &type; | |
1860 | if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) { | |
1861 | /* | |
1862 | * We failed to get the info from this pack for some reason; | |
1863 | * fall back to oid_object_info, which may find another copy. | |
1864 | * And if that fails, the error will be recorded in oe_type(entry) | |
1865 | * and dealt with in prepare_pack(). | |
1866 | */ | |
1867 | oe_set_type(entry, | |
1868 | oid_object_info(the_repository, &entry->idx.oid, &size)); | |
1869 | } else { | |
1870 | oe_set_type(entry, type); | |
1871 | } | |
1872 | SET_SIZE(entry, size); | |
1873 | } | |
1874 | ||
1875 | /* | |
1876 | * Follow the chain of deltas from this entry onward, throwing away any links | |
1877 | * that cause us to hit a cycle (as determined by the DFS state flags in | |
1878 | * the entries). | |
1879 | * | |
1880 | * We also detect too-long reused chains that would violate our --depth | |
1881 | * limit. | |
1882 | */ | |
1883 | static void break_delta_chains(struct object_entry *entry) | |
1884 | { | |
1885 | /* | |
1886 | * The actual depth of each object we will write is stored as an int, | |
1887 | * as it cannot exceed our int "depth" limit. But before we break | |
1888 | * chains based on that limit, we may potentially go as deep as the | |
1889 | * number of objects, which is elsewhere bounded to a uint32_t. | |
1890 | */ | |
1891 | uint32_t total_depth; | |
1892 | struct object_entry *cur, *next; | |
1893 | ||
1894 | for (cur = entry, total_depth = 0; | |
1895 | cur; | |
1896 | cur = DELTA(cur), total_depth++) { | |
1897 | if (cur->dfs_state == DFS_DONE) { | |
1898 | /* | |
1899 | * We've already seen this object and know it isn't | |
1900 | * part of a cycle. We do need to append its depth | |
1901 | * to our count. | |
1902 | */ | |
1903 | total_depth += cur->depth; | |
1904 | break; | |
1905 | } | |
1906 | ||
1907 | /* | |
1908 | * We break cycles before looping, so an ACTIVE state (or any | |
1909 | * other cruft which made its way into the state variable) | |
1910 | * is a bug. | |
1911 | */ | |
1912 | if (cur->dfs_state != DFS_NONE) | |
1913 | BUG("confusing delta dfs state in first pass: %d", | |
1914 | cur->dfs_state); | |
1915 | ||
1916 | /* | |
1917 | * Now we know this is the first time we've seen the object. If | |
1918 | * it's not a delta, we're done traversing, but we'll mark it | |
1919 | * done to save time on future traversals. | |
1920 | */ | |
1921 | if (!DELTA(cur)) { | |
1922 | cur->dfs_state = DFS_DONE; | |
1923 | break; | |
1924 | } | |
1925 | ||
1926 | /* | |
1927 | * Mark ourselves as active and see if the next step causes | |
1928 | * us to cycle to another active object. It's important to do | |
1929 | * this _before_ we loop, because it impacts where we make the | |
1930 | * cut, and thus how our total_depth counter works. | |
1931 | * E.g., we may see a partial loop like: | |
1932 | * | |
1933 | * A -> B -> C -> D -> B | |
1934 | * | |
1935 | * Cutting B->C breaks the cycle. But now the depth of A is | |
1936 | * only 1, and our total_depth counter is at 3. The size of the | |
1937 | * error is always one less than the size of the cycle we | |
1938 | * broke. Objects C and D were "lost" from A's chain. | |
1939 | * | |
1940 | * If we instead cut D->B, then the depth of A is correct at 3. | |
1941 | * We keep all commits in the chain that we examined. | |
1942 | */ | |
1943 | cur->dfs_state = DFS_ACTIVE; | |
1944 | if (DELTA(cur)->dfs_state == DFS_ACTIVE) { | |
1945 | drop_reused_delta(cur); | |
1946 | cur->dfs_state = DFS_DONE; | |
1947 | break; | |
1948 | } | |
1949 | } | |
1950 | ||
1951 | /* | |
1952 | * And now that we've gone all the way to the bottom of the chain, we | |
1953 | * need to clear the active flags and set the depth fields as | |
1954 | * appropriate. Unlike the loop above, which can quit when it drops a | |
1955 | * delta, we need to keep going to look for more depth cuts. So we need | |
1956 | * an extra "next" pointer to keep going after we reset cur->delta. | |
1957 | */ | |
1958 | for (cur = entry; cur; cur = next) { | |
1959 | next = DELTA(cur); | |
1960 | ||
1961 | /* | |
1962 | * We should have a chain of zero or more ACTIVE states down to | |
1963 | * a final DONE. We can quit after the DONE, because either it | |
1964 | * has no bases, or we've already handled them in a previous | |
1965 | * call. | |
1966 | */ | |
1967 | if (cur->dfs_state == DFS_DONE) | |
1968 | break; | |
1969 | else if (cur->dfs_state != DFS_ACTIVE) | |
1970 | BUG("confusing delta dfs state in second pass: %d", | |
1971 | cur->dfs_state); | |
1972 | ||
1973 | /* | |
1974 | * If the total_depth is more than depth, then we need to snip | |
1975 | * the chain into two or more smaller chains that don't exceed | |
1976 | * the maximum depth. Most of the resulting chains will contain | |
1977 | * (depth + 1) entries (i.e., depth deltas plus one base), and | |
1978 | * the last chain (i.e., the one containing entry) will contain | |
1979 | * whatever entries are left over, namely | |
1980 | * (total_depth % (depth + 1)) of them. | |
1981 | * | |
1982 | * Since we are iterating towards decreasing depth, we need to | |
1983 | * decrement total_depth as we go, and we need to write to the | |
1984 | * entry what its final depth will be after all of the | |
1985 | * snipping. Since we're snipping into chains of length (depth | |
1986 | * + 1) entries, the final depth of an entry will be its | |
1987 | * original depth modulo (depth + 1). Any time we encounter an | |
1988 | * entry whose final depth is supposed to be zero, we snip it | |
1989 | * from its delta base, thereby making it so. | |
1990 | */ | |
1991 | cur->depth = (total_depth--) % (depth + 1); | |
1992 | if (!cur->depth) | |
1993 | drop_reused_delta(cur); | |
1994 | ||
1995 | cur->dfs_state = DFS_DONE; | |
1996 | } | |
1997 | } | |
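| /* | |
|  * Worked example of the snipping arithmetic above, with a | |
|  * hypothetical --depth=2 (so chains hold at most 3 entries) and a | |
|  * reused chain A -> B -> C -> D -> E -> F -> G, where G is the base | |
|  * and total_depth = 6: | |
|  * | |
|  *     A: 6 % 3 = 0  -> snipped, A becomes its own base | |
|  *     B: 5 % 3 = 2 | |
|  *     C: 4 % 3 = 1 | |
|  *     D: 3 % 3 = 0  -> snipped, D becomes a base | |
|  *     E: 2 % 3 = 2 | |
|  *     F: 1 % 3 = 1 | |
|  * | |
|  * leaving the chains B -> C -> D and E -> F -> G, each within the | |
|  * depth limit. | |
|  */ | |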
1998 | ||
1999 | static void get_object_details(void) | |
2000 | { | |
2001 | uint32_t i; | |
2002 | struct object_entry **sorted_by_offset; | |
2003 | ||
2004 | if (progress) | |
2005 | progress_state = start_progress(_("Counting objects"), | |
2006 | to_pack.nr_objects); | |
2007 | ||
2008 | sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *)); | |
2009 | for (i = 0; i < to_pack.nr_objects; i++) | |
2010 | sorted_by_offset[i] = to_pack.objects + i; | |
2011 | QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort); | |
2012 | ||
2013 | for (i = 0; i < to_pack.nr_objects; i++) { | |
2014 | struct object_entry *entry = sorted_by_offset[i]; | |
2015 | check_object(entry); | |
2016 | if (entry->type_valid && | |
2017 | oe_size_greater_than(&to_pack, entry, big_file_threshold)) | |
2018 | entry->no_try_delta = 1; | |
2019 | display_progress(progress_state, i + 1); | |
2020 | } | |
2021 | stop_progress(&progress_state); | |
2022 | ||
2023 | /* | |
2024 | * This must happen in a second pass, since we rely on the delta | |
2025 | * information for the whole list being completed. | |
2026 | */ | |
2027 | for (i = 0; i < to_pack.nr_objects; i++) | |
2028 | break_delta_chains(&to_pack.objects[i]); | |
2029 | ||
2030 | free(sorted_by_offset); | |
2031 | } | |
2032 | ||
2033 | /* | |
2034 | * We search for deltas in a list sorted by type, by filename hash, and then | |
2035 | * by size, so that we see progressively smaller and smaller files. | |
2036 | * That's because we prefer deltas to be from the bigger file | |
2037 | * to the smaller -- deletes are potentially cheaper, but perhaps | |
2038 | * more importantly, the bigger file is likely the more recent | |
2039 | * one. The deepest deltas are therefore the oldest objects, which are | |
2040 | * the least likely to be accessed often. | |
2041 | */ | |
2042 | static int type_size_sort(const void *_a, const void *_b) | |
2043 | { | |
2044 | const struct object_entry *a = *(struct object_entry **)_a; | |
2045 | const struct object_entry *b = *(struct object_entry **)_b; | |
2046 | const enum object_type a_type = oe_type(a); | |
2047 | const enum object_type b_type = oe_type(b); | |
2048 | const unsigned long a_size = SIZE(a); | |
2049 | const unsigned long b_size = SIZE(b); | |
2050 | ||
2051 | if (a_type > b_type) | |
2052 | return -1; | |
2053 | if (a_type < b_type) | |
2054 | return 1; | |
2055 | if (a->hash > b->hash) | |
2056 | return -1; | |
2057 | if (a->hash < b->hash) | |
2058 | return 1; | |
2059 | if (a->preferred_base > b->preferred_base) | |
2060 | return -1; | |
2061 | if (a->preferred_base < b->preferred_base) | |
2062 | return 1; | |
2063 | if (use_delta_islands) { | |
2064 | const int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid); | |
2065 | if (island_cmp) | |
2066 | return island_cmp; | |
2067 | } | |
2068 | if (a_size > b_size) | |
2069 | return -1; | |
2070 | if (a_size < b_size) | |
2071 | return 1; | |
2072 | return a < b ? -1 : (a > b); /* newest first */ | |
2073 | } | |
2074 | ||
2075 | struct unpacked { | |
2076 | struct object_entry *entry; | |
2077 | void *data; | |
2078 | struct delta_index *index; | |
2079 | unsigned depth; | |
2080 | }; | |
2081 | ||
2082 | static int delta_cacheable(unsigned long src_size, unsigned long trg_size, | |
2083 | unsigned long delta_size) | |
2084 | { | |
2085 | if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size) | |
2086 | return 0; | |
2087 | ||
2088 | if (delta_size < cache_max_small_delta_size) | |
2089 | return 1; | |
2090 | ||
2091 | /* cache the delta if the objects are large enough compared to the delta size */ | |
2092 | if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10)) | |
2093 | return 1; | |
2094 | ||
2095 | return 0; | |
2096 | } | |
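| /* | |
|  * For example (hypothetical sizes): with a 2 MiB source and a 4 MiB | |
|  * target, (2097152 >> 20) + (4194304 >> 21) = 4, so any delta under | |
|  * 4 KiB satisfies 4 > (delta_size >> 10) and is cached even when it | |
|  * exceeds cache_max_small_delta_size. | |
|  */ | |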
2097 | ||
2098 | /* Protect delta_cache_size */ | |
2099 | static pthread_mutex_t cache_mutex; | |
2100 | #define cache_lock() pthread_mutex_lock(&cache_mutex) | |
2101 | #define cache_unlock() pthread_mutex_unlock(&cache_mutex) | |
2102 | ||
2103 | /* | |
2104 | * Protect object list partitioning (e.g. struct thread_param) and | |
2105 | * progress_state | |
2106 | */ | |
2107 | static pthread_mutex_t progress_mutex; | |
2108 | #define progress_lock() pthread_mutex_lock(&progress_mutex) | |
2109 | #define progress_unlock() pthread_mutex_unlock(&progress_mutex) | |
2110 | ||
2111 | /* | |
2112 | * Access to struct object_entry is unprotected since each thread owns | |
2113 | * a portion of the main object list. Just don't access object entries | |
2114 | * ahead in the list because they can be stolen and would need | |
2115 | * progress_mutex for protection. | |
2116 | */ | |
2117 | ||
2118 | /* | |
2119 | * Return the size of the object without doing any delta | |
2120 | * reconstruction (so non-deltas are true object sizes, but deltas | |
2121 | * return the size of the delta data). | |
2122 | */ | |
2123 | unsigned long oe_get_size_slow(struct packing_data *pack, | |
2124 | const struct object_entry *e) | |
2125 | { | |
2126 | struct packed_git *p; | |
2127 | struct pack_window *w_curs; | |
2128 | unsigned char *buf; | |
2129 | enum object_type type; | |
2130 | unsigned long used, avail, size; | |
2131 | ||
2132 | if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) { | |
2133 | packing_data_lock(&to_pack); | |
2134 | if (oid_object_info(the_repository, &e->idx.oid, &size) < 0) | |
2135 | die(_("unable to get size of %s"), | |
2136 | oid_to_hex(&e->idx.oid)); | |
2137 | packing_data_unlock(&to_pack); | |
2138 | return size; | |
2139 | } | |
2140 | ||
2141 | p = oe_in_pack(pack, e); | |
2142 | if (!p) | |
2143 | BUG("when e->type is a delta, it must belong to a pack"); | |
2144 | ||
2145 | packing_data_lock(&to_pack); | |
2146 | w_curs = NULL; | |
2147 | buf = use_pack(p, &w_curs, e->in_pack_offset, &avail); | |
2148 | used = unpack_object_header_buffer(buf, avail, &type, &size); | |
2149 | if (used == 0) | |
2150 | die(_("unable to parse object header of %s"), | |
2151 | oid_to_hex(&e->idx.oid)); | |
2152 | ||
2153 | unuse_pack(&w_curs); | |
2154 | packing_data_unlock(&to_pack); | |
2155 | return size; | |
2156 | } | |
2157 | ||
2158 | static int try_delta(struct unpacked *trg, struct unpacked *src, | |
2159 | unsigned max_depth, unsigned long *mem_usage) | |
2160 | { | |
2161 | struct object_entry *trg_entry = trg->entry; | |
2162 | struct object_entry *src_entry = src->entry; | |
2163 | unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz; | |
2164 | unsigned ref_depth; | |
2165 | enum object_type type; | |
2166 | void *delta_buf; | |
2167 | ||
2168 | /* Don't bother doing diffs between different types */ | |
2169 | if (oe_type(trg_entry) != oe_type(src_entry)) | |
2170 | return -1; | |
2171 | ||
2172 | /* | |
2173 | * We do not bother to try a delta that we discarded on an | |
2174 | * earlier try, but only when reusing delta data. Note that | |
2175 | * src_entry that is marked as the preferred_base should always | |
2176 | * be considered, as even if we produce a suboptimal delta against | |
2177 | * it, we will still save the transfer cost, as we already know | |
2178 | * the other side has it and we won't send src_entry at all. | |
2179 | */ | |
2180 | if (reuse_delta && IN_PACK(trg_entry) && | |
2181 | IN_PACK(trg_entry) == IN_PACK(src_entry) && | |
2182 | !src_entry->preferred_base && | |
2183 | trg_entry->in_pack_type != OBJ_REF_DELTA && | |
2184 | trg_entry->in_pack_type != OBJ_OFS_DELTA) | |
2185 | return 0; | |
2186 | ||
2187 | /* Let's not bust the allowed depth. */ | |
2188 | if (src->depth >= max_depth) | |
2189 | return 0; | |
2190 | ||
2191 | /* Now some size filtering heuristics. */ | |
2192 | trg_size = SIZE(trg_entry); | |
2193 | if (!DELTA(trg_entry)) { | |
2194 | max_size = trg_size/2 - the_hash_algo->rawsz; | |
2195 | ref_depth = 1; | |
2196 | } else { | |
2197 | max_size = DELTA_SIZE(trg_entry); | |
2198 | ref_depth = trg->depth; | |
2199 | } | |
2200 | max_size = (uint64_t)max_size * (max_depth - src->depth) / | |
2201 | (max_depth - ref_depth + 1); | |
2202 | if (max_size == 0) | |
2203 | return 0; | |
2204 | src_size = SIZE(src_entry); | |
2205 | sizediff = src_size < trg_size ? trg_size - src_size : 0; | |
2206 | if (sizediff >= max_size) | |
2207 | return 0; | |
2208 | if (trg_size < src_size / 32) | |
2209 | return 0; | |
2210 | ||
2211 | if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid)) | |
2212 | return 0; | |
2213 | ||
2214 | /* Load data if not already done */ | |
2215 | if (!trg->data) { | |
2216 | packing_data_lock(&to_pack); | |
2217 | trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz); | |
2218 | packing_data_unlock(&to_pack); | |
2219 | if (!trg->data) | |
2220 | die(_("object %s cannot be read"), | |
2221 | oid_to_hex(&trg_entry->idx.oid)); | |
2222 | if (sz != trg_size) | |
2223 | die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"), | |
2224 | oid_to_hex(&trg_entry->idx.oid), (uintmax_t)sz, | |
2225 | (uintmax_t)trg_size); | |
2226 | *mem_usage += sz; | |
2227 | } | |
2228 | if (!src->data) { | |
2229 | packing_data_lock(&to_pack); | |
2230 | src->data = read_object_file(&src_entry->idx.oid, &type, &sz); | |
2231 | packing_data_unlock(&to_pack); | |
2232 | if (!src->data) { | |
2233 | if (src_entry->preferred_base) { | |
2234 | static int warned = 0; | |
2235 | if (!warned++) | |
2236 | warning(_("object %s cannot be read"), | |
2237 | oid_to_hex(&src_entry->idx.oid)); | |
2238 | /* | |
2239 | * Those objects are not included in the | |
2240 | * resulting pack. Be resilient and ignore | |
2241 | * them if they can't be read, so that the | |
2242 | * pack can still be created. | |
2243 | */ | |
2244 | return 0; | |
2245 | } | |
2246 | die(_("object %s cannot be read"), | |
2247 | oid_to_hex(&src_entry->idx.oid)); | |
2248 | } | |
2249 | if (sz != src_size) | |
2250 | die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"), | |
2251 | oid_to_hex(&src_entry->idx.oid), (uintmax_t)sz, | |
2252 | (uintmax_t)src_size); | |
2253 | *mem_usage += sz; | |
2254 | } | |
2255 | if (!src->index) { | |
2256 | src->index = create_delta_index(src->data, src_size); | |
2257 | if (!src->index) { | |
2258 | static int warned = 0; | |
2259 | if (!warned++) | |
2260 | warning(_("suboptimal pack - out of memory")); | |
2261 | return 0; | |
2262 | } | |
2263 | *mem_usage += sizeof_delta_index(src->index); | |
2264 | } | |
2265 | ||
2266 | delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size); | |
2267 | if (!delta_buf) | |
2268 | return 0; | |
2269 | ||
2270 | if (DELTA(trg_entry)) { | |
2271 | /* Prefer only shallower same-sized deltas. */ | |
2272 | if (delta_size == DELTA_SIZE(trg_entry) && | |
2273 | src->depth + 1 >= trg->depth) { | |
2274 | free(delta_buf); | |
2275 | return 0; | |
2276 | } | |
2277 | } | |
2278 | ||
2279 | /* | |
2280 | * Handle memory allocation outside of the cache | |
2281 | * accounting lock. The compiler will optimize the strangeness | |
2282 | * away when NO_PTHREADS is defined. | |
2283 | */ | |
2284 | free(trg_entry->delta_data); | |
2285 | cache_lock(); | |
2286 | if (trg_entry->delta_data) { | |
2287 | delta_cache_size -= DELTA_SIZE(trg_entry); | |
2288 | trg_entry->delta_data = NULL; | |
2289 | } | |
2290 | if (delta_cacheable(src_size, trg_size, delta_size)) { | |
2291 | delta_cache_size += delta_size; | |
2292 | cache_unlock(); | |
2293 | trg_entry->delta_data = xrealloc(delta_buf, delta_size); | |
2294 | } else { | |
2295 | cache_unlock(); | |
2296 | free(delta_buf); | |
2297 | } | |
2298 | ||
2299 | SET_DELTA(trg_entry, src_entry); | |
2300 | SET_DELTA_SIZE(trg_entry, delta_size); | |
2301 | trg->depth = src->depth + 1; | |
2302 | ||
2303 | return 1; | |
2304 | } | |
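| /* | |
|  * The max_size scaling in try_delta() above can be read with | |
|  * hypothetical numbers: a 1000-byte target with no delta yet allows | |
|  * at most 1000/2 - 20 = 480 bytes (SHA-1 rawsz) at ref_depth 1; if | |
|  * the candidate source already sits at depth 10 of a max_depth of | |
|  * 50, the budget shrinks to 480 * (50 - 10) / (50 - 1 + 1) = 384 | |
|  * bytes, so deeper bases must earn their keep with smaller deltas. | |
|  */ | |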
2305 | ||
2306 | static unsigned int check_delta_limit(struct object_entry *me, unsigned int n) | |
2307 | { | |
2308 | struct object_entry *child = DELTA_CHILD(me); | |
2309 | unsigned int m = n; | |
2310 | while (child) { | |
2311 | const unsigned int c = check_delta_limit(child, n + 1); | |
2312 | if (m < c) | |
2313 | m = c; | |
2314 | child = DELTA_SIBLING(child); | |
2315 | } | |
2316 | return m; | |
2317 | } | |
2318 | ||
2319 | static unsigned long free_unpacked(struct unpacked *n) | |
2320 | { | |
2321 | unsigned long freed_mem = sizeof_delta_index(n->index); | |
2322 | free_delta_index(n->index); | |
2323 | n->index = NULL; | |
2324 | if (n->data) { | |
2325 | freed_mem += SIZE(n->entry); | |
2326 | FREE_AND_NULL(n->data); | |
2327 | } | |
2328 | n->entry = NULL; | |
2329 | n->depth = 0; | |
2330 | return freed_mem; | |
2331 | } | |
2332 | ||
2333 | static void find_deltas(struct object_entry **list, unsigned *list_size, | |
2334 | int window, int depth, unsigned *processed) | |
2335 | { | |
2336 | uint32_t i, idx = 0, count = 0; | |
2337 | struct unpacked *array; | |
2338 | unsigned long mem_usage = 0; | |
2339 | ||
2340 | array = xcalloc(window, sizeof(struct unpacked)); | |
2341 | ||
2342 | for (;;) { | |
2343 | struct object_entry *entry; | |
2344 | struct unpacked *n = array + idx; | |
2345 | int j, max_depth, best_base = -1; | |
2346 | ||
2347 | progress_lock(); | |
2348 | if (!*list_size) { | |
2349 | progress_unlock(); | |
2350 | break; | |
2351 | } | |
2352 | entry = *list++; | |
2353 | (*list_size)--; | |
2354 | if (!entry->preferred_base) { | |
2355 | (*processed)++; | |
2356 | display_progress(progress_state, *processed); | |
2357 | } | |
2358 | progress_unlock(); | |
2359 | ||
2360 | mem_usage -= free_unpacked(n); | |
2361 | n->entry = entry; | |
2362 | ||
2363 | while (window_memory_limit && | |
2364 | mem_usage > window_memory_limit && | |
2365 | count > 1) { | |
2366 | const uint32_t tail = (idx + window - count) % window; | |
2367 | mem_usage -= free_unpacked(array + tail); | |
2368 | count--; | |
2369 | } | |
2370 | ||
2371 | /* We do not compute a delta to *create* objects we are not | |
2372 | * going to pack. | |
2373 | */ | |
2374 | if (entry->preferred_base) | |
2375 | goto next; | |
2376 | ||
2377 | /* | |
2378 | * If the current object is at the pack edge, take the depth of the | |
2379 | * objects that depend on the current object into account; | |
2380 | * otherwise they would become too deep. | |
2381 | */ | |
2382 | max_depth = depth; | |
2383 | if (DELTA_CHILD(entry)) { | |
2384 | max_depth -= check_delta_limit(entry, 0); | |
2385 | if (max_depth <= 0) | |
2386 | goto next; | |
2387 | } | |
2388 | ||
2389 | j = window; | |
2390 | while (--j > 0) { | |
2391 | int ret; | |
2392 | uint32_t other_idx = idx + j; | |
2393 | struct unpacked *m; | |
2394 | if (other_idx >= window) | |
2395 | other_idx -= window; | |
2396 | m = array + other_idx; | |
2397 | if (!m->entry) | |
2398 | break; | |
2399 | ret = try_delta(n, m, max_depth, &mem_usage); | |
2400 | if (ret < 0) | |
2401 | break; | |
2402 | else if (ret > 0) | |
2403 | best_base = other_idx; | |
2404 | } | |
2405 | ||
2406 | /* | |
2407 | * If we decided to cache the delta data, then it is best | |
2408 | * to compress it right away. First because we have to do | |
2409 | * it anyway, and doing it here while we're threaded will | |
2410 | * save a lot of time in the non threaded write phase, | |
2411 | * as well as allow for caching more deltas within | |
2412 | * the same cache size limit. | |
2413 | * ... | |
2414 | * But only if not writing to stdout, since in that case | |
2415 | * the network is most likely throttling writes anyway, | |
2416 | * and therefore it is best to go to the write phase ASAP | |
2417 | * instead, as we can afford spending more time compressing | |
2418 | * between writes at that moment. | |
2419 | */ | |
2420 | if (entry->delta_data && !pack_to_stdout) { | |
2421 | unsigned long size; | |
2422 | ||
2423 | size = do_compress(&entry->delta_data, DELTA_SIZE(entry)); | |
2424 | if (size < (1U << OE_Z_DELTA_BITS)) { | |
2425 | entry->z_delta_size = size; | |
2426 | cache_lock(); | |
2427 | delta_cache_size -= DELTA_SIZE(entry); | |
2428 | delta_cache_size += entry->z_delta_size; | |
2429 | cache_unlock(); | |
2430 | } else { | |
2431 | FREE_AND_NULL(entry->delta_data); | |
2432 | entry->z_delta_size = 0; | |
2433 | } | |
2434 | } | |
2435 | ||
2436 | /* If we made n a delta, and if n is already at max | |
2437 | * depth, leaving it in the window is pointless; we | |
2438 | * should evict it first. | |
2439 | */ | |
2440 | if (DELTA(entry) && max_depth <= n->depth) | |
2441 | continue; | |
2442 | ||
2443 | /* | |
2444 | * Move the best delta base up in the window, after the | |
2445 | * currently deltified object, to keep it longer. It will | |
2446 | * be the first base object to be attempted next. | |
2447 | */ | |
2448 | if (DELTA(entry)) { | |
2449 | struct unpacked swap = array[best_base]; | |
2450 | int dist = (window + idx - best_base) % window; | |
2451 | int dst = best_base; | |
2452 | while (dist--) { | |
2453 | int src = (dst + 1) % window; | |
2454 | array[dst] = array[src]; | |
2455 | dst = src; | |
2456 | } | |
2457 | array[dst] = swap; | |
2458 | } | |
2459 | ||
2460 | next: | |
2461 | idx++; | |
2462 | if (count + 1 < window) | |
2463 | count++; | |
2464 | if (idx >= window) | |
2465 | idx = 0; | |
2466 | } | |
2467 | ||
2468 | for (i = 0; i < window; ++i) { | |
2469 | free_delta_index(array[i].index); | |
2470 | free(array[i].data); | |
2471 | } | |
2472 | free(array); | |
2473 | } | |
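| /* | |
|  * The window rotation at the end of the loop above, traced with | |
|  * hypothetical indices window = 4, idx = 1, best_base = 3: | |
|  * dist = (4 + 1 - 3) % 4 = 2, so array[3] <- array[0], | |
|  * array[0] <- array[1], and the saved best base lands in array[1]. | |
|  * On the next iteration (idx = 2) the search tries | |
|  * (idx + window - 1) % window = 1 first, i.e. the promoted base. | |
|  */ | |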
2474 | ||
2475 | /* | |
2476 | * The main object list is split into smaller lists, each is handed to | |
2477 | * one worker. | |
2478 | * | |
2479 | * The main thread waits on the condition that (at least) one of the workers | |
2480 | * has stopped working (which is indicated in the .working member of | |
2481 | * struct thread_params). | |
2482 | * | |
2483 | * When a worker thread has completed its work, it sets .working to 0, | |
2484 | * signals the main thread, and waits on the condition that .data_ready | |
2485 | * becomes 1. | |
2486 | * | |
2487 | * The main thread steals half of the work from the worker that has the | |
2488 | * most work left and hands it to the idle worker. | |
2489 | */ | |
2490 | ||
2491 | struct thread_params { | |
2492 | pthread_t thread; | |
2493 | struct object_entry **list; | |
2494 | unsigned list_size; | |
2495 | unsigned remaining; | |
2496 | int window; | |
2497 | int depth; | |
2498 | int working; | |
2499 | int data_ready; | |
2500 | pthread_mutex_t mutex; | |
2501 | pthread_cond_t cond; | |
2502 | unsigned *processed; | |
2503 | }; | |
2504 | ||
2505 | static pthread_cond_t progress_cond; | |
2506 | ||
2507 | /* | |
2508 | * Mutexes and condition variables can't be statically initialized on Windows. | |
2509 | */ | |
2510 | static void init_threaded_search(void) | |
2511 | { | |
2512 | pthread_mutex_init(&cache_mutex, NULL); | |
2513 | pthread_mutex_init(&progress_mutex, NULL); | |
2514 | pthread_cond_init(&progress_cond, NULL); | |
2515 | } | |
2516 | ||
2517 | static void cleanup_threaded_search(void) | |
2518 | { | |
2519 | pthread_cond_destroy(&progress_cond); | |
2520 | pthread_mutex_destroy(&cache_mutex); | |
2521 | pthread_mutex_destroy(&progress_mutex); | |
2522 | } | |
2523 | ||
2524 | static void *threaded_find_deltas(void *arg) | |
2525 | { | |
2526 | struct thread_params *me = arg; | |
2527 | ||
2528 | progress_lock(); | |
2529 | while (me->remaining) { | |
2530 | progress_unlock(); | |
2531 | ||
2532 | find_deltas(me->list, &me->remaining, | |
2533 | me->window, me->depth, me->processed); | |
2534 | ||
2535 | progress_lock(); | |
2536 | me->working = 0; | |
2537 | pthread_cond_signal(&progress_cond); | |
2538 | progress_unlock(); | |
2539 | ||
2540 | /* | |
2541 | * We must not set ->data_ready before we wait on the | |
2542 | * condition because the main thread may have set it to 1 | |
2543 | * before we get here. In order to be sure that new | |
2544 | * work is available if we see 1 in ->data_ready, it | |
2545 | * was initialized to 0 before this thread was spawned | |
2546 | * and we reset it to 0 right away. | |
2547 | */ | |
2548 | pthread_mutex_lock(&me->mutex); | |
2549 | while (!me->data_ready) | |
2550 | pthread_cond_wait(&me->cond, &me->mutex); | |
2551 | me->data_ready = 0; | |
2552 | pthread_mutex_unlock(&me->mutex); | |
2553 | ||
2554 | progress_lock(); | |
2555 | } | |
2556 | progress_unlock(); | |
2557 | /* leave ->working 1 so that this doesn't get more work assigned */ | |
2558 | return NULL; | |
2559 | } | |
2560 | ||
2561 | static void ll_find_deltas(struct object_entry **list, unsigned list_size, | |
2562 | int window, int depth, unsigned *processed) | |
2563 | { | |
2564 | struct thread_params *p; | |
2565 | int i, ret, active_threads = 0; | |
2566 | ||
2567 | init_threaded_search(); | |
2568 | ||
2569 | if (delta_search_threads <= 1) { | |
2570 | find_deltas(list, &list_size, window, depth, processed); | |
2571 | cleanup_threaded_search(); | |
2572 | return; | |
2573 | } | |
2574 | if (progress > pack_to_stdout) | |
2575 | fprintf_ln(stderr, _("Delta compression using up to %d threads"), | |
2576 | delta_search_threads); | |
2577 | p = xcalloc(delta_search_threads, sizeof(*p)); | |
2578 | ||
2579 | /* Partition the work amongst work threads. */ | |
2580 | for (i = 0; i < delta_search_threads; i++) { | |
2581 | unsigned sub_size = list_size / (delta_search_threads - i); | |
2582 | ||
2583 | /* don't use segments that are too small, or no deltas will be found */ | |
2584 | if (sub_size < 2*window && i+1 < delta_search_threads) | |
2585 | sub_size = 0; | |
2586 | ||
2587 | p[i].window = window; | |
2588 | p[i].depth = depth; | |
2589 | p[i].processed = processed; | |
2590 | p[i].working = 1; | |
2591 | p[i].data_ready = 0; | |
2592 | ||
2593 | /* try to split chunks on "path" boundaries */ | |
2594 | while (sub_size && sub_size < list_size && | |
2595 | list[sub_size]->hash && | |
2596 | list[sub_size]->hash == list[sub_size-1]->hash) | |
2597 | sub_size++; | |
2598 | ||
2599 | p[i].list = list; | |
2600 | p[i].list_size = sub_size; | |
2601 | p[i].remaining = sub_size; | |
2602 | ||
2603 | list += sub_size; | |
2604 | list_size -= sub_size; | |
2605 | } | |
2606 | ||
2607 | /* Start work threads. */ | |
2608 | for (i = 0; i < delta_search_threads; i++) { | |
2609 | if (!p[i].list_size) | |
2610 | continue; | |
2611 | pthread_mutex_init(&p[i].mutex, NULL); | |
2612 | pthread_cond_init(&p[i].cond, NULL); | |
2613 | ret = pthread_create(&p[i].thread, NULL, | |
2614 | threaded_find_deltas, &p[i]); | |
2615 | if (ret) | |
2616 | die(_("unable to create thread: %s"), strerror(ret)); | |
2617 | active_threads++; | |
2618 | } | |
2619 | ||
2620 | /* | |
2621 | * Now let's wait for work completion. Each time a thread is done | |
2622 | * with its work, we steal half of the remaining work from the | |
2623 | * thread with the largest number of unprocessed objects and give | |
2624 | * it to that newly idle thread. This ensures good load balancing | |
2625 | * until the remaining object list segments are simply too short | |
2626 | * to be worth splitting anymore. | |
2627 | */ | |
2628 | while (active_threads) { | |
2629 | struct thread_params *target = NULL; | |
2630 | struct thread_params *victim = NULL; | |
2631 | unsigned sub_size = 0; | |
2632 | ||
2633 | progress_lock(); | |
2634 | for (;;) { | |
2635 | for (i = 0; !target && i < delta_search_threads; i++) | |
2636 | if (!p[i].working) | |
2637 | target = &p[i]; | |
2638 | if (target) | |
2639 | break; | |
2640 | pthread_cond_wait(&progress_cond, &progress_mutex); | |
2641 | } | |
2642 | ||
2643 | for (i = 0; i < delta_search_threads; i++) | |
2644 | if (p[i].remaining > 2*window && | |
2645 | (!victim || victim->remaining < p[i].remaining)) | |
2646 | victim = &p[i]; | |
2647 | if (victim) { | |
2648 | sub_size = victim->remaining / 2; | |
2649 | list = victim->list + victim->list_size - sub_size; | |
2650 | while (sub_size && list[0]->hash && | |
2651 | list[0]->hash == list[-1]->hash) { | |
2652 | list++; | |
2653 | sub_size--; | |
2654 | } | |
2655 | if (!sub_size) { | |
2656 | /* | |
2657 | * It is possible for some "paths" to have | |
2658 | * so many objects that no hash boundary | |
2659 | * might be found. Let's just steal the | |
2660 | * exact half in that case. | |
2661 | */ | |
2662 | sub_size = victim->remaining / 2; | |
2663 | list -= sub_size; | |
2664 | } | |
2665 | target->list = list; | |
2666 | victim->list_size -= sub_size; | |
2667 | victim->remaining -= sub_size; | |
2668 | } | |
2669 | target->list_size = sub_size; | |
2670 | target->remaining = sub_size; | |
2671 | target->working = 1; | |
2672 | progress_unlock(); | |
2673 | ||
2674 | pthread_mutex_lock(&target->mutex); | |
2675 | target->data_ready = 1; | |
2676 | pthread_cond_signal(&target->cond); | |
2677 | pthread_mutex_unlock(&target->mutex); | |
2678 | ||
2679 | if (!sub_size) { | |
2680 | pthread_join(target->thread, NULL); | |
2681 | pthread_cond_destroy(&target->cond); | |
2682 | pthread_mutex_destroy(&target->mutex); | |
2683 | active_threads--; | |
2684 | } | |
2685 | } | |
2686 | cleanup_threaded_search(); | |
2687 | free(p); | |
2688 | } | |
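| /* | |
|  * Partitioning example for ll_find_deltas() (hypothetical counts): | |
|  * 103 objects across 4 threads yields segments of 25, 26, 26 and 26 | |
|  * via list_size / (delta_search_threads - i), each then extended to | |
|  * the next name-hash boundary. When a thread drains its segment, the | |
|  * main thread gives it half of the largest remaining segment, so | |
|  * stealing naturally stops once every segment is too short to split. | |
|  */ | |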
2689 | ||
2690 | static int obj_is_packed(const struct object_id *oid) | |
2691 | { | |
2692 | return packlist_find(&to_pack, oid) || | |
2693 | (reuse_packfile_bitmap && | |
2694 | bitmap_walk_contains(bitmap_git, reuse_packfile_bitmap, oid)); | |
2695 | } | |
2696 | ||
2697 | static void add_tag_chain(const struct object_id *oid) | |
2698 | { | |
2699 | struct tag *tag; | |
2700 | ||
2701 | /* | |
2702 | * We catch duplicates already in add_object_entry(), but we'd | |
2703 | * prefer to do this extra check to avoid having to parse the | |
2704 | * tag at all if we already know that it's being packed (e.g., if | |
2705 | * it was included via bitmaps, we would not have parsed it | |
2706 | * previously). | |
2707 | */ | |
2708 | if (obj_is_packed(oid)) | |
2709 | return; | |
2710 | ||
2711 | tag = lookup_tag(the_repository, oid); | |
2712 | while (1) { | |
2713 | if (!tag || parse_tag(tag) || !tag->tagged) | |
2714 | die(_("unable to pack objects reachable from tag %s"), | |
2715 | oid_to_hex(oid)); | |
2716 | ||
2717 | add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0); | |
2718 | ||
2719 | if (tag->tagged->type != OBJ_TAG) | |
2720 | return; | |
2721 | ||
2722 | tag = (struct tag *)tag->tagged; | |
2723 | } | |
2724 | } | |
2725 | ||
2726 | static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data) | |
2727 | { | |
2728 | struct object_id peeled; | |
2729 | ||
2730 | if (starts_with(path, "refs/tags/") && /* is a tag? */ | |
2731 | !peel_ref(path, &peeled) && /* peelable? */ | |
2732 | obj_is_packed(&peeled)) /* object packed? */ | |
2733 | add_tag_chain(oid); | |
2734 | return 0; | |
2735 | } | |
2736 | ||
2737 | static void prepare_pack(int window, int depth) | |
2738 | { | |
2739 | struct object_entry **delta_list; | |
2740 | uint32_t i, nr_deltas; | |
2741 | unsigned n; | |
2742 | ||
2743 | if (use_delta_islands) | |
2744 | resolve_tree_islands(the_repository, progress, &to_pack); | |
2745 | ||
2746 | get_object_details(); | |
2747 | ||
2748 | /* | |
2749 | * If we're locally repacking then we need to be doubly careful | |
2750 | * from now on in order to make sure no stealth corruption gets | |
2751 | * propagated to the new pack. Clients receiving streamed packs | |
2752 | * should validate everything they get anyway so no need to incur | |
2753 | * the additional cost here in that case. | |
2754 | */ | |
2755 | if (!pack_to_stdout) | |
2756 | do_check_packed_object_crc = 1; | |
2757 | ||
2758 | if (!to_pack.nr_objects || !window || !depth) | |
2759 | return; | |
2760 | ||
2761 | ALLOC_ARRAY(delta_list, to_pack.nr_objects); | |
2762 | nr_deltas = n = 0; | |
2763 | ||
2764 | for (i = 0; i < to_pack.nr_objects; i++) { | |
2765 | struct object_entry *entry = to_pack.objects + i; | |
2766 | ||
2767 | if (DELTA(entry)) | |
2768 | /* This happens if we decided to reuse an existing | |
2769 | * delta from a pack. "reuse_delta &&" is implied. | |
2770 | */ | |
2771 | continue; | |
2772 | ||
2773 | if (!entry->type_valid || | |
2774 | oe_size_less_than(&to_pack, entry, 50)) | |
2775 | continue; | |
2776 | ||
2777 | if (entry->no_try_delta) | |
2778 | continue; | |
2779 | ||
2780 | if (!entry->preferred_base) { | |
2781 | nr_deltas++; | |
2782 | if (oe_type(entry) < 0) | |
2783 | die(_("unable to get type of object %s"), | |
2784 | oid_to_hex(&entry->idx.oid)); | |
2785 | } else { | |
2786 | if (oe_type(entry) < 0) { | |
2787 | /* | |
2788 | * This object is not found, but we | |
2789 | * don't have to include it anyway. | |
2790 | */ | |
2791 | continue; | |
2792 | } | |
2793 | } | |
2794 | ||
2795 | delta_list[n++] = entry; | |
2796 | } | |
2797 | ||
2798 | if (nr_deltas && n > 1) { | |
2799 | unsigned nr_done = 0; | |
2800 | ||
2801 | if (progress) | |
2802 | progress_state = start_progress(_("Compressing objects"), | |
2803 | nr_deltas); | |
2804 | QSORT(delta_list, n, type_size_sort); | |
2805 | ll_find_deltas(delta_list, n, window+1, depth, &nr_done); | |
2806 | stop_progress(&progress_state); | |
2807 | if (nr_done != nr_deltas) | |
2808 | die(_("inconsistency with delta count")); | |
2809 | } | |
2810 | free(delta_list); | |
2811 | } | |
2812 | ||
2813 | static int git_pack_config(const char *k, const char *v, void *cb) | |
2814 | { | |
2815 | if (!strcmp(k, "pack.window")) { | |
2816 | window = git_config_int(k, v); | |
2817 | return 0; | |
2818 | } | |
2819 | if (!strcmp(k, "pack.windowmemory")) { | |
2820 | window_memory_limit = git_config_ulong(k, v); | |
2821 | return 0; | |
2822 | } | |
2823 | if (!strcmp(k, "pack.depth")) { | |
2824 | depth = git_config_int(k, v); | |
2825 | return 0; | |
2826 | } | |
2827 | if (!strcmp(k, "pack.deltacachesize")) { | |
2828 | max_delta_cache_size = git_config_int(k, v); | |
2829 | return 0; | |
2830 | } | |
2831 | if (!strcmp(k, "pack.deltacachelimit")) { | |
2832 | cache_max_small_delta_size = git_config_int(k, v); | |
2833 | return 0; | |
2834 | } | |
2835 | if (!strcmp(k, "pack.writebitmaphashcache")) { | |
2836 | if (git_config_bool(k, v)) | |
2837 | write_bitmap_options |= BITMAP_OPT_HASH_CACHE; | |
2838 | else | |
2839 | write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE; | |
| return 0; | |
2840 | } | |
2841 | if (!strcmp(k, "pack.usebitmaps")) { | |
2842 | use_bitmap_index_default = git_config_bool(k, v); | |
2843 | return 0; | |
2844 | } | |
2845 | if (!strcmp(k, "pack.allowpackreuse")) { | |
2846 | allow_pack_reuse = git_config_bool(k, v); | |
2847 | return 0; | |
2848 | } | |
2849 | if (!strcmp(k, "pack.threads")) { | |
2850 | delta_search_threads = git_config_int(k, v); | |
2851 | if (delta_search_threads < 0) | |
2852 | die(_("invalid number of threads specified (%d)"), | |
2853 | delta_search_threads); | |
2854 | if (!HAVE_THREADS && delta_search_threads != 1) { | |
2855 | warning(_("no threads support, ignoring %s"), k); | |
2856 | delta_search_threads = 0; | |
2857 | } | |
2858 | return 0; | |
2859 | } | |
2860 | if (!strcmp(k, "pack.indexversion")) { | |
2861 | pack_idx_opts.version = git_config_int(k, v); | |
2862 | if (pack_idx_opts.version > 2) | |
2863 | die(_("bad pack.indexversion=%"PRIu32), | |
2864 | pack_idx_opts.version); | |
2865 | return 0; | |
2866 | } | |
2867 | return git_default_config(k, v, cb); | |
2868 | } | |
2869 | ||
2870 | static void read_object_list_from_stdin(void) | |
2871 | { | |
2872 | char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2]; | |
2873 | struct object_id oid; | |
2874 | const char *p; | |
2875 | ||
2876 | for (;;) { | |
2877 | if (!fgets(line, sizeof(line), stdin)) { | |
2878 | if (feof(stdin)) | |
2879 | break; | |
2880 | if (!ferror(stdin)) | |
2881 | die("BUG: fgets returned NULL, not EOF, not error!"); | |
2882 | if (errno != EINTR) | |
2883 | die_errno("fgets"); | |
2884 | clearerr(stdin); | |
2885 | continue; | |
2886 | } | |
2887 | if (line[0] == '-') { | |
2888 | if (get_oid_hex(line+1, &oid)) | |
2889 | die(_("expected edge object ID, got garbage:\n %s"), | |
2890 | line); | |
2891 | add_preferred_base(&oid); | |
2892 | continue; | |
2893 | } | |
2894 | if (parse_oid_hex(line, &oid, &p)) | |
2895 | die(_("expected object ID, got garbage:\n %s"), line); | |
2896 | ||
2897 | add_preferred_base_object(p + 1); | |
2898 | add_object_entry(&oid, OBJ_NONE, p + 1, 0); | |
2899 | } | |
2900 | } | |
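| /* | |
|  * The stdin format parsed above, one object per line, with "-" | |
|  * marking an edge object whose tree becomes a preferred base | |
|  * (hypothetical SHA-1 IDs and path): | |
|  * | |
|  *     b45ef6fec89518d314f546fd6c3025367b721684 Makefile | |
|  *     -2ccc26557cc21e47a0ff4e586b8c2d86ef5785d6 | |
|  */ | |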
2901 | ||
2902 | /* Remember to update object flag allocation in object.h */ | |
2903 | #define OBJECT_ADDED (1u<<20) | |
2904 | ||
2905 | static void show_commit(struct commit *commit, void *data) | |
2906 | { | |
2907 | add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0); | |
2908 | commit->object.flags |= OBJECT_ADDED; | |
2909 | ||
2910 | if (write_bitmap_index) | |
2911 | index_commit_for_bitmap(commit); | |
2912 | ||
2913 | if (use_delta_islands) | |
2914 | propagate_island_marks(commit); | |
2915 | } | |
2916 | ||
2917 | static void show_object(struct object *obj, const char *name, void *data) | |
2918 | { | |
2919 | add_preferred_base_object(name); | |
2920 | add_object_entry(&obj->oid, obj->type, name, 0); | |
2921 | obj->flags |= OBJECT_ADDED; | |
2922 | ||
2923 | if (use_delta_islands) { | |
2924 | const char *p; | |
2925 | unsigned depth; | |
2926 | struct object_entry *ent; | |
2927 | ||
2928 | /* the empty string is a root tree, which is depth 0 */ | |
2929 | depth = *name ? 1 : 0; | |
2930 | for (p = strchr(name, '/'); p; p = strchr(p + 1, '/')) | |
2931 | depth++; | |
2932 | ||
2933 | ent = packlist_find(&to_pack, &obj->oid); | |
2934 | if (ent && depth > oe_tree_depth(&to_pack, ent)) | |
2935 | oe_set_tree_depth(&to_pack, ent, depth); | |
2936 | } | |
2937 | } | |
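| /* | |
|  * Tree depth as computed above: "" (the root tree) is 0, "Makefile" | |
|  * is 1, and a path such as "t/helper/test-tool.c" is 1 plus its two | |
|  * slashes, i.e. depth 3. | |
|  */ | |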
2938 | ||
2939 | static void show_object__ma_allow_any(struct object *obj, const char *name, void *data) | |
2940 | { | |
2941 | assert(arg_missing_action == MA_ALLOW_ANY); | |
2942 | ||
2943 | /* | |
2944 | * Quietly ignore ALL missing objects. This avoids problems with | |
2945 | * staging them now and getting an odd error later. | |
2946 | */ | |
2947 | if (!has_object_file(&obj->oid)) | |
2948 | return; | |
2949 | ||
2950 | show_object(obj, name, data); | |
2951 | } | |
2952 | ||
2953 | static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data) | |
2954 | { | |
2955 | assert(arg_missing_action == MA_ALLOW_PROMISOR); | |
2956 | ||
2957 | /* | |
2958 | * Quietly ignore EXPECTED missing objects. This avoids problems with | |
2959 | * staging them now and getting an odd error later. | |
2960 | */ | |
2961 | if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid)) | |
2962 | return; | |
2963 | ||
2964 | show_object(obj, name, data); | |
2965 | } | |
2966 | ||
2967 | static int option_parse_missing_action(const struct option *opt, | |
2968 | const char *arg, int unset) | |
2969 | { | |
2970 | assert(arg); | |
2971 | assert(!unset); | |
2972 | ||
2973 | if (!strcmp(arg, "error")) { | |
2974 | arg_missing_action = MA_ERROR; | |
2975 | fn_show_object = show_object; | |
2976 | return 0; | |
2977 | } | |
2978 | ||
2979 | if (!strcmp(arg, "allow-any")) { | |
2980 | arg_missing_action = MA_ALLOW_ANY; | |
2981 | fetch_if_missing = 0; | |
2982 | fn_show_object = show_object__ma_allow_any; | |
2983 | return 0; | |
2984 | } | |
2985 | ||
2986 | if (!strcmp(arg, "allow-promisor")) { | |
2987 | arg_missing_action = MA_ALLOW_PROMISOR; | |
2988 | fetch_if_missing = 0; | |
2989 | fn_show_object = show_object__ma_allow_promisor; | |
2990 | return 0; | |
2991 | } | |
2992 | ||
2993 | die(_("invalid value for --missing")); | |
2994 | return 0; | |
2995 | } | |
2996 | ||
2997 | static void show_edge(struct commit *commit) | |
2998 | { | |
2999 | add_preferred_base(&commit->object.oid); | |
3000 | } | |
3001 | ||
3002 | struct in_pack_object { | |
3003 | off_t offset; | |
3004 | struct object *object; | |
3005 | }; | |
3006 | ||
3007 | struct in_pack { | |
3008 | unsigned int alloc; | |
3009 | unsigned int nr; | |
3010 | struct in_pack_object *array; | |
3011 | }; | |
3012 | ||
3013 | static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack) | |
3014 | { | |
3015 | in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p); | |
3016 | in_pack->array[in_pack->nr].object = object; | |
3017 | in_pack->nr++; | |
3018 | } | |
3019 | ||
3020 | /* | |
3021 | * Compare the objects in offset order, to emulate the | |
3022 | * "git rev-list --objects" output that produced the pack originally. | |
3023 | */ | |
3024 | static int ofscmp(const void *a_, const void *b_) | |
3025 | { | |
3026 | struct in_pack_object *a = (struct in_pack_object *)a_; | |
3027 | struct in_pack_object *b = (struct in_pack_object *)b_; | |
3028 | ||
3029 | if (a->offset < b->offset) | |
3030 | return -1; | |
3031 | else if (a->offset > b->offset) | |
3032 | return 1; | |
3033 | else | |
3034 | return oidcmp(&a->object->oid, &b->object->oid); | |
3035 | } | |
3036 | ||
3037 | static void add_objects_in_unpacked_packs(void) | |
3038 | { | |
3039 | struct packed_git *p; | |
3040 | struct in_pack in_pack; | |
3041 | uint32_t i; | |
3042 | ||
3043 | memset(&in_pack, 0, sizeof(in_pack)); | |
3044 | ||
3045 | for (p = get_all_packs(the_repository); p; p = p->next) { | |
3046 | struct object_id oid; | |
3047 | struct object *o; | |
3048 | ||
3049 | if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) | |
3050 | continue; | |
3051 | if (open_pack_index(p)) | |
3052 | die(_("cannot open pack index")); | |
3053 | ||
3054 | ALLOC_GROW(in_pack.array, | |
3055 | in_pack.nr + p->num_objects, | |
3056 | in_pack.alloc); | |
3057 | ||
3058 | for (i = 0; i < p->num_objects; i++) { | |
3059 | nth_packed_object_id(&oid, p, i); | |
3060 | o = lookup_unknown_object(&oid); | |
3061 | if (!(o->flags & OBJECT_ADDED)) | |
3062 | mark_in_pack_object(o, p, &in_pack); | |
3063 | o->flags |= OBJECT_ADDED; | |
3064 | } | |
3065 | } | |
3066 | ||
3067 | if (in_pack.nr) { | |
3068 | QSORT(in_pack.array, in_pack.nr, ofscmp); | |
3069 | for (i = 0; i < in_pack.nr; i++) { | |
3070 | struct object *o = in_pack.array[i].object; | |
3071 | add_object_entry(&o->oid, o->type, "", 0); | |
3072 | } | |
3073 | } | |
3074 | free(in_pack.array); | |
3075 | } | |
3076 | ||
3077 | static int add_loose_object(const struct object_id *oid, const char *path, | |
3078 | void *data) | |
3079 | { | |
3080 | enum object_type type = oid_object_info(the_repository, oid, NULL); | |
3081 | ||
3082 | if (type < 0) { | |
3083 | warning(_("loose object at %s could not be examined"), path); | |
3084 | return 0; | |
3085 | } | |
3086 | ||
3087 | add_object_entry(oid, type, "", 0); | |
3088 | return 0; | |
3089 | } | |
3090 | ||
3091 | /* | |
3092 | * We actually don't even have to worry about reachability here. | |
3093 | * add_object_entry will weed out duplicates, so we just add every | |
3094 | * loose object we find. | |
3095 | */ | |
3096 | static void add_unreachable_loose_objects(void) | |
3097 | { | |
3098 | for_each_loose_file_in_objdir(get_object_directory(), | |
3099 | add_loose_object, | |
3100 | NULL, NULL, NULL); | |
3101 | } | |
3102 | ||
3103 | static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid) | |
3104 | { | |
3105 | static struct packed_git *last_found = (void *)1; | |
3106 | struct packed_git *p; | |
3107 | ||
3108 | p = (last_found != (void *)1) ? last_found : | |
3109 | get_all_packs(the_repository); | |
3110 | ||
3111 | while (p) { | |
3112 | if ((!p->pack_local || p->pack_keep || | |
3113 | p->pack_keep_in_core) && | |
3114 | find_pack_entry_one(oid->hash, p)) { | |
3115 | last_found = p; | |
3116 | return 1; | |
3117 | } | |
3118 | if (p == last_found) | |
3119 | p = get_all_packs(the_repository); | |
3120 | else | |
3121 | p = p->next; | |
3122 | if (p == last_found) | |
3123 | p = p->next; | |
3124 | } | |
3125 | return 0; | |
3126 | } | |
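| /* | |
|  * The loop above is a most-recently-used search: it starts at the | |
|  * pack that satisfied the previous query (last_found), wraps around | |
|  * to the head of the pack list, and skips last_found when it comes | |
|  * around again, so each query touches every pack at most once. | |
|  */ | |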
3127 | ||
3128 | /* | |
3129 | * Store a list of object IDs that should not be discarded | |
3130 | * because they were either written too recently, or are | |
3131 | * reachable from another object that was. | |
3132 | * | |
3133 | * This is filled by get_object_list. | |
3134 | */ | |
3135 | static struct oid_array recent_objects; | |
3136 | ||
3137 | static int loosened_object_can_be_discarded(const struct object_id *oid, | |
3138 | timestamp_t mtime) | |
3139 | { | |
3140 | if (!unpack_unreachable_expiration) | |
3141 | return 0; | |
3142 | if (mtime > unpack_unreachable_expiration) | |
3143 | return 0; | |
3144 | if (oid_array_lookup(&recent_objects, oid) >= 0) | |
3145 | return 0; | |
3146 | return 1; | |
3147 | } | |
3148 | ||
3149 | static void loosen_unused_packed_objects(void) | |
3150 | { | |
3151 | struct packed_git *p; | |
3152 | uint32_t i; | |
3153 | struct object_id oid; | |
3154 | ||
3155 | for (p = get_all_packs(the_repository); p; p = p->next) { | |
3156 | if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) | |
3157 | continue; | |
3158 | ||
3159 | if (open_pack_index(p)) | |
3160 | die(_("cannot open pack index")); | |
3161 | ||
3162 | for (i = 0; i < p->num_objects; i++) { | |
3163 | nth_packed_object_id(&oid, p, i); | |
3164 | if (!packlist_find(&to_pack, &oid) && | |
3165 | !has_sha1_pack_kept_or_nonlocal(&oid) && | |
3166 | !loosened_object_can_be_discarded(&oid, p->mtime)) | |
3167 | if (force_object_loose(&oid, p->mtime)) | |
3168 | die(_("unable to force loose object")); | |
3169 | } | |
3170 | } | |
3171 | } | |
3172 | ||
3173 | /* | |
3174 | * This tracks any options which pack-reuse code expects to be on, or which a | |
3175 | * reader of the pack might not understand, and which would therefore prevent | |
3176 | * blind reuse of what we have on disk. | |
3177 | */ | |
3178 | static int pack_options_allow_reuse(void) | |
3179 | { | |
3180 | return allow_pack_reuse && | |
3181 | pack_to_stdout && | |
3182 | !ignore_packed_keep_on_disk && | |
3183 | !ignore_packed_keep_in_core && | |
3184 | (!local || !have_non_local_packs) && | |
3185 | !incremental; | |
3186 | } | |
3187 | ||
3188 | static int get_object_list_from_bitmap(struct rev_info *revs) | |
3189 | { | |
3190 | if (!(bitmap_git = prepare_bitmap_walk(revs, &filter_options))) | |
3191 | return -1; | |
3192 | ||
3193 | if (pack_options_allow_reuse() && | |
3194 | !reuse_partial_packfile_from_bitmap( | |
3195 | bitmap_git, | |
3196 | &reuse_packfile, | |
3197 | &reuse_packfile_objects, | |
3198 | &reuse_packfile_bitmap)) { | |
3199 | assert(reuse_packfile_objects); | |
3200 | nr_result += reuse_packfile_objects; | |
3201 | display_progress(progress_state, nr_result); | |
3202 | } | |
3203 | ||
3204 | traverse_bitmap_commit_list(bitmap_git, revs, | |
3205 | &add_object_entry_from_bitmap); | |
3206 | return 0; | |
3207 | } | |
3208 | ||
3209 | static void record_recent_object(struct object *obj, | |
3210 | const char *name, | |
3211 | void *data) | |
3212 | { | |
3213 | oid_array_append(&recent_objects, &obj->oid); | |
3214 | } | |
3215 | ||
3216 | static void record_recent_commit(struct commit *commit, void *data) | |
3217 | { | |
3218 | oid_array_append(&recent_objects, &commit->object.oid); | |
3219 | } | |
3220 | ||
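| /* | |
|  * Read rev-list style arguments from stdin, one per line; an empty line | |
|  * (or EOF) ends the list.  Illustrative input only: | |
|  * | |
|  *   refs/heads/master | |
|  *   --not | |
|  *   refs/heads/maint | |
|  * | |
|  * "--not" toggles the UNINTERESTING flag for the revs that follow, and | |
|  * "--shallow <oid>" registers a shallow boundary (and disables bitmap | |
|  * use, which cannot cope with shallow walks). | |
|  */ | |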
3221 | static void get_object_list(int ac, const char **av) | |
3222 | { | |
3223 | struct rev_info revs; | |
3224 | struct setup_revision_opt s_r_opt = { | |
3225 | .allow_exclude_promisor_objects = 1, | |
3226 | }; | |
3227 | char line[1000]; | |
3228 | int flags = 0; | |
3229 | int save_warning; | |
3230 | ||
3231 | repo_init_revisions(the_repository, &revs, NULL); | |
3232 | save_commit_buffer = 0; | |
3233 | setup_revisions(ac, av, &revs, &s_r_opt); | |
3234 | ||
3235 | /* make sure shallows are read */ | |
3236 | is_repository_shallow(the_repository); | |
3237 | ||
3238 | save_warning = warn_on_object_refname_ambiguity; | |
3239 | warn_on_object_refname_ambiguity = 0; | |
3240 | ||
3241 | while (fgets(line, sizeof(line), stdin) != NULL) { | |
3242 | int len = strlen(line); | |
3243 | if (len && line[len - 1] == '\n') | |
3244 | line[--len] = 0; | |
3245 | if (!len) | |
3246 | break; | |
3247 | if (*line == '-') { | |
3248 | if (!strcmp(line, "--not")) { | |
3249 | flags ^= UNINTERESTING; | |
3250 | write_bitmap_index = 0; | |
3251 | continue; | |
3252 | } | |
3253 | if (starts_with(line, "--shallow ")) { | |
3254 | struct object_id oid; | |
3255 | if (get_oid_hex(line + 10, &oid)) | |
3256 | die(_("not an SHA-1 '%s'"), line + 10); | |
3257 | register_shallow(the_repository, &oid); | |
3258 | use_bitmap_index = 0; | |
3259 | continue; | |
3260 | } | |
3261 | die(_("not a rev '%s'"), line); | |
3262 | } | |
3263 | if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME)) | |
3264 | die(_("bad revision '%s'"), line); | |
3265 | } | |
3266 | ||
3267 | warn_on_object_refname_ambiguity = save_warning; | |
3268 | ||
3269 | if (use_bitmap_index && !get_object_list_from_bitmap(&revs)) | |
3270 | return; | |
3271 | ||
3272 | if (use_delta_islands) | |
3273 | load_delta_islands(the_repository, progress); | |
3274 | ||
3275 | if (prepare_revision_walk(&revs)) | |
3276 | die(_("revision walk setup failed")); | |
3277 | mark_edges_uninteresting(&revs, show_edge, sparse); | |
3278 | ||
3279 | if (!fn_show_object) | |
3280 | fn_show_object = show_object; | |
3281 | traverse_commit_list_filtered(&filter_options, &revs, | |
3282 | show_commit, fn_show_object, NULL, | |
3283 | NULL); | |
3284 | ||
3285 | if (unpack_unreachable_expiration) { | |
3286 | revs.ignore_missing_links = 1; | |
3287 | if (add_unseen_recent_objects_to_traversal(&revs, | |
3288 | unpack_unreachable_expiration)) | |
3289 | die(_("unable to add recent objects")); | |
3290 | if (prepare_revision_walk(&revs)) | |
3291 | die(_("revision walk setup failed")); | |
3292 | traverse_commit_list(&revs, record_recent_commit, | |
3293 | record_recent_object, NULL); | |
3294 | } | |
3295 | ||
3296 | if (keep_unreachable) | |
3297 | add_objects_in_unpacked_packs(); | |
3298 | if (pack_loose_unreachable) | |
3299 | add_unreachable_loose_objects(); | |
3300 | if (unpack_unreachable) | |
3301 | loosen_unused_packed_objects(); | |
3302 | ||
3303 | oid_array_clear(&recent_objects); | |
3304 | } | |
3305 | ||
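| /* | |
|  * Mark each local pack whose basename matches an entry in --keep-pack as | |
|  * kept in core, so its objects are excluded just as if it had a .keep | |
|  * file on disk. | |
|  */ | |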
3306 | static void add_extra_kept_packs(const struct string_list *names) | |
3307 | { | |
3308 | struct packed_git *p; | |
3309 | ||
3310 | if (!names->nr) | |
3311 | return; | |
3312 | ||
3313 | for (p = get_all_packs(the_repository); p; p = p->next) { | |
3314 | const char *name = basename(p->pack_name); | |
3315 | int i; | |
3316 | ||
3317 | if (!p->pack_local) | |
3318 | continue; | |
3319 | ||
3320 | for (i = 0; i < names->nr; i++) | |
3321 | if (!fspathcmp(name, names->items[i].string)) | |
3322 | break; | |
3323 | ||
3324 | if (i < names->nr) { | |
3325 | p->pack_keep_in_core = 1; | |
3326 | ignore_packed_keep_in_core = 1; | |
3327 | continue; | |
3328 | } | |
3329 | } | |
3330 | } | |
3331 | ||
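| /* | |
|  * Parse --index-version=<version>[,<offset>].  A sketch of the effect, | |
|  * with values chosen purely for illustration: | |
|  * | |
|  *   --index-version=2,0x40000000 | |
|  * | |
|  * sets pack_idx_opts.version to 2 and pack_idx_opts.off32_limit to | |
|  * 0x40000000, so entries at pack offsets past 1 GiB are recorded in the | |
|  * v2 index's 64-bit offset table. | |
|  */ | |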
3332 | static int option_parse_index_version(const struct option *opt, | |
3333 | const char *arg, int unset) | |
3334 | { | |
3335 | char *c; | |
3336 | const char *val = arg; | |
3337 | ||
3338 | BUG_ON_OPT_NEG(unset); | |
3339 | ||
3340 | pack_idx_opts.version = strtoul(val, &c, 10); | |
3341 | if (pack_idx_opts.version > 2) | |
3342 | die(_("unsupported index version %s"), val); | |
3343 | if (*c == ',' && c[1]) | |
3344 | pack_idx_opts.off32_limit = strtoul(c+1, &c, 0); | |
3345 | if (*c || pack_idx_opts.off32_limit & 0x80000000) | |
3346 | die(_("bad index version '%s'"), val); | |
3347 | return 0; | |
3348 | } | |
3349 | ||
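| /* | |
|  * Parse --unpack-unreachable[=<time>].  The optional <time> goes through | |
|  * approxidate(), so e.g. "--unpack-unreachable=2.weeks.ago" sets the | |
|  * expiration consulted by loosened_object_can_be_discarded() above. | |
|  */ | |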
3350 | static int option_parse_unpack_unreachable(const struct option *opt, | |
3351 | const char *arg, int unset) | |
3352 | { | |
3353 | if (unset) { | |
3354 | unpack_unreachable = 0; | |
3355 | unpack_unreachable_expiration = 0; | |
3356 | } | |
3357 | else { | |
3358 | unpack_unreachable = 1; | |
3359 | if (arg) | |
3360 | unpack_unreachable_expiration = approxidate(arg); | |
3361 | } | |
3362 | return 0; | |
3363 | } | |
3364 | ||
3365 | int cmd_pack_objects(int argc, const char **argv, const char *prefix) | |
3366 | { | |
3367 | int use_internal_rev_list = 0; | |
3368 | int shallow = 0; | |
3369 | int all_progress_implied = 0; | |
3370 | struct argv_array rp = ARGV_ARRAY_INIT; | |
3371 | int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0; | |
3372 | int rev_list_index = 0; | |
3373 | struct string_list keep_pack_list = STRING_LIST_INIT_NODUP; | |
3374 | struct option pack_objects_options[] = { | |
3375 | OPT_SET_INT('q', "quiet", &progress, | |
3376 | N_("do not show progress meter"), 0), | |
3377 | OPT_SET_INT(0, "progress", &progress, | |
3378 | N_("show progress meter"), 1), | |
3379 | OPT_SET_INT(0, "all-progress", &progress, | |
3380 | N_("show progress meter during object writing phase"), 2), | |
3381 | OPT_BOOL(0, "all-progress-implied", | |
3382 | &all_progress_implied, | |
3383 | N_("similar to --all-progress when progress meter is shown")), | |
3384 | OPT_CALLBACK_F(0, "index-version", NULL, N_("<version>[,<offset>]"), | |
3385 | N_("write the pack index file in the specified idx format version"), | |
3386 | PARSE_OPT_NONEG, option_parse_index_version), | |
3387 | OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit, | |
3388 | N_("maximum size of each output pack file")), | |
3389 | OPT_BOOL(0, "local", &local, | |
3390 | N_("ignore borrowed objects from alternate object store")), | |
3391 | OPT_BOOL(0, "incremental", &incremental, | |
3392 | N_("ignore packed objects")), | |
3393 | OPT_INTEGER(0, "window", &window, | |
3394 | N_("limit pack window by objects")), | |
3395 | OPT_MAGNITUDE(0, "window-memory", &window_memory_limit, | |
3396 | N_("limit pack window by memory in addition to object limit")), | |
3397 | OPT_INTEGER(0, "depth", &depth, | |
3398 | N_("maximum length of delta chain allowed in the resulting pack")), | |
3399 | OPT_BOOL(0, "reuse-delta", &reuse_delta, | |
3400 | N_("reuse existing deltas")), | |
3401 | OPT_BOOL(0, "reuse-object", &reuse_object, | |
3402 | N_("reuse existing objects")), | |
3403 | OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta, | |
3404 | N_("use OFS_DELTA objects")), | |
3405 | OPT_INTEGER(0, "threads", &delta_search_threads, | |
3406 | N_("use threads when searching for best delta matches")), | |
3407 | OPT_BOOL(0, "non-empty", &non_empty, | |
3408 | N_("do not create an empty pack output")), | |
3409 | OPT_BOOL(0, "revs", &use_internal_rev_list, | |
3410 | N_("read revision arguments from standard input")), | |
3411 | OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked, | |
3412 | N_("limit the objects to those that are not yet packed"), | |
3413 | 1, PARSE_OPT_NONEG), | |
3414 | OPT_SET_INT_F(0, "all", &rev_list_all, | |
3415 | N_("include objects reachable from any reference"), | |
3416 | 1, PARSE_OPT_NONEG), | |
3417 | OPT_SET_INT_F(0, "reflog", &rev_list_reflog, | |
3418 | N_("include objects referred to by reflog entries"), | |
3419 | 1, PARSE_OPT_NONEG), | |
3420 | OPT_SET_INT_F(0, "indexed-objects", &rev_list_index, | |
3421 | N_("include objects referred to by the index"), | |
3422 | 1, PARSE_OPT_NONEG), | |
3423 | OPT_BOOL(0, "stdout", &pack_to_stdout, | |
3424 | N_("output pack to stdout")), | |
3425 | OPT_BOOL(0, "include-tag", &include_tag, | |
3426 | N_("include tag objects that refer to objects to be packed")), | |
3427 | OPT_BOOL(0, "keep-unreachable", &keep_unreachable, | |
3428 | N_("keep unreachable objects")), | |
3429 | OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable, | |
3430 | N_("pack loose unreachable objects")), | |
3431 | OPT_CALLBACK_F(0, "unpack-unreachable", NULL, N_("time"), | |
3432 | N_("unpack unreachable objects newer than <time>"), | |
3433 | PARSE_OPT_OPTARG, option_parse_unpack_unreachable), | |
3434 | OPT_BOOL(0, "sparse", &sparse, | |
3435 | N_("use the sparse reachability algorithm")), | |
3436 | OPT_BOOL(0, "thin", &thin, | |
3437 | N_("create thin packs")), | |
3438 | OPT_BOOL(0, "shallow", &shallow, | |
3439 | N_("create packs suitable for shallow fetches")), | |
3440 | OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk, | |
3441 | N_("ignore packs that have a companion .keep file")), | |
3442 | OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"), | |
3443 | N_("ignore this pack")), | |
3444 | OPT_INTEGER(0, "compression", &pack_compression_level, | |
3445 | N_("pack compression level")), | |
3446 | OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents, | |
3447 | N_("do not hide commits by grafts"), 0), | |
3448 | OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index, | |
3449 | N_("use a bitmap index if available to speed up counting objects")), | |
3450 | OPT_SET_INT(0, "write-bitmap-index", &write_bitmap_index, | |
3451 | N_("write a bitmap index together with the pack index"), | |
3452 | WRITE_BITMAP_TRUE), | |
3453 | OPT_SET_INT_F(0, "write-bitmap-index-quiet", | |
3454 | &write_bitmap_index, | |
3455 | N_("write a bitmap index if possible"), | |
3456 | WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN), | |
3457 | OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), | |
3458 | OPT_CALLBACK_F(0, "missing", NULL, N_("action"), | |
3459 | N_("handling for missing objects"), PARSE_OPT_NONEG, | |
3460 | option_parse_missing_action), | |
3461 | OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, | |
3462 | N_("do not pack objects in promisor packfiles")), | |
3463 | OPT_BOOL(0, "delta-islands", &use_delta_islands, | |
3464 | N_("respect islands during delta compression")), | |
3465 | OPT_END(), | |
3466 | }; | |
3467 | ||
3468 | if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS)) | |
3469 | BUG("too many dfs states, increase OE_DFS_STATE_BITS"); | |
3470 | ||
3471 | read_replace_refs = 0; | |
3472 | ||
3473 | sparse = git_env_bool("GIT_TEST_PACK_SPARSE", -1); | |
3474 | prepare_repo_settings(the_repository); | |
3475 | if (sparse < 0) | |
3476 | sparse = the_repository->settings.pack_use_sparse; | |
3477 | ||
3478 | reset_pack_idx_option(&pack_idx_opts); | |
3479 | git_config(git_pack_config, NULL); | |
3480 | ||
3481 | progress = isatty(2); | |
3482 | argc = parse_options(argc, argv, prefix, pack_objects_options, | |
3483 | pack_usage, 0); | |
3484 | ||
3485 | if (argc) { | |
3486 | base_name = argv[0]; | |
3487 | argc--; | |
3488 | } | |
3489 | if (pack_to_stdout != !base_name || argc) | |
3490 | usage_with_options(pack_usage, pack_objects_options); | |
3491 | ||
3492 | if (depth >= (1 << OE_DEPTH_BITS)) { | |
3493 | warning(_("delta chain depth %d is too deep, forcing %d"), | |
3494 | depth, (1 << OE_DEPTH_BITS) - 1); | |
3495 | depth = (1 << OE_DEPTH_BITS) - 1; | |
3496 | } | |
3497 | if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) { | |
3498 | warning(_("pack.deltaCacheLimit is too high, forcing %d"), | |
3499 | (1U << OE_Z_DELTA_BITS) - 1); | |
3500 | cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1; | |
3501 | } | |
3502 | ||
3503 | argv_array_push(&rp, "pack-objects"); | |
3504 | if (thin) { | |
3505 | use_internal_rev_list = 1; | |
3506 | argv_array_push(&rp, shallow | |
3507 | ? "--objects-edge-aggressive" | |
3508 | : "--objects-edge"); | |
3509 | } else | |
3510 | argv_array_push(&rp, "--objects"); | |
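| /* | |
|  * A thin pack may use objects the receiving side already has as delta | |
|  * bases, so the internal rev-list is asked to include edge objects; the | |
|  * "aggressive" variant walks edges further back on behalf of shallow | |
|  * clients. | |
|  */ | |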
3511 | ||
3512 | if (rev_list_all) { | |
3513 | use_internal_rev_list = 1; | |
3514 | argv_array_push(&rp, "--all"); | |
3515 | } | |
3516 | if (rev_list_reflog) { | |
3517 | use_internal_rev_list = 1; | |
3518 | argv_array_push(&rp, "--reflog"); | |
3519 | } | |
3520 | if (rev_list_index) { | |
3521 | use_internal_rev_list = 1; | |
3522 | argv_array_push(&rp, "--indexed-objects"); | |
3523 | } | |
3524 | if (rev_list_unpacked) { | |
3525 | use_internal_rev_list = 1; | |
3526 | argv_array_push(&rp, "--unpacked"); | |
3527 | } | |
3528 | ||
3529 | if (exclude_promisor_objects) { | |
3530 | use_internal_rev_list = 1; | |
3531 | fetch_if_missing = 0; | |
3532 | argv_array_push(&rp, "--exclude-promisor-objects"); | |
3533 | } | |
3534 | if (unpack_unreachable || keep_unreachable || pack_loose_unreachable) | |
3535 | use_internal_rev_list = 1; | |
3536 | ||
3537 | if (!reuse_object) | |
3538 | reuse_delta = 0; | |
3539 | if (pack_compression_level == -1) | |
3540 | pack_compression_level = Z_DEFAULT_COMPRESSION; | |
3541 | else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION) | |
3542 | die(_("bad pack compression level %d"), pack_compression_level); | |
3543 | ||
3544 | if (!delta_search_threads) /* --threads=0 means autodetect */ | |
3545 | delta_search_threads = online_cpus(); | |
3546 | ||
3547 | if (!HAVE_THREADS && delta_search_threads != 1) | |
3548 | warning(_("no threads support, ignoring --threads")); | |
3549 | if (!pack_to_stdout && !pack_size_limit) | |
3550 | pack_size_limit = pack_size_limit_cfg; | |
3551 | if (pack_to_stdout && pack_size_limit) | |
3552 | die(_("--max-pack-size cannot be used to build a pack for transfer")); | |
3553 | if (pack_size_limit && pack_size_limit < 1024*1024) { | |
3554 | warning(_("minimum pack size limit is 1 MiB")); | |
3555 | pack_size_limit = 1024*1024; | |
3556 | } | |
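| /* e.g. "--max-pack-size=1g" (or config pack.packSizeLimit) splits the | |
|    on-disk output into multiple packs. */ | |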
3557 | ||
3558 | if (!pack_to_stdout && thin) | |
3559 | die(_("--thin cannot be used to build an indexable pack")); | |
3560 | ||
3561 | if (keep_unreachable && unpack_unreachable) | |
3562 | die(_("--keep-unreachable and --unpack-unreachable are incompatible")); | |
3563 | if (!rev_list_all || !rev_list_reflog || !rev_list_index) | |
3564 | unpack_unreachable_expiration = 0; | |
3565 | ||
3566 | if (filter_options.choice) { | |
3567 | if (!pack_to_stdout) | |
3568 | die(_("cannot use --filter without --stdout")); | |
3569 | } | |
3570 | ||
3571 | /* | |
3572 | * "soft" reasons not to use bitmaps - for an on-disk repack, by default: | |
3573 | * | |
3574 | * - we want to produce a good pack (with a bitmap index, not-yet-packed | |
3575 | * objects are packed in suboptimal order); | |
3576 | * | |
3577 | * - we want the more robust pack-generation codepath (avoiding possible | |
3578 | * bugs in bitmap code and possible bitmap index corruption). | |
3579 | */ | |
3580 | if (!pack_to_stdout) | |
3581 | use_bitmap_index_default = 0; | |
3582 | ||
3583 | if (use_bitmap_index < 0) | |
3584 | use_bitmap_index = use_bitmap_index_default; | |
3585 | ||
3586 | /* "hard" reasons not to use bitmaps; these just won't work at all */ | |
3587 | if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository)) | |
3588 | use_bitmap_index = 0; | |
3589 | ||
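| /* | |
|  * A .bitmap must describe the complete object set of one on-disk pack, | |
|  * so writing one only makes sense for a non-stdout pack produced from a | |
|  * walk over all refs. | |
|  */ | |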
3590 | if (pack_to_stdout || !rev_list_all) | |
3591 | write_bitmap_index = 0; | |
3592 | ||
3593 | if (use_delta_islands) | |
3594 | argv_array_push(&rp, "--topo-order"); | |
3595 | ||
3596 | if (progress && all_progress_implied) | |
3597 | progress = 2; | |
3598 | ||
3599 | add_extra_kept_packs(&keep_pack_list); | |
3600 | if (ignore_packed_keep_on_disk) { | |
3601 | struct packed_git *p; | |
3602 | for (p = get_all_packs(the_repository); p; p = p->next) | |
3603 | if (p->pack_local && p->pack_keep) | |
3604 | break; | |
3605 | if (!p) /* no keep-able packs found */ | |
3606 | ignore_packed_keep_on_disk = 0; | |
3607 | } | |
3608 | if (local) { | |
3609 | /* | |
3610 | * unlike ignore_packed_keep_on_disk above, we do not | |
3611 | * want to unset "local" based on looking at packs, as | |
3612 | * it also covers non-local objects | |
3613 | */ | |
3614 | struct packed_git *p; | |
3615 | for (p = get_all_packs(the_repository); p; p = p->next) { | |
3616 | if (!p->pack_local) { | |
3617 | have_non_local_packs = 1; | |
3618 | break; | |
3619 | } | |
3620 | } | |
3621 | } | |
3622 | ||
3623 | trace2_region_enter("pack-objects", "enumerate-objects", | |
3624 | the_repository); | |
3625 | prepare_packing_data(the_repository, &to_pack); | |
3626 | ||
3627 | if (progress) | |
3628 | progress_state = start_progress(_("Enumerating objects"), 0); | |
3629 | if (!use_internal_rev_list) | |
3630 | read_object_list_from_stdin(); | |
3631 | else { | |
3632 | get_object_list(rp.argc, rp.argv); | |
3633 | argv_array_clear(&rp); | |
3634 | } | |
3635 | cleanup_preferred_base(); | |
3636 | if (include_tag && nr_result) | |
3637 | for_each_ref(add_ref_tag, NULL); | |
3638 | stop_progress(&progress_state); | |
3639 | trace2_region_leave("pack-objects", "enumerate-objects", | |
3640 | the_repository); | |
3641 | ||
3642 | if (non_empty && !nr_result) | |
3643 | return 0; | |
3644 | if (nr_result) { | |
3645 | trace2_region_enter("pack-objects", "prepare-pack", | |
3646 | the_repository); | |
3647 | prepare_pack(window, depth); | |
3648 | trace2_region_leave("pack-objects", "prepare-pack", | |
3649 | the_repository); | |
3650 | } | |
3651 | ||
3652 | trace2_region_enter("pack-objects", "write-pack-file", the_repository); | |
3653 | write_pack_file(); | |
3654 | trace2_region_leave("pack-objects", "write-pack-file", the_repository); | |
3655 | ||
3656 | if (progress) | |
3657 | fprintf_ln(stderr, | |
3658 | _("Total %"PRIu32" (delta %"PRIu32")," | |
3659 | " reused %"PRIu32" (delta %"PRIu32")," | |
3660 | " pack-reused %"PRIu32), | |
3661 | written, written_delta, reused, reused_delta, | |
3662 | reuse_packfile_objects); | |
3663 | return 0; | |
3664 | } |