#include "builtin.h"
#include "cache.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "mru.h"

static const char *pack_usage[] = {
	N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
	N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
	NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static unsigned long unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

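/*
 * Collect the commits we encounter; the bitmap writer later chooses
 * which of them to index (see the bitmap_writer_select_commits() call
 * in write_pack_file()).
 */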
static void index_commit_for_bitmap(struct commit *commit)
{
	if (indexed_commits_nr >= indexed_commits_alloc) {
		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
		REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
	}

	indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_sha1_file(entry->idx.sha1, &type, &size);
	if (!buf)
		die("unable to read %s", sha1_to_hex(entry->idx.sha1));
	base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
	if (!base_buf)
		die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	if (!delta_buf || delta_size != entry->delta_size)
		die("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

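/*
 * Deflate the buffer at *pptr in a single pass.  On return, *pptr
 * points at a freshly allocated buffer holding the compressed bytes
 * (the caller's original buffer is freed) and the compressed length
 * is returned.
 */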
static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

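/*
 * Stream a blob too large to hold in memory: read it 16kB at a time
 * through a git_istream, deflate each chunk straight into the pack,
 * and switch to Z_FINISH once read_istream() reports EOF
 * (readlen == 0).  Returns the number of compressed bytes written.
 */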
static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
					   const unsigned char *sha1)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), sha1_to_hex(sha1));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			sha1write(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		sha1write(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;

	if (!usable_delta) {
		if (entry->type == OBJ_BLOB &&
		    entry->size > big_file_threshold &&
		    (st = open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_sha1_file(entry->idx.sha1, &type, &size);
			if (!buf)
				die(_("unable to read %s"), sha1_to_hex(entry->idx.sha1));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		free(entry->delta_data);
		entry->delta_data = NULL;
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = entry->delta_size;
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = entry->delta_size;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * an additional 20 bytes for the base sha1.
		 */
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.sha1, 20);
		hdrlen += 20;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, entry->idx.sha1);
		close_istream(st);
	} else {
		sha1write(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

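/*
 * A worked example of the OFS_DELTA base-offset encoding emitted above
 * (and again in write_reuse_object() below): the distance back to the
 * delta base is stored as big-endian base-128 digits; every digit but
 * the last has its high bit set and is decremented by one first, so
 * that no offset has more than one valid encoding.
 *
 * For ofs = 1000:
 *   dheader[pos]   = 1000 & 127 = 104    (low digit)
 *   ofs >>= 7  ->  7;  --ofs  ->  6
 *   dheader[--pos] = 128 | 6    = 134    (high digit, marked and offset)
 * so the bytes emitted are {134, 104}.  The decoder in check_object()
 * reverses this: ofs = 6, then ofs = ((6 + 1) << 7) + 104 = 1000.
 */
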
/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = entry->in_pack;
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = entry->type;
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;

	if (entry->delta)
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry->size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
		error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.sha1, 20);
		hdrlen += 20;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!entry->delta)
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (entry->delta->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (entry->delta->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!entry->in_pack)
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
				/* check_object() decided it for us ... */
		to_reuse = usable_delta;
				/* ... but pack split may override that */
	else if (entry->type != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (entry->delta)
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			sha1_to_hex(e->idx.sha1));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (e->delta) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, e->delta, offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			e->delta = NULL;
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

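/*
 * for_each_tag_ref() callback: mark the entry a tag ref points at
 * (and, for annotated tags, its peeled target too) so that
 * compute_write_order() can group the tagged tips together.
 */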
static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	unsigned char peeled[20];
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, peeled)) {
		entry = packlist_find(&to_pack, peeled, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = e->delta_sibling; s; s = s->delta_sibling) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (e->delta_child) {
			add_to_order = 1;
			e = e->delta_child;
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (e->delta_sibling) {
				e = e->delta_sibling;
				continue;
			}
			/* go back to our parent node */
			e = e->delta;
			while (e && !e->delta_sibling) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = e->delta;
			}
			if (!e) {
				/* done - we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = e->delta_sibling;
		}
	}
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; root->delta; root = root->delta)
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

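/*
 * Decide the order objects are written into the pack: original recency
 * order up to the first tagged tip, then all tagged tips, then the
 * remaining commits and tags, then trees, and finally each delta
 * family in one contiguous run (base first, descendants after).
 */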
static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		objects[i].delta_child = NULL;
		objects[i].delta_sibling = NULL;
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!e->delta)
			continue;
		/* Mark me as the first child */
		e->delta_sibling = e->delta->delta_child;
		e->delta->delta_child = e;
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_COMMIT &&
		    objects[i].type != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}

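/*
 * Stream the body of an existing packfile verbatim into the pack being
 * written: everything after the header up to reuse_packfile_offset
 * (which defaults to the start of the trailing 20-byte checksum).
 * Progress is interpolated byte-wise since individual object
 * boundaries are not known here.
 */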
static off_t write_reused_pack(struct sha1file *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - 20;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		sha1write(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct sha1file *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		unsigned char sha1[20];
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = sha1fd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			sha1close(f, sha1, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			sha1close(f, sha1, CSUM_FSYNC);
		} else {
			int fd = sha1close(f, sha1, 0);
			fixup_pack_header_footer(fd, sha1, pack_tmp_name,
						 nr_written, sha1, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are accessed at runtime in mtime order, since
			 * newer packs are more likely to contain younger
			 * objects.  So if we are creating multiple packs then
			 * we should modify the mtime of later ones to preserve
			 * this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(sha1);
				bitmap_writer_build_type_index(written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, sha1);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(sha1_to_hex(sha1));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

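/*
 * Consult the "delta" gitattribute for the path; a false value
 * (e.g. "*.png -delta" in .gitattributes) tells us not to waste time
 * trying to delta-compress that object, and we return 1.
 */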
static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const unsigned char *sha1,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, sha1, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that makes us
	 * omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter we know we want the object in the generated pack.
	 * Otherwise, we signal "-1" at the end to tell the caller that we do
	 * not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (ignore_packed_keep && p->pack_local && p->pack_keep)
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const unsigned char *sha1,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	struct mru_entry *entry;
	int want;

	if (!exclude && local && has_loose_object_nonlocal(sha1))
		return 0;

	/*
	 * If we already know the pack the object lives in, start checks from
	 * that pack - in the usual case when neither --local was given nor
	 * .keep files are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	for (entry = packed_git_mru->head; entry; entry = entry->next) {
		struct packed_git *p = entry->item;
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(sha1, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				mru_mark(packed_git_mru, entry);
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const unsigned char *sha1,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, sha1, index_pos);
	entry->hash = hash;
	if (type)
		entry->type = type;
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		entry->in_pack = found_pack;
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const unsigned char *sha1, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(sha1, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);

	display_progress(progress_state, nr_result);
	return 1;
}

static int add_object_entry_from_bitmap(const unsigned char *sha1,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, 0, &pack, &offset))
		return 0;

	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

	display_progress(progress_state, nr_result);
	return 1;
}

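/*
 * "Preferred base" machinery.  Preferred-base objects are objects the
 * receiving side is assumed to already have, so they may serve as
 * delta bases but are never themselves written into the pack: they are
 * added with exclude set, which makes create_object_entry() flag them
 * as preferred_base and skip the nr_result++, and write_one() skip
 * them entirely.  The structures below cache the trees walked while
 * enumerating such objects.
 */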
struct pbase_tree_cache {
	unsigned char sha1[20];
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
	return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(sha1);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !hashcmp(ent->sha1, sha1)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_sha1_file(sha1, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	hashcpy(nent->sha1, sha1);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid->hash,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid->hash);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

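/*
 * To avoid adding the same preferred-base paths over and over, we keep
 * a sorted array of the path hashes already processed.  When the hash
 * is absent, done_pbase_path_pos() returns -(insertion point) - 1 in
 * the usual binary-search convention, and check_pbase_path() uses that
 * to splice the new hash into place.
 */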
static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = (hi + lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		memmove(done_pbase_paths + pos + 1,
			done_pbase_paths + pos,
			(done_pbase_paths_num - pos - 1) * sizeof(unsigned));
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(unsigned char *sha1)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	unsigned char tree_sha1[20];

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!hashcmp(it->pcache.sha1, tree_sha1)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	hashcpy(it->pcache.sha1, tree_sha1);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *this = it;
		it = this->next;
		free(this->pcache.tree_data);
		free(this);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		free(pbase_tree_cache[i]);
		pbase_tree_cache[i] = NULL;
	}

	free(done_pbase_paths);
	done_pbase_paths = NULL;
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

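/*
 * Determine the type and size of each object as cheaply as possible.
 * For an object stored in a pack we peek at its in-pack header, and if
 * its existing representation is a delta whose base is also being
 * packed, we record the delta for reuse; failing all that we fall back
 * to sha1_object_info().
 */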
c323ac7d LT |
1381 | static void check_object(struct object_entry *entry) |
1382 | { | |
5c49c116 | 1383 | if (entry->in_pack) { |
780e6e73 | 1384 | struct packed_git *p = entry->in_pack; |
03e79c88 | 1385 | struct pack_window *w_curs = NULL; |
5c49c116 NP |
1386 | const unsigned char *base_ref = NULL; |
1387 | struct object_entry *base_entry; | |
1388 | unsigned long used, used_0; | |
ef49a7a0 | 1389 | unsigned long avail; |
5c49c116 NP |
1390 | off_t ofs; |
1391 | unsigned char *buf, c; | |
780e6e73 | 1392 | |
6777a59f | 1393 | buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail); |
ab7cd7bb | 1394 | |
5c49c116 | 1395 | /* |
fa736f72 NP |
1396 | * We want in_pack_type even if we do not reuse delta |
1397 | * since non-delta representations could still be reused. | |
ab7cd7bb | 1398 | */ |
09ded04b | 1399 | used = unpack_object_header_buffer(buf, avail, |
5c49c116 NP |
1400 | &entry->in_pack_type, |
1401 | &entry->size); | |
03d66015 NP |
1402 | if (used == 0) |
1403 | goto give_up; | |
ab7cd7bb | 1404 | |
5c49c116 NP |
1405 | /* |
1406 | * Determine if this is a delta and if so whether we can | |
1407 | * reuse it or not. Otherwise let's find out as cheaply as | |
1408 | * possible what the actual type and size for this object is. | |
3f9ac8d2 | 1409 | */ |
5c49c116 NP |
1410 | switch (entry->in_pack_type) { |
1411 | default: | |
1412 | /* Not a delta hence we've already got all we need. */ | |
1413 | entry->type = entry->in_pack_type; | |
1414 | entry->in_pack_header_size = used; | |
03d66015 NP |
1415 | if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB) |
1416 | goto give_up; | |
5c49c116 NP |
1417 | unuse_pack(&w_curs); |
1418 | return; | |
1419 | case OBJ_REF_DELTA: | |
a7de7130 | 1420 | if (reuse_delta && !entry->preferred_base) |
5c49c116 NP |
1421 | base_ref = use_pack(p, &w_curs, |
1422 | entry->in_pack_offset + used, NULL); | |
1423 | entry->in_pack_header_size = used + 20; | |
1424 | break; | |
1425 | case OBJ_OFS_DELTA: | |
1426 | buf = use_pack(p, &w_curs, | |
1427 | entry->in_pack_offset + used, NULL); | |
1428 | used_0 = 0; | |
1429 | c = buf[used_0++]; | |
1430 | ofs = c & 127; | |
1431 | while (c & 128) { | |
1432 | ofs += 1; | |
03d66015 NP |
1433 | if (!ofs || MSB(ofs, 7)) { |
1434 | error("delta base offset overflow in pack for %s", | |
1435 | sha1_to_hex(entry->idx.sha1)); | |
1436 | goto give_up; | |
1437 | } | |
5c49c116 NP |
1438 | c = buf[used_0++]; |
1439 | ofs = (ofs << 7) + (c & 127); | |
780e6e73 | 1440 | } |
5c49c116 | 1441 | ofs = entry->in_pack_offset - ofs; |
03d66015 NP |
1442 | if (ofs <= 0 || ofs >= entry->in_pack_offset) { |
1443 | error("delta base offset out of bound for %s", | |
1444 | sha1_to_hex(entry->idx.sha1)); | |
1445 | goto give_up; | |
1446 | } | |
a7de7130 | 1447 | if (reuse_delta && !entry->preferred_base) { |
3449f8c4 NP |
1448 | struct revindex_entry *revidx; |
1449 | revidx = find_pack_revindex(p, ofs); | |
08698b1e NP |
1450 | if (!revidx) |
1451 | goto give_up; | |
3449f8c4 NP |
1452 | base_ref = nth_packed_object_sha1(p, revidx->nr); |
1453 | } | |
5c49c116 NP |
1454 | entry->in_pack_header_size = used + used_0; |
1455 | break; | |
780e6e73 | 1456 | } |
780e6e73 | 1457 | |
2834bc27 | 1458 | if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) { |
5c49c116 NP |
1459 | /* |
1460 | * If base_ref was set above that means we wish to | |
1461 | * reuse delta data, and we even found that base | |
1462 | * in the list of objects we want to pack. Goodie! | |
1463 | * | |
1464 | * Depth value does not matter - find_deltas() will | |
1465 | * never consider reused delta as the base object to | |
1466 | * deltify other objects against, in order to avoid | |
1467 | * circular deltas. | |
3f9ac8d2 | 1468 | */ |
780e6e73 | 1469 | entry->type = entry->in_pack_type; |
5c49c116 | 1470 | entry->delta = base_entry; |
64bd76b1 | 1471 | entry->delta_size = entry->size; |
15b4d577 JH |
1472 | entry->delta_sibling = base_entry->delta_child; |
1473 | base_entry->delta_child = entry; | |
5c49c116 NP |
1474 | unuse_pack(&w_curs); |
1475 | return; | |
1476 | } | |
ab7cd7bb | 1477 | |
5c49c116 NP |
1478 | if (entry->type) { |
1479 | /* | |
1480 | * This must be a delta and we already know what the | |
1481 | * final object type is. Let's extract the actual | |
1482 | * object size from the delta header. | |
1483 | */ | |
1484 | entry->size = get_size_from_delta(p, &w_curs, | |
1485 | entry->in_pack_offset + entry->in_pack_header_size); | |
03d66015 NP |
1486 | if (entry->size == 0) |
1487 | goto give_up; | |
5c49c116 | 1488 | unuse_pack(&w_curs); |
3f9ac8d2 JH |
1489 | return; |
1490 | } | |
5c49c116 NP |
1491 | |
1492 | /* | |
1493 | * No choice but to fall back to the recursive delta walk | |
1494 | * with sha1_object_info() to find about the object type | |
1495 | * at this point... | |
1496 | */ | |
03d66015 | 1497 | give_up: |
5c49c116 | 1498 | unuse_pack(&w_curs); |
36e4d74a | 1499 | } |
3f9ac8d2 | 1500 | |
aa7e44bf | 1501 | entry->type = sha1_object_info(entry->idx.sha1, &entry->size); |
6d6f9cdd SP |
1502 | /* |
1503 | * The error condition is checked in prepare_pack(). This is | |
1504 | * to permit a missing preferred base object to be ignored | |
1505 | * as a preferred base. Doing so can result in a larger | |
1506 | * pack file, but the transfer will still take place. | |
1507 | */ | |
3f9ac8d2 JH |
1508 | } |
1509 | ||
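The OBJ_OFS_DELTA branch above decodes git's variable-length base-offset encoding: seven payload bits per byte, the high bit as a continuation flag, plus an `ofs += 1` per continuation byte that gives each offset exactly one encoding (the MSB() test catches overflow). A minimal standalone sketch of that decode loop; the function name and sample bytes are illustrative only, not part of pack-objects.c:

    #include <stdio.h>
    #include <stdint.h>

    /* Decode an OFS_DELTA base offset exactly as the loop above does. */
    static uint64_t decode_ofs(const unsigned char *buf, unsigned *used)
    {
        unsigned i = 0;
        unsigned char c = buf[i++];
        uint64_t ofs = c & 127;
        while (c & 128) {
            ofs += 1;                    /* the "+1" keeps encodings unique */
            c = buf[i++];
            ofs = (ofs << 7) + (c & 127);
        }
        *used = i;
        return ofs;
    }

    int main(void)
    {
        /* 0x91 carries payload 0x11 with the continuation bit set;
         * 0x2e ends the sequence: (0x11 + 1) << 7 | 0x2e == 2350. */
        unsigned char enc[] = { 0x91, 0x2e };
        unsigned used;
        printf("%llu\n", (unsigned long long)decode_ofs(enc, &used));
        return 0;
    }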
5c49c116 NP |
1510 | static int pack_offset_sort(const void *_a, const void *_b) |
1511 | { | |
1512 | const struct object_entry *a = *(struct object_entry **)_a; | |
1513 | const struct object_entry *b = *(struct object_entry **)_b; | |
1514 | ||
1515 | /* avoid filesystem thrashing with loose objects */ | |
1516 | if (!a->in_pack && !b->in_pack) | |
aa7e44bf | 1517 | return hashcmp(a->idx.sha1, b->idx.sha1); |
5c49c116 NP |
1518 | |
1519 | if (a->in_pack < b->in_pack) | |
1520 | return -1; | |
1521 | if (a->in_pack > b->in_pack) | |
1522 | return 1; | |
1523 | return a->in_pack_offset < b->in_pack_offset ? -1 : | |
1524 | (a->in_pack_offset > b->in_pack_offset); | |
1525 | } | |
1526 | ||
4cf2143e JK |
1527 | /* |
1528 | * Drop an on-disk delta we were planning to reuse. Naively, this would | |
1529 | * just involve blanking out the "delta" field, but we have to deal | |
1530 | * with some extra book-keeping: | |
1531 | * | |
1532 | * 1. Removing ourselves from the delta_sibling linked list. | |
1533 | * | |
1534 | * 2. Updating our size/type to the non-delta representation. These were | |
1535 | * either not recorded initially (size) or overwritten with the delta type | |
1536 | * (type) when check_object() decided to reuse the delta. | |
7dbabbbe JK |
1537 | * |
1538 | * 3. Resetting our delta depth, as we are now a base object. | |
4cf2143e JK |
1539 | */ |
1540 | static void drop_reused_delta(struct object_entry *entry) | |
1541 | { | |
1542 | struct object_entry **p = &entry->delta->delta_child; | |
1543 | struct object_info oi = OBJECT_INFO_INIT; | |
1544 | ||
1545 | while (*p) { | |
1546 | if (*p == entry) | |
1547 | *p = (*p)->delta_sibling; | |
1548 | else | |
1549 | p = &(*p)->delta_sibling; | |
1550 | } | |
1551 | entry->delta = NULL; | |
7dbabbbe | 1552 | entry->depth = 0; |
4cf2143e JK |
1553 | |
1554 | oi.sizep = &entry->size; | |
1555 | oi.typep = &entry->type; | |
1556 | if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) { | |
1557 | /* | |
1558 | * We failed to get the info from this pack for some reason; | |
1559 | * fall back to sha1_object_info, which may find another copy. | |
1560 | * And if that fails, the error will be recorded in entry->type | |
1561 | * and dealt with in prepare_pack(). | |
1562 | */ | |
1563 | entry->type = sha1_object_info(entry->idx.sha1, &entry->size); | |
1564 | } | |
1565 | } | |
1566 | ||
1567 | /* | |
1568 | * Follow the chain of deltas from this entry onward, throwing away any links | |
1569 | * that cause us to hit a cycle (as determined by the DFS state flags in | |
1570 | * the entries). | |
7dbabbbe JK |
1571 | * |
1572 | * We also detect too-long reused chains that would violate our --depth | |
1573 | * limit. | |
4cf2143e JK |
1574 | */ |
1575 | static void break_delta_chains(struct object_entry *entry) | |
1576 | { | |
42b766d7 JK |
1577 | /* |
1578 | * The actual depth of each object we will write is stored as an int, | |
1579 | * as it cannot exceed our int "depth" limit. But before we break | |
1580 | * chains based on that limit, we may potentially go as deep as the | |
1581 | * number of objects, which is elsewhere bounded to a uint32_t. | |
1582 | */ | |
1583 | uint32_t total_depth; | |
1584 | struct object_entry *cur, *next; | |
1585 | ||
1586 | for (cur = entry, total_depth = 0; | |
1587 | cur; | |
1588 | cur = cur->delta, total_depth++) { | |
1589 | if (cur->dfs_state == DFS_DONE) { | |
1590 | /* | |
1591 | * We've already seen this object and know it isn't | |
1592 | * part of a cycle. We do need to append its depth | |
1593 | * to our count. | |
1594 | */ | |
1595 | total_depth += cur->depth; | |
1596 | break; | |
1597 | } | |
4cf2143e | 1598 | |
4cf2143e | 1599 | /* |
42b766d7 JK |
1600 | * We break cycles before looping, so an ACTIVE state (or any |
1601 | * other cruft which made its way into the state variable) | |
1602 | * is a bug. | |
4cf2143e | 1603 | */ |
42b766d7 JK |
1604 | if (cur->dfs_state != DFS_NONE) |
1605 | die("BUG: confusing delta dfs state in first pass: %d", | |
1606 | cur->dfs_state); | |
4cf2143e | 1607 | |
4cf2143e | 1608 | /* |
42b766d7 JK |
1609 | * Now we know this is the first time we've seen the object. If |
1610 | * it's not a delta, we're done traversing, but we'll mark it | |
1611 | * done to save time on future traversals. | |
4cf2143e | 1612 | */ |
42b766d7 JK |
1613 | if (!cur->delta) { |
1614 | cur->dfs_state = DFS_DONE; | |
1615 | break; | |
1616 | } | |
4cf2143e | 1617 | |
4cf2143e | 1618 | /* |
42b766d7 JK |
1619 | * Mark ourselves as active and see if the next step causes |
1620 | * us to cycle to another active object. It's important to do | |
1621 | * this _before_ we loop, because it impacts where we make the | |
1622 | * cut, and thus how our total_depth counter works. | |
1623 | * E.g., we may see a partial loop like: | |
1624 | * | |
1625 | * A -> B -> C -> D -> B | |
1626 | * | |
1627 | * Cutting B->C breaks the cycle. But now the depth of A is | |
1628 | * only 1, and our total_depth counter is at 3. The size of the | |
1629 | * error is always one less than the size of the cycle we | |
1630 | * broke. Commits C and D were "lost" from A's chain. | |
1631 | * | |
1632 | * If we instead cut D->B, then the depth of A is correct at 3. | |
1633 | * We keep all commits in the chain that we examined. | |
4cf2143e | 1634 | */ |
42b766d7 JK |
1635 | cur->dfs_state = DFS_ACTIVE; |
1636 | if (cur->delta->dfs_state == DFS_ACTIVE) { | |
1637 | drop_reused_delta(cur); | |
1638 | cur->dfs_state = DFS_DONE; | |
1639 | break; | |
7dbabbbe | 1640 | } |
42b766d7 | 1641 | } |
7dbabbbe | 1642 | |
42b766d7 JK |
1643 | /* |
1644 | * And now that we've gone all the way to the bottom of the chain, we | |
1645 | * need to clear the active flags and set the depth fields as | |
1646 | * appropriate. Unlike the loop above, which can quit when it drops a | |
1647 | * delta, we need to keep going to look for more depth cuts. So we need | |
1648 | * an extra "next" pointer to keep going after we reset cur->delta. | |
1649 | */ | |
1650 | for (cur = entry; cur; cur = next) { | |
1651 | next = cur->delta; | |
4cf2143e | 1652 | |
42b766d7 JK |
1653 | /* |
1654 | * We should have a chain of zero or more ACTIVE states down to | |
1655 | * a final DONE. We can quit after the DONE, because either it | |
1656 | * has no bases, or we've already handled them in a previous | |
1657 | * call. | |
1658 | */ | |
1659 | if (cur->dfs_state == DFS_DONE) | |
1660 | break; | |
1661 | else if (cur->dfs_state != DFS_ACTIVE) | |
1662 | die("BUG: confusing delta dfs state in second pass: %d", | |
1663 | cur->dfs_state); | |
4cf2143e | 1664 | |
4cf2143e | 1665 | /* |
42b766d7 JK |
1666 | * If the total_depth is more than depth, then we need to snip |
1667 | * the chain into two or more smaller chains that don't exceed | |
1668 | * the maximum depth. Most of the resulting chains will contain | |
1669 | * (depth + 1) entries (i.e., depth deltas plus one base), and | |
1670 | * the last chain (i.e., the one containing entry) will contain | |
1671 | * whatever entries are left over, namely | |
1672 | * (total_depth % (depth + 1)) of them. | |
1673 | * | |
1674 | * Since we are iterating towards decreasing depth, we need to | |
1675 | * decrement total_depth as we go, and we need to write to the | |
1676 | * entry what its final depth will be after all of the | |
1677 | * snipping. Since we're snipping into chains of length (depth | |
1678 | * + 1) entries, the final depth of an entry will be its | |
1679 | * original depth modulo (depth + 1). Any time we encounter an | |
1680 | * entry whose final depth is supposed to be zero, we snip it | |
1681 | * from its delta base, thereby making it so. | |
4cf2143e | 1682 | */ |
42b766d7 JK |
1683 | cur->depth = (total_depth--) % (depth + 1); |
1684 | if (!cur->depth) | |
1685 | drop_reused_delta(cur); | |
1686 | ||
1687 | cur->dfs_state = DFS_DONE; | |
4cf2143e JK |
1688 | } |
1689 | } | |
1690 | ||
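To make the snipping arithmetic concrete: with a --depth limit of 3 and a reused chain whose total_depth is 7, the `(total_depth--) % (depth + 1)` walk from the tip assigns final depths 3, 2, 1, 0, 3, 2, 1, 0, cutting the chain from its base wherever the result is 0, which yields chains of (depth + 1) entries as the comment above describes. A small sketch of just that arithmetic, with a plain counter standing in for the delta chain:

    #include <stdio.h>

    int main(void)
    {
        unsigned depth = 3;          /* the --depth limit */
        unsigned total_depth = 7;    /* reused chain: 7 deltas plus one base */
        unsigned i;

        /* Walk from the chain tip towards the base, as the loop above does. */
        for (i = 0; i <= 7; i++) {
            unsigned final_depth = (total_depth--) % (depth + 1);
            printf("entry %u: final depth %u%s\n", i, final_depth,
                   final_depth ? "" : " (snip from its base here)");
        }
        return 0;
    }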
c323ac7d LT |
1691 | static void get_object_details(void) |
1692 | { | |
7cadf491 | 1693 | uint32_t i; |
5c49c116 NP |
1694 | struct object_entry **sorted_by_offset; |
1695 | ||
2834bc27 VM |
1696 | sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *)); |
1697 | for (i = 0; i < to_pack.nr_objects; i++) | |
1698 | sorted_by_offset[i] = to_pack.objects + i; | |
9ed0d8d6 | 1699 | QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort); |
c323ac7d | 1700 | |
2834bc27 | 1701 | for (i = 0; i < to_pack.nr_objects; i++) { |
15366280 JH |
1702 | struct object_entry *entry = sorted_by_offset[i]; |
1703 | check_object(entry); | |
754980d0 | 1704 | if (big_file_threshold < entry->size) |
15366280 JH |
1705 | entry->no_try_delta = 1; |
1706 | } | |
3449f8c4 | 1707 | |
4cf2143e JK |
1708 | /* |
1709 | * This must happen in a second pass, since we rely on the delta | |
1710 | * information for the whole list being completed. | |
1711 | */ | |
1712 | for (i = 0; i < to_pack.nr_objects; i++) | |
1713 | break_delta_chains(&to_pack.objects[i]); | |
1714 | ||
5c49c116 | 1715 | free(sorted_by_offset); |
c323ac7d LT |
1716 | } |
1717 | ||
b904166c NP |
1718 | /* |
1719 | * We search for deltas in a list sorted by type, by filename hash, and then | |
1720 | * by size, so that we see progressively smaller and smaller files. | |
1721 | * That's because we prefer deltas to be from the bigger file | |
1722 | * to the smaller -- deletes are potentially cheaper, but perhaps | |
1723 | * more importantly, the bigger file is likely the more recent | |
1724 | * one. The deepest deltas are therefore the oldest objects which are | |
1725 | * less likely to be accessed often. | |
1726 | */ | |
9668cf59 | 1727 | static int type_size_sort(const void *_a, const void *_b) |
c323ac7d | 1728 | { |
9668cf59 NP |
1729 | const struct object_entry *a = *(struct object_entry **)_a; |
1730 | const struct object_entry *b = *(struct object_entry **)_b; | |
1731 | ||
c323ac7d | 1732 | if (a->type > b->type) |
27225f2e | 1733 | return -1; |
b904166c | 1734 | if (a->type < b->type) |
27225f2e | 1735 | return 1; |
b904166c | 1736 | if (a->hash > b->hash) |
7a979d99 | 1737 | return -1; |
b904166c | 1738 | if (a->hash < b->hash) |
7a979d99 | 1739 | return 1; |
b904166c | 1740 | if (a->preferred_base > b->preferred_base) |
c323ac7d | 1741 | return -1; |
b904166c NP |
1742 | if (a->preferred_base < b->preferred_base) |
1743 | return 1; | |
c323ac7d | 1744 | if (a->size > b->size) |
b904166c NP |
1745 | return -1; |
1746 | if (a->size < b->size) | |
c323ac7d | 1747 | return 1; |
b904166c | 1748 | return a < b ? -1 : (a > b); /* newest first */ |
c323ac7d LT |
1749 | } |
1750 | ||
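As a concrete illustration of the ordering described above, the sketch below sorts stand-in entries with the same key priority (type, then hash, then preferred_base, then size, all descending). The struct and comparator are simplified stand-ins, not the real object_entry, and the final pointer tiebreak is omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct ent { int type; unsigned hash; int pref; unsigned long size; };

    /* Same key priority as type_size_sort() above. */
    static int cmp(const void *a_, const void *b_)
    {
        const struct ent *a = a_, *b = b_;
        if (a->type != b->type) return a->type > b->type ? -1 : 1;
        if (a->hash != b->hash) return a->hash > b->hash ? -1 : 1;
        if (a->pref != b->pref) return a->pref > b->pref ? -1 : 1;
        if (a->size != b->size) return a->size > b->size ? -1 : 1;
        return 0;
    }

    int main(void)
    {
        struct ent e[] = {
            { 3, 5, 0, 100 },   /* blob, small */
            { 3, 5, 0, 900 },   /* blob, big: tried first */
            { 1, 9, 0, 200 },   /* commit */
        };
        qsort(e, 3, sizeof(*e), cmp);
        printf("first: type %d size %lu\n", e[0].type, e[0].size);
        return 0;
    }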
1751 | struct unpacked { | |
1752 | struct object_entry *entry; | |
1753 | void *data; | |
f6c7081a | 1754 | struct delta_index *index; |
5a235b5e | 1755 | unsigned depth; |
c323ac7d LT |
1756 | }; |
1757 | ||
d250626c NP |
1758 | static int delta_cacheable(unsigned long src_size, unsigned long trg_size, |
1759 | unsigned long delta_size) | |
074b2eea MK |
1760 | { |
1761 | if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size) | |
1762 | return 0; | |
1763 | ||
e3dfddb3 MK |
1764 | if (delta_size < cache_max_small_delta_size) |
1765 | return 1; | |
1766 | ||
074b2eea MK |
1767 | /* cache delta, if objects are large enough compared to delta size */ |
1768 | if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10)) | |
1769 | return 1; | |
1770 | ||
1771 | return 0; | |
1772 | } | |
1773 | ||
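Concretely, the large-object heuristic above buckets sizes by shifting: for a 4 MiB source and a 4 MiB target, (4 MiB >> 20) + (4 MiB >> 21) = 4 + 2 = 6, so any delta under 6 KiB passes the `> (delta_size >> 10)` test and gets cached. A small sketch checking a couple of values; the wrapper name is illustrative:

    #include <stdio.h>

    /* The same large-object heuristic as delta_cacheable() above,
     * minus the cache-size and small-delta bookkeeping. */
    static int worth_caching(unsigned long src, unsigned long trg,
                             unsigned long delta)
    {
        return (src >> 20) + (trg >> 21) > (delta >> 10);
    }

    int main(void)
    {
        unsigned long mib = 1024 * 1024;
        printf("%d\n", worth_caching(4 * mib, 4 * mib, 5 * 1024)); /* 1 */
        printf("%d\n", worth_caching(4 * mib, 4 * mib, 8 * 1024)); /* 0 */
        return 0;
    }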
7eb151d6 | 1774 | #ifndef NO_PTHREADS |
8ecce684 | 1775 | |
44626dc7 | 1776 | static pthread_mutex_t read_mutex; |
8ecce684 NP |
1777 | #define read_lock() pthread_mutex_lock(&read_mutex) |
1778 | #define read_unlock() pthread_mutex_unlock(&read_mutex) | |
1779 | ||
44626dc7 | 1780 | static pthread_mutex_t cache_mutex; |
3c701839 NP |
1781 | #define cache_lock() pthread_mutex_lock(&cache_mutex) |
1782 | #define cache_unlock() pthread_mutex_unlock(&cache_mutex) | |
1783 | ||
44626dc7 | 1784 | static pthread_mutex_t progress_mutex; |
8ecce684 NP |
1785 | #define progress_lock() pthread_mutex_lock(&progress_mutex) |
1786 | #define progress_unlock() pthread_mutex_unlock(&progress_mutex) | |
1787 | ||
1788 | #else | |
1789 | ||
e1ef8673 JH |
1790 | #define read_lock() (void)0 |
1791 | #define read_unlock() (void)0 | |
1792 | #define cache_lock() (void)0 | |
1793 | #define cache_unlock() (void)0 | |
1794 | #define progress_lock() (void)0 | |
1795 | #define progress_unlock() (void)0 | |
8ecce684 NP |
1796 | |
1797 | #endif | |
1798 | ||
f6c7081a | 1799 | static int try_delta(struct unpacked *trg, struct unpacked *src, |
ef0316fc | 1800 | unsigned max_depth, unsigned long *mem_usage) |
c323ac7d | 1801 | { |
f6c7081a NP |
1802 | struct object_entry *trg_entry = trg->entry; |
1803 | struct object_entry *src_entry = src->entry; | |
560b25a8 | 1804 | unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz; |
c83f032e | 1805 | unsigned ref_depth; |
21666f1a | 1806 | enum object_type type; |
c323ac7d LT |
1807 | void *delta_buf; |
1808 | ||
1809 | /* Don't bother doing diffs between different types */ | |
f6c7081a | 1810 | if (trg_entry->type != src_entry->type) |
c323ac7d LT |
1811 | return -1; |
1812 | ||
51d1e83f | 1813 | /* |
15f07e06 JK |
1814 | * We do not bother to try a delta that we discarded on an |
1815 | * earlier try, but only when reusing delta data. Note that | |
1816 | * a src_entry that is marked as the preferred_base should always | |
1817 | * be considered, as even if we produce a suboptimal delta against | |
1818 | * it, we will still save the transfer cost, as we already know | |
1819 | * the other side has it and we won't send src_entry at all. | |
51d1e83f | 1820 | */ |
a7de7130 | 1821 | if (reuse_delta && trg_entry->in_pack && |
e9195b58 | 1822 | trg_entry->in_pack == src_entry->in_pack && |
15f07e06 | 1823 | !src_entry->preferred_base && |
e9195b58 JH |
1824 | trg_entry->in_pack_type != OBJ_REF_DELTA && |
1825 | trg_entry->in_pack_type != OBJ_OFS_DELTA) | |
51d1e83f LT |
1826 | return 0; |
1827 | ||
898b14ce | 1828 | /* Let's not bust the allowed depth. */ |
5a235b5e | 1829 | if (src->depth >= max_depth) |
d116a45a | 1830 | return 0; |
c323ac7d | 1831 | |
c3b06a69 | 1832 | /* Now some size filtering heuristics. */ |
560b25a8 | 1833 | trg_size = trg_entry->size; |
c83f032e NP |
1834 | if (!trg_entry->delta) { |
1835 | max_size = trg_size/2 - 20; | |
1836 | ref_depth = 1; | |
1837 | } else { | |
1838 | max_size = trg_entry->delta_size; | |
5a235b5e | 1839 | ref_depth = trg->depth; |
c83f032e | 1840 | } |
720fe22d | 1841 | max_size = (uint64_t)max_size * (max_depth - src->depth) / |
c83f032e | 1842 | (max_depth - ref_depth + 1); |
c3b06a69 NP |
1843 | if (max_size == 0) |
1844 | return 0; | |
f6c7081a | 1845 | src_size = src_entry->size; |
560b25a8 | 1846 | sizediff = src_size < trg_size ? trg_size - src_size : 0; |
27225f2e | 1847 | if (sizediff >= max_size) |
f527cb8c | 1848 | return 0; |
a1dab41a BD |
1849 | if (trg_size < src_size / 32) |
1850 | return 0; | |
f6c7081a | 1851 | |
560b25a8 NP |
1852 | /* Load data if not already done */ |
1853 | if (!trg->data) { | |
8ecce684 | 1854 | read_lock(); |
aa7e44bf | 1855 | trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz); |
8ecce684 | 1856 | read_unlock(); |
2e3404c3 JH |
1857 | if (!trg->data) |
1858 | die("object %s cannot be read", | |
1859 | sha1_to_hex(trg_entry->idx.sha1)); | |
560b25a8 NP |
1860 | if (sz != trg_size) |
1861 | die("object %s inconsistent object length (%lu vs %lu)", | |
aa7e44bf | 1862 | sha1_to_hex(trg_entry->idx.sha1), sz, trg_size); |
ef0316fc | 1863 | *mem_usage += sz; |
560b25a8 NP |
1864 | } |
1865 | if (!src->data) { | |
8ecce684 | 1866 | read_lock(); |
aa7e44bf | 1867 | src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz); |
8ecce684 | 1868 | read_unlock(); |
71064a95 NP |
1869 | if (!src->data) { |
1870 | if (src_entry->preferred_base) { | |
1871 | static int warned = 0; | |
1872 | if (!warned++) | |
1873 | warning("object %s cannot be read", | |
1874 | sha1_to_hex(src_entry->idx.sha1)); | |
1875 | /* | |
1876 | * Those objects are not included in the | |
1877 | * resulting pack. Be resilient and ignore | |
1878 | * them if they can't be read, in case the | |
1879 | * pack could be created nevertheless. | |
1880 | */ | |
1881 | return 0; | |
1882 | } | |
2e3404c3 JH |
1883 | die("object %s cannot be read", |
1884 | sha1_to_hex(src_entry->idx.sha1)); | |
71064a95 | 1885 | } |
560b25a8 NP |
1886 | if (sz != src_size) |
1887 | die("object %s inconsistent object length (%lu vs %lu)", | |
aa7e44bf | 1888 | sha1_to_hex(src_entry->idx.sha1), sz, src_size); |
ef0316fc | 1889 | *mem_usage += sz; |
560b25a8 NP |
1890 | } |
1891 | if (!src->index) { | |
1892 | src->index = create_delta_index(src->data, src_size); | |
a588d88a MK |
1893 | if (!src->index) { |
1894 | static int warned = 0; | |
1895 | if (!warned++) | |
1896 | warning("suboptimal pack - out of memory"); | |
1897 | return 0; | |
1898 | } | |
ef0316fc | 1899 | *mem_usage += sizeof_delta_index(src->index); |
560b25a8 NP |
1900 | } |
1901 | ||
1902 | delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size); | |
c323ac7d | 1903 | if (!delta_buf) |
75c42d8c | 1904 | return 0; |
f6c7081a | 1905 | |
9e2d57a0 | 1906 | if (trg_entry->delta) { |
848d732c BD |
1907 | /* Prefer only shallower same-sized deltas. */ |
1908 | if (delta_size == trg_entry->delta_size && | |
5a235b5e | 1909 | src->depth + 1 >= trg->depth) { |
848d732c BD |
1910 | free(delta_buf); |
1911 | return 0; | |
1912 | } | |
074b2eea | 1913 | } |
9e2d57a0 | 1914 | |
3c701839 NP |
1915 | /* |
1916 | * Handle memory allocation outside of the cache | |
1917 | * accounting lock. The compiler will optimize the strangeness | |
7eb151d6 | 1918 | * away when NO_PTHREADS is defined. |
3c701839 | 1919 | */ |
8e0f7003 | 1920 | free(trg_entry->delta_data); |
3c701839 | 1921 | cache_lock(); |
9e2d57a0 NP |
1922 | if (trg_entry->delta_data) { |
1923 | delta_cache_size -= trg_entry->delta_size; | |
9e2d57a0 NP |
1924 | trg_entry->delta_data = NULL; |
1925 | } | |
d250626c | 1926 | if (delta_cacheable(src_size, trg_size, delta_size)) { |
b7a28f78 | 1927 | delta_cache_size += delta_size; |
3c701839 NP |
1928 | cache_unlock(); |
1929 | trg_entry->delta_data = xrealloc(delta_buf, delta_size); | |
1930 | } else { | |
1931 | cache_unlock(); | |
074b2eea | 1932 | free(delta_buf); |
3c701839 NP |
1933 | } |
1934 | ||
b7a28f78 NP |
1935 | trg_entry->delta = src_entry; |
1936 | trg_entry->delta_size = delta_size; | |
1937 | trg->depth = src->depth + 1; | |
1938 | ||
f6c7081a | 1939 | return 1; |
c323ac7d LT |
1940 | } |
1941 | ||
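The `max_size` computation above shrinks the delta budget for sources that sit deeper in the window. For example, with max_depth 10, a source already at depth 4, and a 1000-byte target that has no delta yet (so max_size starts at 1000/2 - 20 = 480 and ref_depth is 1), the budget becomes 480 * (10 - 4) / (10 - 1 + 1) = 288 bytes. The same arithmetic as a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned max_depth = 10, src_depth = 4, ref_depth = 1;
        unsigned long trg_size = 1000;
        unsigned long max_size = trg_size / 2 - 20;  /* no delta yet: 480 */

        max_size = (uint64_t)max_size * (max_depth - src_depth) /
                   (max_depth - ref_depth + 1);
        printf("delta budget: %lu bytes\n", max_size);  /* 288 */
        return 0;
    }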
898b14ce | 1942 | static unsigned int check_delta_limit(struct object_entry *me, unsigned int n) |
b2504a0d | 1943 | { |
898b14ce NP |
1944 | struct object_entry *child = me->delta_child; |
1945 | unsigned int m = n; | |
1946 | while (child) { | |
1947 | unsigned int c = check_delta_limit(child, n + 1); | |
1948 | if (m < c) | |
1949 | m = c; | |
1950 | child = child->delta_sibling; | |
1951 | } | |
1952 | return m; | |
b2504a0d NP |
1953 | } |
1954 | ||
75ad235c | 1955 | static unsigned long free_unpacked(struct unpacked *n) |
a97773ce | 1956 | { |
ef0316fc | 1957 | unsigned long freed_mem = sizeof_delta_index(n->index); |
a97773ce BD |
1958 | free_delta_index(n->index); |
1959 | n->index = NULL; | |
1960 | if (n->data) { | |
ef0316fc | 1961 | freed_mem += n->entry->size; |
a97773ce BD |
1962 | free(n->data); |
1963 | n->data = NULL; | |
a97773ce BD |
1964 | } |
1965 | n->entry = NULL; | |
7d7baa5e | 1966 | n->depth = 0; |
ef0316fc | 1967 | return freed_mem; |
a97773ce BD |
1968 | } |
1969 | ||
384b32c0 | 1970 | static void find_deltas(struct object_entry **list, unsigned *list_size, |
e334977d | 1971 | int window, int depth, unsigned *processed) |
c323ac7d | 1972 | { |
384b32c0 | 1973 | uint32_t i, idx = 0, count = 0; |
7cadf491 | 1974 | struct unpacked *array; |
ef0316fc | 1975 | unsigned long mem_usage = 0; |
c323ac7d | 1976 | |
19d4b416 | 1977 | array = xcalloc(window, sizeof(struct unpacked)); |
21fcd1bd | 1978 | |
384b32c0 | 1979 | for (;;) { |
421b488a | 1980 | struct object_entry *entry; |
c323ac7d | 1981 | struct unpacked *n = array + idx; |
ef0316fc | 1982 | int j, max_depth, best_base = -1; |
c323ac7d | 1983 | |
384b32c0 NP |
1984 | progress_lock(); |
1985 | if (!*list_size) { | |
1986 | progress_unlock(); | |
1987 | break; | |
1988 | } | |
421b488a | 1989 | entry = *list++; |
384b32c0 NP |
1990 | (*list_size)--; |
1991 | if (!entry->preferred_base) { | |
1992 | (*processed)++; | |
1993 | display_progress(progress_state, *processed); | |
1994 | } | |
1995 | progress_unlock(); | |
1996 | ||
ef0316fc | 1997 | mem_usage -= free_unpacked(n); |
c323ac7d | 1998 | n->entry = entry; |
ab7cd7bb | 1999 | |
a97773ce | 2000 | while (window_memory_limit && |
ef0316fc | 2001 | mem_usage > window_memory_limit && |
a97773ce BD |
2002 | count > 1) { |
2003 | uint32_t tail = (idx + window - count) % window; | |
75ad235c | 2004 | mem_usage -= free_unpacked(array + tail); |
a97773ce BD |
2005 | count--; |
2006 | } | |
2007 | ||
75d39853 NP |
2008 | /* We do not compute a delta to *create* objects we are not | |
2009 | * going to pack. | |
2010 | */ | |
2011 | if (entry->preferred_base) | |
2012 | goto next; | |
2013 | ||
898b14ce NP |
2014 | /* |
2015 | * If the current object is at a pack edge, take the depth of the | |
2016 | * objects that depend on the current object into account; | |
2017 | * otherwise they would become too deep. | |
2018 | */ | |
2019 | max_depth = depth; | |
2020 | if (entry->delta_child) { | |
2021 | max_depth -= check_delta_limit(entry, 0); | |
2022 | if (max_depth <= 0) | |
2023 | goto next; | |
2024 | } | |
2025 | ||
78817c15 LT |
2026 | j = window; |
2027 | while (--j > 0) { | |
77639870 | 2028 | int ret; |
7cadf491 | 2029 | uint32_t other_idx = idx + j; |
c323ac7d | 2030 | struct unpacked *m; |
78817c15 LT |
2031 | if (other_idx >= window) |
2032 | other_idx -= window; | |
c323ac7d LT |
2033 | m = array + other_idx; |
2034 | if (!m->entry) | |
2035 | break; | |
ef0316fc | 2036 | ret = try_delta(n, m, max_depth, &mem_usage); |
77639870 | 2037 | if (ret < 0) |
c323ac7d | 2038 | break; |
77639870 JH |
2039 | else if (ret > 0) |
2040 | best_base = other_idx; | |
c323ac7d | 2041 | } |
898b14ce | 2042 | |
ed4a9031 NP |
2043 | /* |
2044 | * If we decided to cache the delta data, then it is best | |
2045 | * to compress it right away. First because we have to do | |
2046 | * it anyway, and doing it here while we're threaded will | |
2047 | * save a lot of time in the non-threaded write phase, | |
2048 | * as well as allow for caching more deltas within | |
2049 | * the same cache size limit. | |
2050 | * ... | |
2051 | * But only if not writing to stdout, since in that case | |
2052 | * the network is most likely throttling writes anyway, | |
2053 | * and therefore it is best to go to the write phase ASAP | |
2054 | * instead, as we can afford spending more time compressing | |
2055 | * between writes at that moment. | |
2056 | */ | |
2057 | if (entry->delta_data && !pack_to_stdout) { | |
2058 | entry->z_delta_size = do_compress(&entry->delta_data, | |
2059 | entry->delta_size); | |
2060 | cache_lock(); | |
2061 | delta_cache_size -= entry->delta_size; | |
2062 | delta_cache_size += entry->z_delta_size; | |
2063 | cache_unlock(); | |
2064 | } | |
2065 | ||
70ca1a3f JH |
2066 | /* if we made n a delta, and if n is already at max |
2067 | * depth, leaving it in the window is pointless. We | |
2068 | * should evict it first. | |
70ca1a3f | 2069 | */ |
70baf5d4 | 2070 | if (entry->delta && max_depth <= n->depth) |
70ca1a3f | 2071 | continue; |
ff45715c | 2072 | |
77639870 JH |
2073 | /* |
2074 | * Move the best delta base up in the window, after the | |
2075 | * currently deltified object, to keep it longer. It will | |
2076 | * be the first base object to be attempted next. | |
2077 | */ | |
2078 | if (entry->delta) { | |
2079 | struct unpacked swap = array[best_base]; | |
2080 | int dist = (window + idx - best_base) % window; | |
2081 | int dst = best_base; | |
2082 | while (dist--) { | |
2083 | int src = (dst + 1) % window; | |
2084 | array[dst] = array[src]; | |
2085 | dst = src; | |
2086 | } | |
2087 | array[dst] = swap; | |
2088 | } | |
2089 | ||
898b14ce | 2090 | next: |
521a4f4c | 2091 | idx++; |
a97773ce BD |
2092 | if (count + 1 < window) |
2093 | count++; | |
521a4f4c LT |
2094 | if (idx >= window) |
2095 | idx = 0; | |
384b32c0 | 2096 | } |
adee7bdf | 2097 | |
f6c7081a | 2098 | for (i = 0; i < window; ++i) { |
ff45715c | 2099 | free_delta_index(array[i].index); |
adee7bdf | 2100 | free(array[i].data); |
f6c7081a | 2101 | } |
adee7bdf | 2102 | free(array); |
c323ac7d LT |
2103 | } |
2104 | ||
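The rotation at the bottom of find_deltas() moves the winning base into the slot of the object just deltified, shifting the entries between them back one step so the best base is attempted first on the next round. A sketch of the same ring-buffer shuffle on plain integers (window 4, idx 3, best_base 1); the values are illustrative stand-ins for struct unpacked slots:

    #include <stdio.h>

    int main(void)
    {
        int window = 4, idx = 3, best_base = 1;
        int array[] = { 10, 11, 12, 13 };
        int swap = array[best_base];
        int dist = (window + idx - best_base) % window;  /* 2 steps */
        int dst = best_base;

        while (dist--) {
            int src = (dst + 1) % window;
            array[dst] = array[src];
            dst = src;
        }
        array[dst] = swap;  /* winning base now sits in the idx slot */

        for (dst = 0; dst < window; dst++)
            printf("%d ", array[dst]);  /* prints: 10 12 13 11 */
        printf("\n");
        return 0;
    }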
7eb151d6 | 2105 | #ifndef NO_PTHREADS |
8ecce684 | 2106 | |
a9a74636 NP |
2107 | static void try_to_free_from_threads(size_t size) |
2108 | { | |
2109 | read_lock(); | |
7c3ecb32 | 2110 | release_pack_memory(size); |
a9a74636 NP |
2111 | read_unlock(); |
2112 | } | |
2113 | ||
bc9b2175 | 2114 | static try_to_free_t old_try_to_free_routine; |
851c34b0 | 2115 | |
50f22ada JS |
2116 | /* |
2117 | * The main thread waits on the condition that (at least) one of the workers | |
2118 | * has stopped working (which is indicated in the .working member of | |
2119 | * struct thread_params). | |
2120 | * When a work thread has completed its work, it sets .working to 0 and | |
2121 | * signals the main thread and waits on the condition that .data_ready | |
2122 | * becomes 1. | |
2123 | */ | |
2124 | ||
8ecce684 NP |
2125 | struct thread_params { |
2126 | pthread_t thread; | |
2127 | struct object_entry **list; | |
2128 | unsigned list_size; | |
384b32c0 | 2129 | unsigned remaining; |
8ecce684 NP |
2130 | int window; |
2131 | int depth; | |
50f22ada JS |
2132 | int working; |
2133 | int data_ready; | |
2134 | pthread_mutex_t mutex; | |
2135 | pthread_cond_t cond; | |
8ecce684 NP |
2136 | unsigned *processed; |
2137 | }; | |
2138 | ||
44626dc7 AH |
2139 | static pthread_cond_t progress_cond; |
2140 | ||
2141 | /* | |
2142 | * Mutex and condition variable can't be statically initialized on Windows. | |
2143 | */ | |
2144 | static void init_threaded_search(void) | |
2145 | { | |
93749194 | 2146 | init_recursive_mutex(&read_mutex); |
44626dc7 AH |
2147 | pthread_mutex_init(&cache_mutex, NULL); |
2148 | pthread_mutex_init(&progress_mutex, NULL); | |
2149 | pthread_cond_init(&progress_cond, NULL); | |
851c34b0 | 2150 | old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads); |
44626dc7 AH |
2151 | } |
2152 | ||
2153 | static void cleanup_threaded_search(void) | |
2154 | { | |
851c34b0 | 2155 | set_try_to_free_routine(old_try_to_free_routine); |
44626dc7 AH |
2156 | pthread_cond_destroy(&progress_cond); |
2157 | pthread_mutex_destroy(&read_mutex); | |
2158 | pthread_mutex_destroy(&cache_mutex); | |
2159 | pthread_mutex_destroy(&progress_mutex); | |
2160 | } | |
c2a33679 | 2161 | |
8ecce684 NP |
2162 | static void *threaded_find_deltas(void *arg) |
2163 | { | |
c2a33679 NP |
2164 | struct thread_params *me = arg; |
2165 | ||
50f22ada | 2166 | while (me->remaining) { |
384b32c0 | 2167 | find_deltas(me->list, &me->remaining, |
c2a33679 | 2168 | me->window, me->depth, me->processed); |
50f22ada JS |
2169 | |
2170 | progress_lock(); | |
2171 | me->working = 0; | |
2172 | pthread_cond_signal(&progress_cond); | |
2173 | progress_unlock(); | |
2174 | ||
2175 | /* | |
2176 | * We must not set ->data_ready before we wait on the | |
2177 | * condition because the main thread may have set it to 1 | |
2178 | * before we get here. In order to be sure that new | |
2179 | * work is available if we see 1 in ->data_ready, it | |
2180 | * was initialized to 0 before this thread was spawned | |
2181 | * and we reset it to 0 right away. | |
2182 | */ | |
2183 | pthread_mutex_lock(&me->mutex); | |
2184 | while (!me->data_ready) | |
2185 | pthread_cond_wait(&me->cond, &me->mutex); | |
2186 | me->data_ready = 0; | |
2187 | pthread_mutex_unlock(&me->mutex); | |
c2a33679 | 2188 | } |
50f22ada JS |
2189 | /* leave ->working 1 so that this doesn't get more work assigned */ |
2190 | return NULL; | |
8ecce684 NP |
2191 | } |
2192 | ||
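The ->data_ready handshake above is the classic condition-variable pattern of waiting on a predicate rather than on the signal itself: because the flag is set, tested, and cleared under one mutex, a wakeup that arrives before the worker starts waiting is not lost. A stripped-down sketch of the same handshake, with hypothetical names rather than the pack-objects structs:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int data_ready;

    static void *worker(void *arg)
    {
        pthread_mutex_lock(&mutex);
        while (!data_ready)            /* wait on the predicate, not the signal */
            pthread_cond_wait(&cond, &mutex);
        data_ready = 0;                /* consume the wakeup */
        pthread_mutex_unlock(&mutex);
        printf("worker: got work\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&mutex);
        data_ready = 1;                /* safe even if the worker isn't waiting yet */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&mutex);

        pthread_join(t, NULL);
        return 0;
    }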
8ecce684 NP |
2193 | static void ll_find_deltas(struct object_entry **list, unsigned list_size, |
2194 | int window, int depth, unsigned *processed) | |
2195 | { | |
dcda3614 | 2196 | struct thread_params *p; |
384b32c0 | 2197 | int i, ret, active_threads = 0; |
c2a33679 | 2198 | |
44626dc7 AH |
2199 | init_threaded_search(); |
2200 | ||
367f4a43 | 2201 | if (delta_search_threads <= 1) { |
384b32c0 | 2202 | find_deltas(list, &list_size, window, depth, processed); |
44626dc7 | 2203 | cleanup_threaded_search(); |
367f4a43 NP |
2204 | return; |
2205 | } | |
43cc2b42 | 2206 | if (progress > pack_to_stdout) |
b6c29915 | 2207 | fprintf(stderr, "Delta compression using up to %d threads.\n", |
43cc2b42 | 2208 | delta_search_threads); |
dcda3614 | 2209 | p = xcalloc(delta_search_threads, sizeof(*p)); |
367f4a43 | 2210 | |
50f22ada | 2211 | /* Partition the work amongst work threads. */ |
367f4a43 | 2212 | for (i = 0; i < delta_search_threads; i++) { |
50f22ada JS |
2213 | unsigned sub_size = list_size / (delta_search_threads - i); |
2214 | ||
bf874896 NP |
2215 | /* don't use too small segments or no deltas will be found */ |
2216 | if (sub_size < 2*window && i+1 < delta_search_threads) | |
2217 | sub_size = 0; | |
2218 | ||
8ecce684 NP |
2219 | p[i].window = window; |
2220 | p[i].depth = depth; | |
2221 | p[i].processed = processed; | |
50f22ada JS |
2222 | p[i].working = 1; |
2223 | p[i].data_ready = 0; | |
c2a33679 | 2224 | |
59921b4b | 2225 | /* try to split chunks on "path" boundaries */ |
6fc74703 NP |
2226 | while (sub_size && sub_size < list_size && |
2227 | list[sub_size]->hash && | |
384b32c0 NP |
2228 | list[sub_size]->hash == list[sub_size-1]->hash) |
2229 | sub_size++; | |
2230 | ||
50f22ada JS |
2231 | p[i].list = list; |
2232 | p[i].list_size = sub_size; | |
2233 | p[i].remaining = sub_size; | |
59921b4b | 2234 | |
384b32c0 NP |
2235 | list += sub_size; |
2236 | list_size -= sub_size; | |
2237 | } | |
2238 | ||
50f22ada JS |
2239 | /* Start work threads. */ |
2240 | for (i = 0; i < delta_search_threads; i++) { | |
2241 | if (!p[i].list_size) | |
2242 | continue; | |
68e6a4f8 JS |
2243 | pthread_mutex_init(&p[i].mutex, NULL); |
2244 | pthread_cond_init(&p[i].cond, NULL); | |
50f22ada JS |
2245 | ret = pthread_create(&p[i].thread, NULL, |
2246 | threaded_find_deltas, &p[i]); | |
2247 | if (ret) | |
2248 | die("unable to create thread: %s", strerror(ret)); | |
2249 | active_threads++; | |
2250 | } | |
2251 | ||
384b32c0 NP |
2252 | /* |
2253 | * Now let's wait for work completion. Each time a thread is done | |
2254 | * with its work, we steal half of the remaining work from the | |
2255 | * thread with the largest number of unprocessed objects and give | |
2256 | * it to that newly idle thread. This ensures good load balancing | |
2257 | * until the remaining object list segments are simply too short | |
2258 | * to be worth splitting anymore. | |
2259 | */ | |
50f22ada JS |
2260 | while (active_threads) { |
2261 | struct thread_params *target = NULL; | |
384b32c0 NP |
2262 | struct thread_params *victim = NULL; |
2263 | unsigned sub_size = 0; | |
384b32c0 NP |
2264 | |
2265 | progress_lock(); | |
50f22ada JS |
2266 | for (;;) { |
2267 | for (i = 0; !target && i < delta_search_threads; i++) | |
2268 | if (!p[i].working) | |
2269 | target = &p[i]; | |
2270 | if (target) | |
2271 | break; | |
2272 | pthread_cond_wait(&progress_cond, &progress_mutex); | |
2273 | } | |
2274 | ||
384b32c0 NP |
2275 | for (i = 0; i < delta_search_threads; i++) |
2276 | if (p[i].remaining > 2*window && | |
2277 | (!victim || victim->remaining < p[i].remaining)) | |
2278 | victim = &p[i]; | |
2279 | if (victim) { | |
2280 | sub_size = victim->remaining / 2; | |
2281 | list = victim->list + victim->list_size - sub_size; | |
2282 | while (sub_size && list[0]->hash && | |
2283 | list[0]->hash == list[-1]->hash) { | |
2284 | list++; | |
2285 | sub_size--; | |
2286 | } | |
eb9688ff NP |
2287 | if (!sub_size) { |
2288 | /* | |
2289 | * It is possible for some "paths" to have | |
2290 | * so many objects that no hash boundary | |
2291 | * might be found. Let's just steal the | |
2292 | * exact half in that case. | |
2293 | */ | |
2294 | sub_size = victim->remaining / 2; | |
2295 | list -= sub_size; | |
2296 | } | |
384b32c0 NP |
2297 | target->list = list; |
2298 | victim->list_size -= sub_size; | |
2299 | victim->remaining -= sub_size; | |
2300 | } | |
384b32c0 NP |
2301 | target->list_size = sub_size; |
2302 | target->remaining = sub_size; | |
50f22ada JS |
2303 | target->working = 1; |
2304 | progress_unlock(); | |
2305 | ||
2306 | pthread_mutex_lock(&target->mutex); | |
2307 | target->data_ready = 1; | |
2308 | pthread_cond_signal(&target->cond); | |
2309 | pthread_mutex_unlock(&target->mutex); | |
c2a33679 | 2310 | |
384b32c0 | 2311 | if (!sub_size) { |
b81d9af7 | 2312 | pthread_join(target->thread, NULL); |
50f22ada JS |
2313 | pthread_cond_destroy(&target->cond); |
2314 | pthread_mutex_destroy(&target->mutex); | |
384b32c0 | 2315 | active_threads--; |
c2a33679 | 2316 | } |
50f22ada | 2317 | } |
44626dc7 | 2318 | cleanup_threaded_search(); |
dcda3614 | 2319 | free(p); |
8ecce684 NP |
2320 | } |
2321 | ||
2322 | #else | |
384b32c0 | 2323 | #define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p) |
8ecce684 NP |
2324 | #endif |
2325 | ||
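The stealing step in ll_find_deltas() takes half of the busiest thread's tail, then nudges the cut forward past any run of equal hashes so one "path" is never split across threads. A sketch of just that split arithmetic, with an integer array standing in for the victim's object list (the hash values are made up):

    #include <stdio.h>

    int main(void)
    {
        /* hashes of the victim's remaining objects; the 7s are one "path" */
        int hash[] = { 1, 2, 7, 7, 7, 3, 4, 5 };
        int remaining = 8;
        int sub_size = remaining / 2;       /* steal half: 4 */
        int start = remaining - sub_size;   /* victim keeps [0, start) */

        /* advance the cut past a same-hash run, as the loop above does */
        while (sub_size && hash[start] && hash[start] == hash[start - 1]) {
            start++;
            sub_size--;
        }
        printf("victim keeps %d, thief takes %d\n", start, sub_size);
        return 0;
    }

With the sample hashes the naive cut at index 4 would split the run of 7s, so the cut moves to index 5: the victim keeps five entries and the thief takes three.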
b773ddea JK |
2326 | static void add_tag_chain(const struct object_id *oid) |
2327 | { | |
2328 | struct tag *tag; | |
2329 | ||
2330 | /* | |
2331 | * We catch duplicates already in add_object_entry(), but we'd | |
2332 | * prefer to do this extra check to avoid having to parse the | |
2333 | * tag at all if we already know that it's being packed (e.g., if | |
2334 | * it was included via bitmaps, we would not have parsed it | |
2335 | * previously). | |
2336 | */ | |
2337 | if (packlist_find(&to_pack, oid->hash, NULL)) | |
2338 | return; | |
2339 | ||
2340 | tag = lookup_tag(oid->hash); | |
2341 | while (1) { | |
2342 | if (!tag || parse_tag(tag) || !tag->tagged) | |
2343 | die("unable to pack objects reachable from tag %s", | |
2344 | oid_to_hex(oid)); | |
2345 | ||
2346 | add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0); | |
2347 | ||
2348 | if (tag->tagged->type != OBJ_TAG) | |
2349 | return; | |
2350 | ||
2351 | tag = (struct tag *)tag->tagged; | |
2352 | } | |
2353 | } | |
2354 | ||
d155254c | 2355 | static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data) |
f0a24aa5 | 2356 | { |
d155254c | 2357 | struct object_id peeled; |
f0a24aa5 | 2358 | |
59556548 | 2359 | if (starts_with(path, "refs/tags/") && /* is a tag? */ |
d155254c MH |
2360 | !peel_ref(path, peeled.hash) && /* peelable? */ |
2361 | packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */ | |
b773ddea | 2362 | add_tag_chain(oid); |
f0a24aa5 SP |
2363 | return 0; |
2364 | } | |
2365 | ||
f3123c4a JH |
2366 | static void prepare_pack(int window, int depth) |
2367 | { | |
9668cf59 | 2368 | struct object_entry **delta_list; |
6e1c2344 RJ |
2369 | uint32_t i, nr_deltas; |
2370 | unsigned n; | |
9668cf59 | 2371 | |
3f9ac8d2 | 2372 | get_object_details(); |
9668cf59 | 2373 | |
0e8189e2 NP |
2374 | /* |
2375 | * If we're locally repacking then we need to be doubly careful | |
2376 | * from now on in order to make sure no stealth corruption gets | |
2377 | * propagated to the new pack. Clients receiving streamed packs | |
2378 | * should validate everything they get anyway so no need to incur | |
2379 | * the additional cost here in that case. | |
2380 | */ | |
2381 | if (!pack_to_stdout) | |
2382 | do_check_packed_object_crc = 1; | |
2383 | ||
2834bc27 | 2384 | if (!to_pack.nr_objects || !window || !depth) |
9668cf59 NP |
2385 | return; |
2386 | ||
b32fa95f | 2387 | ALLOC_ARRAY(delta_list, to_pack.nr_objects); |
75d39853 NP |
2388 | nr_deltas = n = 0; |
2389 | ||
2834bc27 VM |
2390 | for (i = 0; i < to_pack.nr_objects; i++) { |
2391 | struct object_entry *entry = to_pack.objects + i; | |
75d39853 NP |
2392 | |
2393 | if (entry->delta) | |
2394 | /* This happens if we decided to reuse existing | |
a7de7130 | 2395 | * delta from a pack. "reuse_delta &&" is implied. |
75d39853 NP |
2396 | */ |
2397 | continue; | |
2398 | ||
2399 | if (entry->size < 50) | |
2400 | continue; | |
2401 | ||
2402 | if (entry->no_try_delta) | |
2403 | continue; | |
2404 | ||
6d6f9cdd | 2405 | if (!entry->preferred_base) { |
75d39853 | 2406 | nr_deltas++; |
6d6f9cdd SP |
2407 | if (entry->type < 0) |
2408 | die("unable to get type of object %s", | |
2409 | sha1_to_hex(entry->idx.sha1)); | |
eede9f42 NP |
2410 | } else { |
2411 | if (entry->type < 0) { | |
2412 | /* | |
2413 | * This object is not found, but we | |
2414 | * don't have to include it anyway. | |
2415 | */ | |
2416 | continue; | |
2417 | } | |
6d6f9cdd | 2418 | } |
75d39853 NP |
2419 | |
2420 | delta_list[n++] = entry; | |
2421 | } | |
2422 | ||
2f8b8947 | 2423 | if (nr_deltas && n > 1) { |
e334977d NP |
2424 | unsigned nr_done = 0; |
2425 | if (progress) | |
754dbc43 | 2426 | progress_state = start_progress(_("Compressing objects"), |
dc6a0757 | 2427 | nr_deltas); |
9ed0d8d6 | 2428 | QSORT(delta_list, n, type_size_sort); |
8ecce684 | 2429 | ll_find_deltas(delta_list, n, window+1, depth, &nr_done); |
4d4fcc54 | 2430 | stop_progress(&progress_state); |
e334977d NP |
2431 | if (nr_done != nr_deltas) |
2432 | die("inconsistency with delta count"); | |
75d39853 | 2433 | } |
9668cf59 | 2434 | free(delta_list); |
f3123c4a JH |
2435 | } |
2436 | ||
ef90d6d4 | 2437 | static int git_pack_config(const char *k, const char *v, void *cb) |
4812a93a | 2438 | { |
eeefa7c9 | 2439 | if (!strcmp(k, "pack.window")) { |
4812a93a JK |
2440 | window = git_config_int(k, v); |
2441 | return 0; | |
2442 | } | |
a97773ce BD |
2443 | if (!strcmp(k, "pack.windowmemory")) { |
2444 | window_memory_limit = git_config_ulong(k, v); | |
2445 | return 0; | |
2446 | } | |
2447 | if (!strcmp(k, "pack.depth")) { | |
842aaf93 TT |
2448 | depth = git_config_int(k, v); |
2449 | return 0; | |
2450 | } | |
074b2eea MK |
2451 | if (!strcmp(k, "pack.deltacachesize")) { |
2452 | max_delta_cache_size = git_config_int(k, v); | |
2453 | return 0; | |
2454 | } | |
e3dfddb3 MK |
2455 | if (!strcmp(k, "pack.deltacachelimit")) { |
2456 | cache_max_small_delta_size = git_config_int(k, v); | |
2457 | return 0; | |
2458 | } | |
ae4f07fb VM |
2459 | if (!strcmp(k, "pack.writebitmaphashcache")) { |
2460 | if (git_config_bool(k, v)) | |
2461 | write_bitmap_options |= BITMAP_OPT_HASH_CACHE; | |
2462 | else | |
2463 | write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE; | |
2464 | } | |
6b8fda2d | 2465 | if (!strcmp(k, "pack.usebitmaps")) { |
645c432d | 2466 | use_bitmap_index_default = git_config_bool(k, v); |
6b8fda2d VM |
2467 | return 0; |
2468 | } | |
693b86ff NP |
2469 | if (!strcmp(k, "pack.threads")) { |
2470 | delta_search_threads = git_config_int(k, v); | |
833e3df1 | 2471 | if (delta_search_threads < 0) |
693b86ff NP |
2472 | die("invalid number of threads specified (%d)", |
2473 | delta_search_threads); | |
7eb151d6 | 2474 | #ifdef NO_PTHREADS |
833e3df1 | 2475 | if (delta_search_threads != 1) |
693b86ff NP |
2476 | warning("no threads support, ignoring %s", k); |
2477 | #endif | |
2478 | return 0; | |
2479 | } | |
4d00bda2 | 2480 | if (!strcmp(k, "pack.indexversion")) { |
ebcfb379 JH |
2481 | pack_idx_opts.version = git_config_int(k, v); |
2482 | if (pack_idx_opts.version > 2) | |
6e1c2344 | 2483 | die("bad pack.indexversion=%"PRIu32, |
ebcfb379 | 2484 | pack_idx_opts.version); |
4d00bda2 NP |
2485 | return 0; |
2486 | } | |
ef90d6d4 | 2487 | return git_default_config(k, v, cb); |
4812a93a JK |
2488 | } |
2489 | ||
b5d97e6b | 2490 | static void read_object_list_from_stdin(void) |
c323ac7d | 2491 | { |
b5d97e6b JH |
2492 | char line[40 + 1 + PATH_MAX + 2]; |
2493 | unsigned char sha1[20]; | |
b2504a0d | 2494 | |
da93d12b | 2495 | for (;;) { |
da93d12b LT |
2496 | if (!fgets(line, sizeof(line), stdin)) { |
2497 | if (feof(stdin)) | |
2498 | break; | |
2499 | if (!ferror(stdin)) | |
2500 | die("fgets returned NULL, not EOF, not error!"); | |
687dd75c | 2501 | if (errno != EINTR) |
d824cbba | 2502 | die_errno("fgets"); |
687dd75c JH |
2503 | clearerr(stdin); |
2504 | continue; | |
da93d12b | 2505 | } |
7a979d99 JH |
2506 | if (line[0] == '-') { |
2507 | if (get_sha1_hex(line+1, sha1)) | |
2508 | die("expected edge sha1, got garbage:\n %s", | |
b5d97e6b | 2509 | line); |
8d1d8f83 | 2510 | add_preferred_base(sha1); |
7a979d99 | 2511 | continue; |
21fcd1bd | 2512 | } |
c323ac7d | 2513 | if (get_sha1_hex(line, sha1)) |
ef07618f | 2514 | die("expected sha1, got garbage:\n %s", line); |
b5d97e6b | 2515 | |
bc32fed5 JH |
2516 | add_preferred_base_object(line+41); |
2517 | add_object_entry(sha1, 0, line+41, 0); | |
c323ac7d | 2518 | } |
b5d97e6b JH |
2519 | } |
2520 | ||
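For reference, the loop above expects one object per line: a 40-hex SHA-1, optionally followed by a space and a pathname hint (read from column 41), while a leading '-' marks an edge object to be used only as a preferred delta base. The SHA-1s below are made-up placeholders, shown purely to illustrate the input format:

    1234567890abcdef1234567890abcdef12345678 Makefile
    -feedfacefeedfacefeedfacefeedfacefeedface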
08cdfb13 JH |
2521 | #define OBJECT_ADDED (1u<<20) |
2522 | ||
11c211fa | 2523 | static void show_commit(struct commit *commit, void *data) |
b5d97e6b | 2524 | { |
ed1c9977 | 2525 | add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0); |
08cdfb13 | 2526 | commit->object.flags |= OBJECT_ADDED; |
7cc8f971 VM |
2527 | |
2528 | if (write_bitmap_index) | |
2529 | index_commit_for_bitmap(commit); | |
b5d97e6b JH |
2530 | } |
2531 | ||
de1e67d0 | 2532 | static void show_object(struct object *obj, const char *name, void *data) |
b5d97e6b | 2533 | { |
8d2dfc49 | 2534 | add_preferred_base_object(name); |
ed1c9977 | 2535 | add_object_entry(obj->oid.hash, obj->type, name, 0); |
8d2dfc49 | 2536 | obj->flags |= OBJECT_ADDED; |
b5d97e6b JH |
2537 | } |
2538 | ||
8d1d8f83 JH |
2539 | static void show_edge(struct commit *commit) |
2540 | { | |
ed1c9977 | 2541 | add_preferred_base(commit->object.oid.hash); |
8d1d8f83 JH |
2542 | } |
2543 | ||
08cdfb13 JH |
2544 | struct in_pack_object { |
2545 | off_t offset; | |
2546 | struct object *object; | |
2547 | }; | |
2548 | ||
2549 | struct in_pack { | |
2550 | int alloc; | |
2551 | int nr; | |
2552 | struct in_pack_object *array; | |
2553 | }; | |
2554 | ||
2555 | static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack) | |
2556 | { | |
ed1c9977 | 2557 | in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p); |
08cdfb13 JH |
2558 | in_pack->array[in_pack->nr].object = object; |
2559 | in_pack->nr++; | |
2560 | } | |
2561 | ||
2562 | /* | |
2563 | * Compare the objects in offset order, to emulate the | |
f18d244a | 2564 | * "git rev-list --objects" output that produced the pack originally. |
08cdfb13 JH |
2565 | */ |
2566 | static int ofscmp(const void *a_, const void *b_) | |
2567 | { | |
2568 | struct in_pack_object *a = (struct in_pack_object *)a_; | |
2569 | struct in_pack_object *b = (struct in_pack_object *)b_; | |
2570 | ||
2571 | if (a->offset < b->offset) | |
2572 | return -1; | |
2573 | else if (a->offset > b->offset) | |
2574 | return 1; | |
2575 | else | |
f2fd0760 | 2576 | return oidcmp(&a->object->oid, &b->object->oid); |
08cdfb13 JH |
2577 | } |
2578 | ||
2579 | static void add_objects_in_unpacked_packs(struct rev_info *revs) | |
2580 | { | |
2581 | struct packed_git *p; | |
2582 | struct in_pack in_pack; | |
2583 | uint32_t i; | |
2584 | ||
2585 | memset(&in_pack, 0, sizeof(in_pack)); | |
2586 | ||
2587 | for (p = packed_git; p; p = p->next) { | |
2588 | const unsigned char *sha1; | |
2589 | struct object *o; | |
2590 | ||
79bc4c71 | 2591 | if (!p->pack_local || p->pack_keep) |
08cdfb13 JH |
2592 | continue; |
2593 | if (open_pack_index(p)) | |
2594 | die("cannot open pack index"); | |
2595 | ||
2596 | ALLOC_GROW(in_pack.array, | |
2597 | in_pack.nr + p->num_objects, | |
2598 | in_pack.alloc); | |
2599 | ||
2600 | for (i = 0; i < p->num_objects; i++) { | |
2601 | sha1 = nth_packed_object_sha1(p, i); | |
2602 | o = lookup_unknown_object(sha1); | |
2603 | if (!(o->flags & OBJECT_ADDED)) | |
2604 | mark_in_pack_object(o, p, &in_pack); | |
2605 | o->flags |= OBJECT_ADDED; | |
2606 | } | |
2607 | } | |
2608 | ||
2609 | if (in_pack.nr) { | |
9ed0d8d6 | 2610 | QSORT(in_pack.array, in_pack.nr, ofscmp); |
08cdfb13 JH |
2611 | for (i = 0; i < in_pack.nr; i++) { |
2612 | struct object *o = in_pack.array[i].object; | |
ed1c9977 | 2613 | add_object_entry(o->oid.hash, o->type, "", 0); |
08cdfb13 JH |
2614 | } |
2615 | } | |
2616 | free(in_pack.array); | |
2617 | } | |
2618 | ||
76c1d9a0 | 2619 | static int add_loose_object(const struct object_id *oid, const char *path, |
e26a8c47 JK |
2620 | void *data) |
2621 | { | |
76c1d9a0 | 2622 | enum object_type type = sha1_object_info(oid->hash, NULL); |
e26a8c47 JK |
2623 | |
2624 | if (type < 0) { | |
2625 | warning("loose object at %s could not be examined", path); | |
2626 | return 0; | |
2627 | } | |
2628 | ||
76c1d9a0 | 2629 | add_object_entry(oid->hash, type, "", 0); |
e26a8c47 JK |
2630 | return 0; |
2631 | } | |
2632 | ||
2633 | /* | |
2634 | * We actually don't even have to worry about reachability here. | |
2635 | * add_object_entry will weed out duplicates, so we just add every | |
2636 | * loose object we find. | |
2637 | */ | |
2638 | static void add_unreachable_loose_objects(void) | |
2639 | { | |
2640 | for_each_loose_file_in_objdir(get_object_directory(), | |
2641 | add_loose_object, | |
2642 | NULL, NULL, NULL); | |
2643 | } | |
2644 | ||
094085e3 BC |
2645 | static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1) |
2646 | { | |
2647 | static struct packed_git *last_found = (void *)1; | |
2648 | struct packed_git *p; | |
2649 | ||
2650 | p = (last_found != (void *)1) ? last_found : packed_git; | |
2651 | ||
2652 | while (p) { | |
2653 | if ((!p->pack_local || p->pack_keep) && | |
2654 | find_pack_entry_one(sha1, p)) { | |
2655 | last_found = p; | |
2656 | return 1; | |
2657 | } | |
2658 | if (p == last_found) | |
2659 | p = packed_git; | |
2660 | else | |
2661 | p = p->next; | |
2662 | if (p == last_found) | |
2663 | p = p->next; | |
2664 | } | |
2665 | return 0; | |
2666 | } | |
2667 | ||
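The loop above is a one-entry cache over the pack list: it probes the pack that answered last time first, wraps to the head of the list, and skips `last_found` when it comes around again, so each pack is visited at most once (the `(void *)1` sentinel means "no hit yet"). A hedged sketch of the same wrap-and-skip traversal over a plain linked list; the types and names are illustrative:

    #include <stdio.h>

    struct pack {
        int id;
        struct pack *next;
    };

    /* Visit every pack once, starting from a remembered hit.
     * last_found must point at a real node here; the original code
     * uses a (void *)1 sentinel before any hit has been recorded. */
    static void scan(struct pack *head, struct pack *last_found)
    {
        struct pack *p = last_found;

        while (p) {
            printf("probe pack %d\n", p->id);
            if (p == last_found)
                p = head;       /* wrap to the list start */
            else
                p = p->next;
            if (p == last_found)
                p = p->next;    /* skip the already-probed pack */
        }
    }

    int main(void)
    {
        struct pack c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        scan(&a, &b);   /* probes 2, then 1, then 3 */
        return 0;
    }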
abcb8655 JK |
2668 | /* |
2669 | * Store a list of sha1s that should not be discarded | |
2670 | * because they were either written too recently, or are | |
2671 | * reachable from another object that was. | |
2672 | * | |
2673 | * This is filled by get_object_list. | |
2674 | */ | |
910650d2 | 2675 | static struct oid_array recent_objects; |
abcb8655 | 2676 | |
4ce3621a | 2677 | static int loosened_object_can_be_discarded(const struct object_id *oid, |
d0d46abc JK |
2678 | unsigned long mtime) |
2679 | { | |
2680 | if (!unpack_unreachable_expiration) | |
2681 | return 0; | |
2682 | if (mtime > unpack_unreachable_expiration) | |
2683 | return 0; | |
910650d2 | 2684 | if (oid_array_lookup(&recent_objects, oid) >= 0) |
abcb8655 | 2685 | return 0; |
d0d46abc JK |
2686 | return 1; |
2687 | } | |
2688 | ||
ca11b212 NP |
2689 | static void loosen_unused_packed_objects(struct rev_info *revs) |
2690 | { | |
2691 | struct packed_git *p; | |
2692 | uint32_t i; | |
4ce3621a | 2693 | struct object_id oid; |
ca11b212 NP |
2694 | |
2695 | for (p = packed_git; p; p = p->next) { | |
79bc4c71 | 2696 | if (!p->pack_local || p->pack_keep) |
ca11b212 NP |
2697 | continue; |
2698 | ||
2699 | if (open_pack_index(p)) | |
2700 | die("cannot open pack index"); | |
2701 | ||
2702 | for (i = 0; i < p->num_objects; i++) { | |
4ce3621a | 2703 | nth_packed_object_oid(&oid, p, i); |
2704 | if (!packlist_find(&to_pack, oid.hash, NULL) && | |
2705 | !has_sha1_pack_kept_or_nonlocal(oid.hash) && | |
2706 | !loosened_object_can_be_discarded(&oid, p->mtime)) | |
2707 | if (force_object_loose(oid.hash, p->mtime)) | |
ca11b212 NP |
2708 | die("unable to force loose object"); |
2709 | } | |
2710 | } | |
2711 | } | |
2712 | ||
69e4b342 | 2713 | /* |
645c432d KS |
2714 | * This tracks any options which pack-reuse code expects to be on, or which a |
2715 | * reader of the pack might not understand, and which would therefore prevent | |
2716 | * blind reuse of what we have on disk. | |
69e4b342 JK |
2717 | */ |
2718 | static int pack_options_allow_reuse(void) | |
2719 | { | |
645c432d | 2720 | return pack_to_stdout && allow_ofs_delta; |
69e4b342 JK |
2721 | } |
2722 | ||
6b8fda2d VM |
2723 | static int get_object_list_from_bitmap(struct rev_info *revs) |
2724 | { | |
2725 | if (prepare_bitmap_walk(revs) < 0) | |
2726 | return -1; | |
2727 | ||
69e4b342 JK |
2728 | if (pack_options_allow_reuse() && |
2729 | !reuse_partial_packfile_from_bitmap( | |
6b8fda2d VM |
2730 | &reuse_packfile, |
2731 | &reuse_packfile_objects, | |
2732 | &reuse_packfile_offset)) { | |
2733 | assert(reuse_packfile_objects); | |
2734 | nr_result += reuse_packfile_objects; | |
78d2214e | 2735 | display_progress(progress_state, nr_result); |
6b8fda2d VM |
2736 | } |
2737 | ||
2738 | traverse_bitmap_commit_list(&add_object_entry_from_bitmap); | |
2739 | return 0; | |
2740 | } | |
2741 | ||
abcb8655 | 2742 | static void record_recent_object(struct object *obj, |
de1e67d0 | 2743 | const char *name, |
abcb8655 JK |
2744 | void *data) |
2745 | { | |
910650d2 | 2746 | oid_array_append(&recent_objects, &obj->oid); |
abcb8655 JK |
2747 | } |
2748 | ||
2749 | static void record_recent_commit(struct commit *commit, void *data) | |
2750 | { | |
910650d2 | 2751 | oid_array_append(&recent_objects, &commit->object.oid); |
abcb8655 JK |
2752 | } |
2753 | ||
8d1d8f83 | 2754 | static void get_object_list(int ac, const char **av) |
b5d97e6b JH |
2755 | { |
2756 | struct rev_info revs; | |
2757 | char line[1000]; | |
b5d97e6b JH |
2758 | int flags = 0; |
2759 | ||
b5d97e6b JH |
2760 | init_revisions(&revs, NULL); |
2761 | save_commit_buffer = 0; | |
b5d97e6b JH |
2762 | setup_revisions(ac, av, &revs, NULL); |
2763 | ||
b790e0f6 NTND |
2764 | /* make sure shallows are read */ |
2765 | is_repository_shallow(); | |
2766 | ||
b5d97e6b JH |
2767 | while (fgets(line, sizeof(line), stdin) != NULL) { |
2768 | int len = strlen(line); | |
872c930d | 2769 | if (len && line[len - 1] == '\n') |
b5d97e6b JH |
2770 | line[--len] = 0; |
2771 | if (!len) | |
2772 | break; | |
2773 | if (*line == '-') { | |
2774 | if (!strcmp(line, "--not")) { | |
2775 | flags ^= UNINTERESTING; | |
7cc8f971 | 2776 | write_bitmap_index = 0; |
b5d97e6b JH |
2777 | continue; |
2778 | } | |
b790e0f6 NTND |
2779 | if (starts_with(line, "--shallow ")) { |
2780 | unsigned char sha1[20]; | |
2781 | if (get_sha1_hex(line + 10, sha1)) | |
2782 | die("not an SHA-1 '%s'", line + 10); | |
2783 | register_shallow(sha1); | |
f7f91086 | 2784 | use_bitmap_index = 0; |
b790e0f6 NTND |
2785 | continue; |
2786 | } | |
b5d97e6b JH |
2787 | die("not a rev '%s'", line); |
2788 | } | |
8e676e8b | 2789 | if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME)) |
b5d97e6b JH |
2790 | die("bad revision '%s'", line); |
2791 | } | |
2792 | ||
6b8fda2d VM |
2793 | if (use_bitmap_index && !get_object_list_from_bitmap(&revs)) |
2794 | return; | |
2795 | ||
3d51e1b5 MK |
2796 | if (prepare_revision_walk(&revs)) |
2797 | die("revision walk setup failed"); | |
e76a5fb4 | 2798 | mark_edges_uninteresting(&revs, show_edge); |
11c211fa | 2799 | traverse_commit_list(&revs, show_commit, show_object, NULL); |
08cdfb13 | 2800 | |
abcb8655 JK |
2801 | if (unpack_unreachable_expiration) { |
2802 | revs.ignore_missing_links = 1; | |
2803 | if (add_unseen_recent_objects_to_traversal(&revs, | |
2804 | unpack_unreachable_expiration)) | |
2805 | die("unable to add recent objects"); | |
2806 | if (prepare_revision_walk(&revs)) | |
2807 | die("revision walk setup failed"); | |
2808 | traverse_commit_list(&revs, record_recent_commit, | |
2809 | record_recent_object, NULL); | |
2810 | } | |
2811 | ||
08cdfb13 JH |
2812 | if (keep_unreachable) |
2813 | add_objects_in_unpacked_packs(&revs); | |
e26a8c47 JK |
2814 | if (pack_loose_unreachable) |
2815 | add_unreachable_loose_objects(); | |
ca11b212 NP |
2816 | if (unpack_unreachable) |
2817 | loosen_unused_packed_objects(&revs); | |
abcb8655 | 2818 | |
910650d2 | 2819 | oid_array_clear(&recent_objects); |
b5d97e6b JH |
2820 | } |
2821 | ||
99fb6e04 NTND |
2822 | static int option_parse_index_version(const struct option *opt, |
2823 | const char *arg, int unset) | |
2824 | { | |
2825 | char *c; | |
2826 | const char *val = arg; | |
2827 | pack_idx_opts.version = strtoul(val, &c, 10); | |
2828 | if (pack_idx_opts.version > 2) | |
2829 | die(_("unsupported index version %s"), val); | |
2830 | if (*c == ',' && c[1]) | |
2831 | pack_idx_opts.off32_limit = strtoul(c+1, &c, 0); | |
2832 | if (*c || pack_idx_opts.off32_limit & 0x80000000) | |
2833 | die(_("bad index version '%s'"), val); | |
2834 | return 0; | |
2835 | } | |
2836 | ||
7e52f566 JK |
2837 | static int option_parse_unpack_unreachable(const struct option *opt, |
2838 | const char *arg, int unset) | |
2839 | { | |
2840 | if (unset) { | |
2841 | unpack_unreachable = 0; | |
2842 | unpack_unreachable_expiration = 0; | |
2843 | } | |
2844 | else { | |
2845 | unpack_unreachable = 1; | |
2846 | if (arg) | |
2847 | unpack_unreachable_expiration = approxidate(arg); | |
2848 | } | |
2849 | return 0; | |
2850 | } | |
2851 | ||
b5d97e6b JH |
2852 | int cmd_pack_objects(int argc, const char **argv, const char *prefix) |
2853 | { | |
b5d97e6b | 2854 | int use_internal_rev_list = 0; |
8d1d8f83 | 2855 | int thin = 0; |
2dacf26d | 2856 | int shallow = 0; |
4f366275 | 2857 | int all_progress_implied = 0; |
edfbb2aa | 2858 | struct argv_array rp = ARGV_ARRAY_INIT; |
99fb6e04 | 2859 | int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0; |
c90f9e13 | 2860 | int rev_list_index = 0; |
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

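	/*
	 * Packing must operate on objects as they really are; make sure
	 * the refs/replace/ mechanism does not substitute replacement
	 * objects during traversal.
	 */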
	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
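	/*
	 * Exactly one of --stdout or a <base-name> argument must be
	 * given, and nothing may remain on the command line once the
	 * base name has been taken.
	 */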
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

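	/*
	 * Assemble the argument vector for the internal revision walk;
	 * with --revs this behaves much like feeding the same arguments
	 * to "git rev-list --objects".
	 */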
	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
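	/*
	 * -1 asks zlib for its default level; anything else must fall
	 * within zlib's 0..Z_BEST_COMPRESSION (9) range.
	 */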
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}
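	/*
	 * A thin pack may delta against objects the receiver is assumed
	 * to already have, which never enter the pack itself; such a
	 * pack cannot be indexed on disk as-is, hence --stdout only.
	 */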
	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");
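	/*
	 * The age cutoff is only trusted when the walk covered every
	 * ref, reflog entry and the index; otherwise disable it so
	 * nothing is dropped as expired.
	 */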
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "Soft" reasons not to use bitmaps: for an on-disk repack we
	 * want, by default,
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed
	 *   objects are packed in a suboptimal order), and
	 *
	 * - to use the more robust pack-generation codepath (avoiding
	 *   possible bugs in the bitmap code and possible bitmap index
	 *   corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();
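	/*
	 * If no local pack actually carries a .keep file, honoring
	 * .keep files is a no-op; clear the flag so the per-object
	 * checks later can skip the lookup entirely.
	 */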
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
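	/*
	 * Summary line such as "Total 250 (delta 100), reused 200
	 * (delta 80)"; "reused" counts objects whose on-disk
	 * representation was copied verbatim from an existing pack
	 * rather than recompressed or redeltified.
	 */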
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}
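
/*
 * A sketch of typical plumbing usage (names are illustrative):
 *
 *	git rev-list --objects HEAD | git pack-objects pack
 *
 * writes pack-<sha1>.pack and pack-<sha1>.idx and prints the pack name
 * on stdout, while
 *
 *	echo HEAD | git pack-objects --revs --stdout >transfer.pack
 *
 * streams an index-less pack suitable for transport.
 */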