1 #include "cache.h"
2 #include "config.h"
3 #include "csum-file.h"
4 #include "dir.h"
5 #include "lockfile.h"
6 #include "packfile.h"
7 #include "object-store.h"
8 #include "hash-lookup.h"
9 #include "midx.h"
10 #include "progress.h"
11 #include "trace2.h"
12 #include "run-command.h"
13 #include "repository.h"
14 #include "chunk-format.h"
15 #include "pack.h"
16
17 #define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
18 #define MIDX_VERSION 1
19 #define MIDX_BYTE_FILE_VERSION 4
20 #define MIDX_BYTE_HASH_VERSION 5
21 #define MIDX_BYTE_NUM_CHUNKS 6
22 #define MIDX_BYTE_NUM_PACKS 8
23 #define MIDX_HEADER_SIZE 12
24 #define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
25
26 #define MIDX_CHUNK_ALIGNMENT 4
27 #define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
28 #define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
29 #define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
30 #define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
31 #define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
32 #define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
33 #define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
34 #define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
35 #define MIDX_LARGE_OFFSET_NEEDED 0x80000000
36
37 #define PACK_EXPIRED UINT_MAX
38
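/*
 * Rough on-disk layout implied by the constants above and by the writer
 * code below (the format documentation under Documentation/technical/ in
 * git.git is authoritative):
 *
 *   12-byte header (see write_midx_header()): "MIDX" signature, file
 *   version, hash version, chunk count, one unused byte, pack count;
 *   then the chunk table of contents (chunk-format.c);
 *   then the PNAM, OIDF, OIDL and OOFF chunks, plus LOFF when any
 *   object offset does not fit in 32 bits;
 *   then a trailing checksum of the_hash_algo->rawsz bytes.
 *
 * MIDX_MIN_SIZE is therefore a header plus a trailing checksum with no
 * chunks at all.
 */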
39 static uint8_t oid_version(void)
40 {
41 switch (hash_algo_by_ptr(the_hash_algo)) {
42 case GIT_HASH_SHA1:
43 return 1;
44 case GIT_HASH_SHA256:
45 return 2;
46 default:
47 die(_("invalid hash version"));
48 }
49 }
50
51 static const unsigned char *get_midx_checksum(struct multi_pack_index *m)
52 {
53 return m->data + m->data_len - the_hash_algo->rawsz;
54 }
55
56 static char *get_midx_filename(const char *object_dir)
57 {
58 return xstrfmt("%s/pack/multi-pack-index", object_dir);
59 }
60
61 char *get_midx_rev_filename(struct multi_pack_index *m)
62 {
63 return xstrfmt("%s/pack/multi-pack-index-%s.rev",
64 m->object_dir, hash_to_hex(get_midx_checksum(m)));
65 }
66
67 static int midx_read_oid_fanout(const unsigned char *chunk_start,
68 size_t chunk_size, void *data)
69 {
70 struct multi_pack_index *m = data;
71 m->chunk_oid_fanout = (uint32_t *)chunk_start;
72
73 if (chunk_size != 4 * 256) {
74 error(_("multi-pack-index OID fanout is of the wrong size"));
75 return 1;
76 }
77 return 0;
78 }
79
80 struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local)
81 {
82 struct multi_pack_index *m = NULL;
83 int fd;
84 struct stat st;
85 size_t midx_size;
86 void *midx_map = NULL;
87 uint32_t hash_version;
88 char *midx_name = get_midx_filename(object_dir);
89 uint32_t i;
90 const char *cur_pack_name;
91 struct chunkfile *cf = NULL;
92
93 fd = git_open(midx_name);
94
95 if (fd < 0)
96 goto cleanup_fail;
97 if (fstat(fd, &st)) {
98 error_errno(_("failed to read %s"), midx_name);
99 goto cleanup_fail;
100 }
101
102 midx_size = xsize_t(st.st_size);
103
104 if (midx_size < MIDX_MIN_SIZE) {
105 error(_("multi-pack-index file %s is too small"), midx_name);
106 goto cleanup_fail;
107 }
108
109 FREE_AND_NULL(midx_name);
110
111 midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
112 close(fd);
113
114 FLEX_ALLOC_STR(m, object_dir, object_dir);
115 m->data = midx_map;
116 m->data_len = midx_size;
117 m->local = local;
118
119 m->signature = get_be32(m->data);
120 if (m->signature != MIDX_SIGNATURE)
121 die(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"),
122 m->signature, MIDX_SIGNATURE);
123
124 m->version = m->data[MIDX_BYTE_FILE_VERSION];
125 if (m->version != MIDX_VERSION)
126 die(_("multi-pack-index version %d not recognized"),
127 m->version);
128
129 hash_version = m->data[MIDX_BYTE_HASH_VERSION];
130 if (hash_version != oid_version()) {
131 error(_("multi-pack-index hash version %u does not match version %u"),
132 hash_version, oid_version());
133 goto cleanup_fail;
134 }
135 m->hash_len = the_hash_algo->rawsz;
136
137 m->num_chunks = m->data[MIDX_BYTE_NUM_CHUNKS];
138
139 m->num_packs = get_be32(m->data + MIDX_BYTE_NUM_PACKS);
140
141 cf = init_chunkfile(NULL);
142
143 if (read_table_of_contents(cf, m->data, midx_size,
144 MIDX_HEADER_SIZE, m->num_chunks))
145 goto cleanup_fail;
146
147 if (pair_chunk(cf, MIDX_CHUNKID_PACKNAMES, &m->chunk_pack_names) == CHUNK_NOT_FOUND)
148 die(_("multi-pack-index missing required pack-name chunk"));
149 if (read_chunk(cf, MIDX_CHUNKID_OIDFANOUT, midx_read_oid_fanout, m) == CHUNK_NOT_FOUND)
150 die(_("multi-pack-index missing required OID fanout chunk"));
151 if (pair_chunk(cf, MIDX_CHUNKID_OIDLOOKUP, &m->chunk_oid_lookup) == CHUNK_NOT_FOUND)
152 die(_("multi-pack-index missing required OID lookup chunk"));
153 if (pair_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS, &m->chunk_object_offsets) == CHUNK_NOT_FOUND)
154 die(_("multi-pack-index missing required object offsets chunk"));
155
156 pair_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS, &m->chunk_large_offsets);
157
158 m->num_objects = ntohl(m->chunk_oid_fanout[255]);
159
160 CALLOC_ARRAY(m->pack_names, m->num_packs);
161 CALLOC_ARRAY(m->packs, m->num_packs);
162
163 cur_pack_name = (const char *)m->chunk_pack_names;
164 for (i = 0; i < m->num_packs; i++) {
165 m->pack_names[i] = cur_pack_name;
166
167 cur_pack_name += strlen(cur_pack_name) + 1;
168
169 if (i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0)
170 die(_("multi-pack-index pack names out of order: '%s' before '%s'"),
171 m->pack_names[i - 1],
172 m->pack_names[i]);
173 }
174
175 trace2_data_intmax("midx", the_repository, "load/num_packs", m->num_packs);
176 trace2_data_intmax("midx", the_repository, "load/num_objects", m->num_objects);
177
178 return m;
179
180 cleanup_fail:
181 free(m);
182 free(midx_name);
183 free(cf);
184 if (midx_map)
185 munmap(midx_map, midx_size);
186 if (0 <= fd)
187 close(fd);
188 return NULL;
189 }
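/*
 * Illustrative use only (no such caller lives in this file): loading and
 * releasing a MIDX for an object directory looks roughly like
 *
 *	struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
 *	if (m) {
 *		... consult m->num_packs, m->num_objects, bsearch_midx() ...
 *		close_midx(m);
 *	}
 *
 * Note that close_midx() unmaps the data and frees the per-pack arrays,
 * but does not free the struct multi_pack_index itself.
 */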
190
191 void close_midx(struct multi_pack_index *m)
192 {
193 uint32_t i;
194
195 if (!m)
196 return;
197
198 munmap((unsigned char *)m->data, m->data_len);
199
200 for (i = 0; i < m->num_packs; i++) {
201 if (m->packs[i])
202 m->packs[i]->multi_pack_index = 0;
203 }
204 FREE_AND_NULL(m->packs);
205 FREE_AND_NULL(m->pack_names);
206 }
207
208 int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id)
209 {
210 struct strbuf pack_name = STRBUF_INIT;
211 struct packed_git *p;
212
213 if (pack_int_id >= m->num_packs)
214 die(_("bad pack-int-id: %u (%u total packs)"),
215 pack_int_id, m->num_packs);
216
217 if (m->packs[pack_int_id])
218 return 0;
219
220 strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir,
221 m->pack_names[pack_int_id]);
222
223 p = add_packed_git(pack_name.buf, pack_name.len, m->local);
224 strbuf_release(&pack_name);
225
226 if (!p)
227 return 1;
228
229 p->multi_pack_index = 1;
230 m->packs[pack_int_id] = p;
231 install_packed_git(r, p);
232 list_add_tail(&p->mru, &r->objects->packed_git_mru);
233
234 return 0;
235 }
236
237 int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result)
238 {
239 return bsearch_hash(oid->hash, m->chunk_oid_fanout, m->chunk_oid_lookup,
240 the_hash_algo->rawsz, result);
241 }
242
243 struct object_id *nth_midxed_object_oid(struct object_id *oid,
244 struct multi_pack_index *m,
245 uint32_t n)
246 {
247 if (n >= m->num_objects)
248 return NULL;
249
250 oidread(oid, m->chunk_oid_lookup + m->hash_len * n);
251 return oid;
252 }
253
254 off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
255 {
256 const unsigned char *offset_data;
257 uint32_t offset32;
258
259 offset_data = m->chunk_object_offsets + (off_t)pos * MIDX_CHUNK_OFFSET_WIDTH;
260 offset32 = get_be32(offset_data + sizeof(uint32_t));
261
262 if (m->chunk_large_offsets && offset32 & MIDX_LARGE_OFFSET_NEEDED) {
263 if (sizeof(off_t) < sizeof(uint64_t))
264 die(_("multi-pack-index stores a 64-bit offset, but off_t is too small"));
265
266 offset32 ^= MIDX_LARGE_OFFSET_NEEDED;
267 return get_be64(m->chunk_large_offsets + sizeof(uint64_t) * offset32);
268 }
269
270 return offset32;
271 }
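/*
 * Worked example of the encoding handled above: with a LOFF chunk
 * present, an object at pack offset 0x100000000 stores
 * MIDX_LARGE_OFFSET_NEEDED | n in its OOFF slot, where n is the index of
 * the real 64-bit offset within the LOFF chunk; an object at offset
 * 0x1000 stores 0x1000 directly.
 */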
272
273 uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
274 {
275 return get_be32(m->chunk_object_offsets +
276 (off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
277 }
278
279 static int nth_midxed_pack_entry(struct repository *r,
280 struct multi_pack_index *m,
281 struct pack_entry *e,
282 uint32_t pos)
283 {
284 uint32_t pack_int_id;
285 struct packed_git *p;
286
287 if (pos >= m->num_objects)
288 return 0;
289
290 pack_int_id = nth_midxed_pack_int_id(m, pos);
291
292 if (prepare_midx_pack(r, m, pack_int_id))
293 return 0;
294 p = m->packs[pack_int_id];
295
296 /*
297 * We are about to tell the caller where they can locate the
298  * requested object. We'd better make sure the packfile is
299 * still here and can be accessed before supplying that
300 * answer, as it may have been deleted since the MIDX was
301 * loaded!
302 */
303 if (!is_pack_valid(p))
304 return 0;
305
306 if (p->num_bad_objects) {
307 uint32_t i;
308 struct object_id oid;
309 nth_midxed_object_oid(&oid, m, pos);
310 for (i = 0; i < p->num_bad_objects; i++)
311 if (hasheq(oid.hash,
312 p->bad_object_sha1 + the_hash_algo->rawsz * i))
313 return 0;
314 }
315
316 e->offset = nth_midxed_offset(m, pos);
317 e->p = p;
318
319 return 1;
320 }
321
322 int fill_midx_entry(struct repository * r,
323 const struct object_id *oid,
324 struct pack_entry *e,
325 struct multi_pack_index *m)
326 {
327 uint32_t pos;
328
329 if (!bsearch_midx(oid, m, &pos))
330 return 0;
331
332 return nth_midxed_pack_entry(r, m, e, pos);
333 }
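/*
 * A typical lookup through the MIDX, as a sketch (the caller-side names
 * are hypothetical):
 *
 *	struct pack_entry e;
 *	if (fill_midx_entry(the_repository, &oid, &e, m))
 *		... the object lives in pack e.p at offset e.offset ...
 */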
334
335 /* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
336 static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
337 const char *idx_name)
338 {
339 /* Skip past any initial matching prefix. */
340 while (*idx_name && *idx_name == *idx_or_pack_name) {
341 idx_name++;
342 idx_or_pack_name++;
343 }
344
345 /*
346 * If we didn't match completely, we may have matched "pack-1234." and
347 * be left with "idx" and "pack" respectively, which is also OK. We do
348 * not have to check for "idx" and "idx", because that would have been
349 * a complete match (and in that case these strcmps will be false, but
350  * we'll correctly return 0 from the final strcmp() below).
351 *
352 * Technically this matches "fooidx" and "foopack", but we'd never have
353 * such names in the first place.
354 */
355 if (!strcmp(idx_name, "idx") && !strcmp(idx_or_pack_name, "pack"))
356 return 0;
357
358 /*
359 * This not only checks for a complete match, but also orders based on
360 * the first non-identical character, which means our ordering will
361 * match a raw strcmp(). That makes it OK to use this to binary search
362 * a naively-sorted list.
363 */
364 return strcmp(idx_or_pack_name, idx_name);
365 }
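/*
 * For example, cmp_idx_or_pack_name("pack-1234.pack", "pack-1234.idx")
 * and cmp_idx_or_pack_name("pack-1234.idx", "pack-1234.idx") both return
 * 0, while names that differ before the extension compare exactly as
 * strcmp() would.
 */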
366
367 int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name)
368 {
369 uint32_t first = 0, last = m->num_packs;
370
371 while (first < last) {
372 uint32_t mid = first + (last - first) / 2;
373 const char *current;
374 int cmp;
375
376 current = m->pack_names[mid];
377 cmp = cmp_idx_or_pack_name(idx_or_pack_name, current);
378 if (!cmp)
379 return 1;
380 if (cmp > 0) {
381 first = mid + 1;
382 continue;
383 }
384 last = mid;
385 }
386
387 return 0;
388 }
389
390 int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local)
391 {
392 struct multi_pack_index *m;
393 struct multi_pack_index *m_search;
394
395 prepare_repo_settings(r);
396 if (!r->settings.core_multi_pack_index)
397 return 0;
398
399 for (m_search = r->objects->multi_pack_index; m_search; m_search = m_search->next)
400 if (!strcmp(object_dir, m_search->object_dir))
401 return 1;
402
403 m = load_multi_pack_index(object_dir, local);
404
405 if (m) {
406 struct multi_pack_index *mp = r->objects->multi_pack_index;
407 if (mp) {
408 m->next = mp->next;
409 mp->next = m;
410 } else
411 r->objects->multi_pack_index = m;
412 return 1;
413 }
414
415 return 0;
416 }
417
418 static size_t write_midx_header(struct hashfile *f,
419 unsigned char num_chunks,
420 uint32_t num_packs)
421 {
422 hashwrite_be32(f, MIDX_SIGNATURE);
423 hashwrite_u8(f, MIDX_VERSION);
424 hashwrite_u8(f, oid_version());
425 hashwrite_u8(f, num_chunks);
426 hashwrite_u8(f, 0); /* unused */
427 hashwrite_be32(f, num_packs);
428
429 return MIDX_HEADER_SIZE;
430 }
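/*
 * As a concrete (illustrative) example, a SHA-1 MIDX with 5 chunks
 * covering 3 packs begins with the 12 header bytes
 *
 *	4d 49 44 58  01   01   05   00   00 00 00 03
 *	 M  I  D  X  ver  hash nchk pad  num_packs
 */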
431
432 struct pack_info {
433 uint32_t orig_pack_int_id;
434 char *pack_name;
435 struct packed_git *p;
436 unsigned expired : 1;
437 };
438
439 static int pack_info_compare(const void *_a, const void *_b)
440 {
441 struct pack_info *a = (struct pack_info *)_a;
442 struct pack_info *b = (struct pack_info *)_b;
443 return strcmp(a->pack_name, b->pack_name);
444 }
445
446 static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
447 {
448 const char *pack_name = _va;
449 const struct pack_info *compar = _vb;
450
451 return cmp_idx_or_pack_name(pack_name, compar->pack_name);
452 }
453
454 struct write_midx_context {
455 struct pack_info *info;
456 uint32_t nr;
457 uint32_t alloc;
458 struct multi_pack_index *m;
459 struct progress *progress;
460 unsigned pack_paths_checked;
461
462 struct pack_midx_entry *entries;
463 uint32_t entries_nr;
464
465 uint32_t *pack_perm;
466 uint32_t *pack_order;
467 unsigned large_offsets_needed:1;
468 uint32_t num_large_offsets;
469
470 int preferred_pack_idx;
471 };
472
473 static void add_pack_to_midx(const char *full_path, size_t full_path_len,
474 const char *file_name, void *data)
475 {
476 struct write_midx_context *ctx = data;
477
478 if (ends_with(file_name, ".idx")) {
479 display_progress(ctx->progress, ++ctx->pack_paths_checked);
480 if (ctx->m && midx_contains_pack(ctx->m, file_name))
481 return;
482
483 ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
484
485 ctx->info[ctx->nr].p = add_packed_git(full_path,
486 full_path_len,
487 0);
488
489 if (!ctx->info[ctx->nr].p) {
490 warning(_("failed to add packfile '%s'"),
491 full_path);
492 return;
493 }
494
495 if (open_pack_index(ctx->info[ctx->nr].p)) {
496 warning(_("failed to open pack-index '%s'"),
497 full_path);
498 close_pack(ctx->info[ctx->nr].p);
499 FREE_AND_NULL(ctx->info[ctx->nr].p);
500 return;
501 }
502
503 ctx->info[ctx->nr].pack_name = xstrdup(file_name);
504 ctx->info[ctx->nr].orig_pack_int_id = ctx->nr;
505 ctx->info[ctx->nr].expired = 0;
506 ctx->nr++;
507 }
508 }
509
510 struct pack_midx_entry {
511 struct object_id oid;
512 uint32_t pack_int_id;
513 time_t pack_mtime;
514 uint64_t offset;
515 unsigned preferred : 1;
516 };
517
518 static int midx_oid_compare(const void *_a, const void *_b)
519 {
520 const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
521 const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
522 int cmp = oidcmp(&a->oid, &b->oid);
523
524 if (cmp)
525 return cmp;
526
527 /* Sort objects in a preferred pack first when multiple copies exist. */
528 if (a->preferred > b->preferred)
529 return -1;
530 if (a->preferred < b->preferred)
531 return 1;
532
533 if (a->pack_mtime > b->pack_mtime)
534 return -1;
535 else if (a->pack_mtime < b->pack_mtime)
536 return 1;
537
538 return a->pack_int_id - b->pack_int_id;
539 }
540
541 static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
542 struct pack_midx_entry *e,
543 uint32_t pos)
544 {
545 if (pos >= m->num_objects)
546 return 1;
547
548 nth_midxed_object_oid(&e->oid, m, pos);
549 e->pack_int_id = nth_midxed_pack_int_id(m, pos);
550 e->offset = nth_midxed_offset(m, pos);
551
552 /* consider objects in midx to be from "old" packs */
553 e->pack_mtime = 0;
554 return 0;
555 }
556
557 static void fill_pack_entry(uint32_t pack_int_id,
558 struct packed_git *p,
559 uint32_t cur_object,
560 struct pack_midx_entry *entry,
561 int preferred)
562 {
563 if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
564 die(_("failed to locate object %d in packfile"), cur_object);
565
566 entry->pack_int_id = pack_int_id;
567 entry->pack_mtime = p->mtime;
568
569 entry->offset = nth_packed_object_offset(p, cur_object);
570 entry->preferred = !!preferred;
571 }
572
573 /*
574 * It is possible to artificially get into a state where there are many
575 * duplicate copies of objects. That can create high memory pressure if
576 * we are to create a list of all objects before de-duplication. To reduce
577 * this memory pressure without a significant performance drop, automatically
578 * group objects by the first byte of their object id. Use the IDX fanout
579 * tables to group the data, copy to a local array, then sort.
580 *
581 * Copy only the de-duplicated entries (selected by most-recent modified time
582 * of a packfile containing the object).
583 */
584 static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
585 struct pack_info *info,
586 uint32_t nr_packs,
587 uint32_t *nr_objects,
588 int preferred_pack)
589 {
590 uint32_t cur_fanout, cur_pack, cur_object;
591 uint32_t alloc_fanout, alloc_objects, total_objects = 0;
592 struct pack_midx_entry *entries_by_fanout = NULL;
593 struct pack_midx_entry *deduplicated_entries = NULL;
594 uint32_t start_pack = m ? m->num_packs : 0;
595
596 for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
597 total_objects += info[cur_pack].p->num_objects;
598
599 /*
600 * As we de-duplicate by fanout value, we expect the fanout
601 * slices to be evenly distributed, with some noise. Hence,
602 * allocate slightly more than one 256th.
603 */
604 alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16;
605
606 ALLOC_ARRAY(entries_by_fanout, alloc_fanout);
607 ALLOC_ARRAY(deduplicated_entries, alloc_objects);
608 *nr_objects = 0;
609
610 for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
611 uint32_t nr_fanout = 0;
612
613 if (m) {
614 uint32_t start = 0, end;
615
616 if (cur_fanout)
617 start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
618 end = ntohl(m->chunk_oid_fanout[cur_fanout]);
619
620 for (cur_object = start; cur_object < end; cur_object++) {
621 ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
622 nth_midxed_pack_midx_entry(m,
623 &entries_by_fanout[nr_fanout],
624 cur_object);
625 if (nth_midxed_pack_int_id(m, cur_object) == preferred_pack)
626 entries_by_fanout[nr_fanout].preferred = 1;
627 else
628 entries_by_fanout[nr_fanout].preferred = 0;
629 nr_fanout++;
630 }
631 }
632
633 for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
634 uint32_t start = 0, end;
635 int preferred = cur_pack == preferred_pack;
636
637 if (cur_fanout)
638 start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
639 end = get_pack_fanout(info[cur_pack].p, cur_fanout);
640
641 for (cur_object = start; cur_object < end; cur_object++) {
642 ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
643 fill_pack_entry(cur_pack,
644 info[cur_pack].p,
645 cur_object,
646 &entries_by_fanout[nr_fanout],
647 preferred);
648 nr_fanout++;
649 }
650 }
651
652 QSORT(entries_by_fanout, nr_fanout, midx_oid_compare);
653
654 /*
655 * The batch is now sorted by OID and then mtime (descending).
656 * Take only the first duplicate.
657 */
658 for (cur_object = 0; cur_object < nr_fanout; cur_object++) {
659 if (cur_object && oideq(&entries_by_fanout[cur_object - 1].oid,
660 &entries_by_fanout[cur_object].oid))
661 continue;
662
663 ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects);
664 memcpy(&deduplicated_entries[*nr_objects],
665 &entries_by_fanout[cur_object],
666 sizeof(struct pack_midx_entry));
667 (*nr_objects)++;
668 }
669 }
670
671 free(entries_by_fanout);
672 return deduplicated_entries;
673 }
674
675 static int write_midx_pack_names(struct hashfile *f, void *data)
676 {
677 struct write_midx_context *ctx = data;
678 uint32_t i;
679 unsigned char padding[MIDX_CHUNK_ALIGNMENT];
680 size_t written = 0;
681
682 for (i = 0; i < ctx->nr; i++) {
683 size_t writelen;
684
685 if (ctx->info[i].expired)
686 continue;
687
688 if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
689 BUG("incorrect pack-file order: %s before %s",
690 ctx->info[i - 1].pack_name,
691 ctx->info[i].pack_name);
692
693 writelen = strlen(ctx->info[i].pack_name) + 1;
694 hashwrite(f, ctx->info[i].pack_name, writelen);
695 written += writelen;
696 }
697
698 /* add padding to be aligned */
699 i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
700 if (i < MIDX_CHUNK_ALIGNMENT) {
701 memset(padding, 0, sizeof(padding));
702 hashwrite(f, padding, i);
703 }
704
705 return 0;
706 }
707
708 static int write_midx_oid_fanout(struct hashfile *f,
709 void *data)
710 {
711 struct write_midx_context *ctx = data;
712 struct pack_midx_entry *list = ctx->entries;
713 struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
714 uint32_t count = 0;
715 uint32_t i;
716
717 /*
718 * Write the first-level table (the list is sorted,
719 * but we use a 256-entry lookup to be able to avoid
720 * having to do eight extra binary search iterations).
721 */
722 for (i = 0; i < 256; i++) {
723 struct pack_midx_entry *next = list;
724
725 while (next < last && next->oid.hash[0] == i) {
726 count++;
727 next++;
728 }
729
730 hashwrite_be32(f, count);
731 list = next;
732 }
733
734 return 0;
735 }
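/*
 * Note that "count" above is cumulative, so fanout[i] holds the number of
 * objects whose OID starts with a byte <= i; fanout[255] is the total
 * object count, which load_multi_pack_index() later reads back as
 * m->num_objects.
 */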
736
737 static int write_midx_oid_lookup(struct hashfile *f,
738 void *data)
739 {
740 struct write_midx_context *ctx = data;
741 unsigned char hash_len = the_hash_algo->rawsz;
742 struct pack_midx_entry *list = ctx->entries;
743 uint32_t i;
744
745 for (i = 0; i < ctx->entries_nr; i++) {
746 struct pack_midx_entry *obj = list++;
747
748 if (i < ctx->entries_nr - 1) {
749 struct pack_midx_entry *next = list;
750 if (oidcmp(&obj->oid, &next->oid) >= 0)
751 BUG("OIDs not in order: %s >= %s",
752 oid_to_hex(&obj->oid),
753 oid_to_hex(&next->oid));
754 }
755
756 hashwrite(f, obj->oid.hash, (int)hash_len);
757 }
758
759 return 0;
760 }
761
762 static int write_midx_object_offsets(struct hashfile *f,
763 void *data)
764 {
765 struct write_midx_context *ctx = data;
766 struct pack_midx_entry *list = ctx->entries;
767 uint32_t i, nr_large_offset = 0;
768
769 for (i = 0; i < ctx->entries_nr; i++) {
770 struct pack_midx_entry *obj = list++;
771
772 if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
773 BUG("object %s is in an expired pack with int-id %d",
774 oid_to_hex(&obj->oid),
775 obj->pack_int_id);
776
777 hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
778
779 if (ctx->large_offsets_needed && obj->offset >> 31)
780 hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
781 else if (!ctx->large_offsets_needed && obj->offset >> 32)
782 BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
783 oid_to_hex(&obj->oid),
784 obj->offset);
785 else
786 hashwrite_be32(f, (uint32_t)obj->offset);
787 }
788
789 return 0;
790 }
791
792 static int write_midx_large_offsets(struct hashfile *f,
793 void *data)
794 {
795 struct write_midx_context *ctx = data;
796 struct pack_midx_entry *list = ctx->entries;
797 struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
798 uint32_t nr_large_offset = ctx->num_large_offsets;
799
800 while (nr_large_offset) {
801 struct pack_midx_entry *obj;
802 uint64_t offset;
803
804 if (list >= end)
805 BUG("too many large-offset objects");
806
807 obj = list++;
808 offset = obj->offset;
809
810 if (!(offset >> 31))
811 continue;
812
813 hashwrite_be64(f, offset);
814
815 nr_large_offset--;
816 }
817
818 return 0;
819 }
820
821 struct midx_pack_order_data {
822 uint32_t nr;
823 uint32_t pack;
824 off_t offset;
825 };
826
827 static int midx_pack_order_cmp(const void *va, const void *vb)
828 {
829 const struct midx_pack_order_data *a = va, *b = vb;
830 if (a->pack < b->pack)
831 return -1;
832 else if (a->pack > b->pack)
833 return 1;
834 else if (a->offset < b->offset)
835 return -1;
836 else if (a->offset > b->offset)
837 return 1;
838 else
839 return 0;
840 }
841
842 static uint32_t *midx_pack_order(struct write_midx_context *ctx)
843 {
844 struct midx_pack_order_data *data;
845 uint32_t *pack_order;
846 uint32_t i;
847
848 ALLOC_ARRAY(data, ctx->entries_nr);
849 for (i = 0; i < ctx->entries_nr; i++) {
850 struct pack_midx_entry *e = &ctx->entries[i];
851 data[i].nr = i;
852 data[i].pack = ctx->pack_perm[e->pack_int_id];
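/*
 * Setting a high bit on entries from non-preferred packs sorts every
 * preferred-pack object first; within a pack, the offset comparison in
 * midx_pack_order_cmp() keeps the entries in pack order.
 */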
853 if (!e->preferred)
854 data[i].pack |= (1U << 31);
855 data[i].offset = e->offset;
856 }
857
858 QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
859
860 ALLOC_ARRAY(pack_order, ctx->entries_nr);
861 for (i = 0; i < ctx->entries_nr; i++)
862 pack_order[i] = data[i].nr;
863 free(data);
864
865 return pack_order;
866 }
867
868 static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
869 struct write_midx_context *ctx)
870 {
871 struct strbuf buf = STRBUF_INIT;
872 const char *tmp_file;
873
874 strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
875
876 tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
877 midx_hash, WRITE_REV);
878
879 if (finalize_object_file(tmp_file, buf.buf))
880 die(_("cannot store reverse index file"));
881
882 strbuf_release(&buf);
883 }
884
885 static void clear_midx_files_ext(struct repository *r, const char *ext,
886 unsigned char *keep_hash);
887
888 static int midx_checksum_valid(struct multi_pack_index *m)
889 {
890 return hashfile_checksum_valid(m->data, m->data_len);
891 }
892
893 static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
894 struct string_list *packs_to_drop,
895 const char *preferred_pack_name,
896 unsigned flags)
897 {
898 char *midx_name;
899 unsigned char midx_hash[GIT_MAX_RAWSZ];
900 uint32_t i;
901 struct hashfile *f = NULL;
902 struct lock_file lk;
903 struct write_midx_context ctx = { 0 };
904 int pack_name_concat_len = 0;
905 int dropped_packs = 0;
906 int result = 0;
907 struct chunkfile *cf;
908
909 midx_name = get_midx_filename(object_dir);
910 if (safe_create_leading_directories(midx_name))
911 die_errno(_("unable to create leading directories of %s"),
912 midx_name);
913
914 if (m)
915 ctx.m = m;
916 else
917 ctx.m = load_multi_pack_index(object_dir, 1);
918
919 if (ctx.m && !midx_checksum_valid(ctx.m)) {
920 warning(_("ignoring existing multi-pack-index; checksum mismatch"));
921 ctx.m = NULL;
922 }
923
924 ctx.nr = 0;
925 ctx.alloc = ctx.m ? ctx.m->num_packs : 16;
926 ctx.info = NULL;
927 ALLOC_ARRAY(ctx.info, ctx.alloc);
928
929 if (ctx.m) {
930 for (i = 0; i < ctx.m->num_packs; i++) {
931 ALLOC_GROW(ctx.info, ctx.nr + 1, ctx.alloc);
932
933 ctx.info[ctx.nr].orig_pack_int_id = i;
934 ctx.info[ctx.nr].pack_name = xstrdup(ctx.m->pack_names[i]);
935 ctx.info[ctx.nr].p = NULL;
936 ctx.info[ctx.nr].expired = 0;
937 ctx.nr++;
938 }
939 }
940
941 ctx.pack_paths_checked = 0;
942 if (flags & MIDX_PROGRESS)
943 ctx.progress = start_delayed_progress(_("Adding packfiles to multi-pack-index"), 0);
944 else
945 ctx.progress = NULL;
946
947 for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
948 stop_progress(&ctx.progress);
949
950 if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop)
951 goto cleanup;
952
953 ctx.preferred_pack_idx = -1;
954 if (preferred_pack_name) {
955 for (i = 0; i < ctx.nr; i++) {
956 if (!cmp_idx_or_pack_name(preferred_pack_name,
957 ctx.info[i].pack_name)) {
958 ctx.preferred_pack_idx = i;
959 break;
960 }
961 }
962 }
963
964 ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
965 ctx.preferred_pack_idx);
966
967 ctx.large_offsets_needed = 0;
968 for (i = 0; i < ctx.entries_nr; i++) {
969 if (ctx.entries[i].offset > 0x7fffffff)
970 ctx.num_large_offsets++;
971 if (ctx.entries[i].offset > 0xffffffff)
972 ctx.large_offsets_needed = 1;
973 }
974
975 QSORT(ctx.info, ctx.nr, pack_info_compare);
976
977 if (packs_to_drop && packs_to_drop->nr) {
978 int drop_index = 0;
979 int missing_drops = 0;
980
981 for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
982 int cmp = strcmp(ctx.info[i].pack_name,
983 packs_to_drop->items[drop_index].string);
984
985 if (!cmp) {
986 drop_index++;
987 ctx.info[i].expired = 1;
988 } else if (cmp > 0) {
989 error(_("did not see pack-file %s to drop"),
990 packs_to_drop->items[drop_index].string);
991 drop_index++;
992 missing_drops++;
993 i--;
994 } else {
995 ctx.info[i].expired = 0;
996 }
997 }
998
999 if (missing_drops) {
1000 result = 1;
1001 goto cleanup;
1002 }
1003 }
1004
1005 /*
1006 * pack_perm stores a permutation between pack-int-ids from the
1007 * previous multi-pack-index to the new one we are writing:
1008 *
1009 * pack_perm[old_id] = new_id
1010 */
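/*
 * For example (hypothetical ids): with three packs whose sorted order
 * happens to match their original ids, and the pack with id 1 expired,
 * the loop below yields pack_perm = { 0, PACK_EXPIRED, 1 }; surviving
 * packs keep their relative order but are renumbered past the drop.
 */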
1011 ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
1012 for (i = 0; i < ctx.nr; i++) {
1013 if (ctx.info[i].expired) {
1014 dropped_packs++;
1015 ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
1016 } else {
1017 ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
1018 }
1019 }
1020
1021 for (i = 0; i < ctx.nr; i++) {
1022 if (!ctx.info[i].expired)
1023 pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
1024 }
1025
1026 /* Check that the preferred pack wasn't expired (if given). */
1027 if (preferred_pack_name) {
1028 struct pack_info *preferred = bsearch(preferred_pack_name,
1029 ctx.info, ctx.nr,
1030 sizeof(*ctx.info),
1031 idx_or_pack_name_cmp);
1032
1033 if (!preferred)
1034 warning(_("unknown preferred pack: '%s'"),
1035 preferred_pack_name);
1036 else {
1037 uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
1038 if (perm == PACK_EXPIRED)
1039 warning(_("preferred pack '%s' is expired"),
1040 preferred_pack_name);
1041 }
1042 }
1043
1044 if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
1045 pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
1046 (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
1047
1048 hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
1049 f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
1050
1051 if (ctx.m)
1052 close_midx(ctx.m);
1053
1054 if (ctx.nr - dropped_packs == 0) {
1055 error(_("no pack files to index."));
1056 result = 1;
1057 goto cleanup;
1058 }
1059
1060 cf = init_chunkfile(f);
1061
1062 add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
1063 write_midx_pack_names);
1064 add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
1065 write_midx_oid_fanout);
1066 add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
1067 (size_t)ctx.entries_nr * the_hash_algo->rawsz,
1068 write_midx_oid_lookup);
1069 add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
1070 (size_t)ctx.entries_nr * MIDX_CHUNK_OFFSET_WIDTH,
1071 write_midx_object_offsets);
1072
1073 if (ctx.large_offsets_needed)
1074 add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
1075 (size_t)ctx.num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH,
1076 write_midx_large_offsets);
1077
1078 write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
1079 write_chunkfile(cf, &ctx);
1080
1081 finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
1082 free_chunkfile(cf);
1083
1084 if (flags & MIDX_WRITE_REV_INDEX)
1085 ctx.pack_order = midx_pack_order(&ctx);
1086
1087 if (flags & MIDX_WRITE_REV_INDEX)
1088 write_midx_reverse_index(midx_name, midx_hash, &ctx);
1089 clear_midx_files_ext(the_repository, ".rev", midx_hash);
1090
1091 commit_lock_file(&lk);
1092
1093 cleanup:
1094 for (i = 0; i < ctx.nr; i++) {
1095 if (ctx.info[i].p) {
1096 close_pack(ctx.info[i].p);
1097 free(ctx.info[i].p);
1098 }
1099 free(ctx.info[i].pack_name);
1100 }
1101
1102 free(ctx.info);
1103 free(ctx.entries);
1104 free(ctx.pack_perm);
1105 free(ctx.pack_order);
1106 free(midx_name);
1107 return result;
1108 }
1109
1110 int write_midx_file(const char *object_dir,
1111 const char *preferred_pack_name,
1112 unsigned flags)
1113 {
1114 return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
1115 flags);
1116 }
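/*
 * A sketch of how a command-level caller (e.g. the multi-pack-index
 * builtin) might drive this; the error message here is illustrative only:
 *
 *	if (write_midx_file(object_dir, NULL, MIDX_PROGRESS))
 *		die(_("could not write multi-pack-index"));
 */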
1117
1118 struct clear_midx_data {
1119 char *keep;
1120 const char *ext;
1121 };
1122
1123 static void clear_midx_file_ext(const char *full_path, size_t full_path_len,
1124 const char *file_name, void *_data)
1125 {
1126 struct clear_midx_data *data = _data;
1127
1128 if (!(starts_with(file_name, "multi-pack-index-") &&
1129 ends_with(file_name, data->ext)))
1130 return;
1131 if (data->keep && !strcmp(data->keep, file_name))
1132 return;
1133
1134 if (unlink(full_path))
1135 die_errno(_("failed to remove %s"), full_path);
1136 }
1137
1138 static void clear_midx_files_ext(struct repository *r, const char *ext,
1139 unsigned char *keep_hash)
1140 {
1141 struct clear_midx_data data;
1142 memset(&data, 0, sizeof(struct clear_midx_data));
1143
1144 if (keep_hash)
1145 data.keep = xstrfmt("multi-pack-index-%s%s",
1146 hash_to_hex(keep_hash), ext);
1147 data.ext = ext;
1148
1149 for_each_file_in_pack_dir(r->objects->odb->path,
1150 clear_midx_file_ext,
1151 &data);
1152
1153 free(data.keep);
1154 }
1155
1156 void clear_midx_file(struct repository *r)
1157 {
1158 char *midx = get_midx_filename(r->objects->odb->path);
1159
1160 if (r->objects && r->objects->multi_pack_index) {
1161 close_midx(r->objects->multi_pack_index);
1162 r->objects->multi_pack_index = NULL;
1163 }
1164
1165 if (remove_path(midx))
1166 die(_("failed to clear multi-pack-index at %s"), midx);
1167
1168 clear_midx_files_ext(r, ".rev", NULL);
1169
1170 free(midx);
1171 }
1172
1173 static int verify_midx_error;
1174
1175 __attribute__((format (printf, 1, 2)))
1176 static void midx_report(const char *fmt, ...)
1177 {
1178 va_list ap;
1179 verify_midx_error = 1;
1180 va_start(ap, fmt);
1181 vfprintf(stderr, fmt, ap);
1182 fprintf(stderr, "\n");
1183 va_end(ap);
1184 }
1185
1186 struct pair_pos_vs_id
1187 {
1188 uint32_t pos;
1189 uint32_t pack_int_id;
1190 };
1191
1192 static int compare_pair_pos_vs_id(const void *_a, const void *_b)
1193 {
1194 struct pair_pos_vs_id *a = (struct pair_pos_vs_id *)_a;
1195 struct pair_pos_vs_id *b = (struct pair_pos_vs_id *)_b;
1196
1197 return b->pack_int_id - a->pack_int_id;
1198 }
1199
1200 /*
1201 * Limit calls to display_progress() for performance reasons.
1202 * The interval here was arbitrarily chosen.
1203 */
1204 #define SPARSE_PROGRESS_INTERVAL (1 << 12)
1205 #define midx_display_sparse_progress(progress, n) \
1206 do { \
1207 uint64_t _n = (n); \
1208 if ((_n & (SPARSE_PROGRESS_INTERVAL - 1)) == 0) \
1209 display_progress(progress, _n); \
1210 } while (0)
1211
1212 int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags)
1213 {
1214 struct pair_pos_vs_id *pairs = NULL;
1215 uint32_t i;
1216 struct progress *progress = NULL;
1217 struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
1218 verify_midx_error = 0;
1219
1220 if (!m) {
1221 int result = 0;
1222 struct stat sb;
1223 char *filename = get_midx_filename(object_dir);
1224 if (!stat(filename, &sb)) {
1225 error(_("multi-pack-index file exists, but failed to parse"));
1226 result = 1;
1227 }
1228 free(filename);
1229 return result;
1230 }
1231
1232 if (!midx_checksum_valid(m))
1233 midx_report(_("incorrect checksum"));
1234
1235 if (flags & MIDX_PROGRESS)
1236 progress = start_delayed_progress(_("Looking for referenced packfiles"),
1237 m->num_packs);
1238 for (i = 0; i < m->num_packs; i++) {
1239 if (prepare_midx_pack(r, m, i))
1240 midx_report("failed to load pack in position %d", i);
1241
1242 display_progress(progress, i + 1);
1243 }
1244 stop_progress(&progress);
1245
1246 for (i = 0; i < 255; i++) {
1247 uint32_t oid_fanout1 = ntohl(m->chunk_oid_fanout[i]);
1248 uint32_t oid_fanout2 = ntohl(m->chunk_oid_fanout[i + 1]);
1249
1250 if (oid_fanout1 > oid_fanout2)
1251 midx_report(_("oid fanout out of order: fanout[%d] = %"PRIx32" > %"PRIx32" = fanout[%d]"),
1252 i, oid_fanout1, oid_fanout2, i + 1);
1253 }
1254
1255 if (m->num_objects == 0) {
1256 midx_report(_("the midx contains no oid"));
1257 /*
1258 * Remaining tests assume that we have objects, so we can
1259 * return here.
1260 */
1261 return verify_midx_error;
1262 }
1263
1264 if (flags & MIDX_PROGRESS)
1265 progress = start_sparse_progress(_("Verifying OID order in multi-pack-index"),
1266 m->num_objects - 1);
1267 for (i = 0; i < m->num_objects - 1; i++) {
1268 struct object_id oid1, oid2;
1269
1270 nth_midxed_object_oid(&oid1, m, i);
1271 nth_midxed_object_oid(&oid2, m, i + 1);
1272
1273 if (oidcmp(&oid1, &oid2) >= 0)
1274 midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
1275 i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);
1276
1277 midx_display_sparse_progress(progress, i + 1);
1278 }
1279 stop_progress(&progress);
1280
1281 /*
1282 * Create an array mapping each object to its packfile id. Sort it
1283 * to group the objects by packfile. Use this permutation to visit
1284 * each of the objects and only require 1 packfile to be open at a
1285 * time.
1286 */
1287 ALLOC_ARRAY(pairs, m->num_objects);
1288 for (i = 0; i < m->num_objects; i++) {
1289 pairs[i].pos = i;
1290 pairs[i].pack_int_id = nth_midxed_pack_int_id(m, i);
1291 }
1292
1293 if (flags & MIDX_PROGRESS)
1294 progress = start_sparse_progress(_("Sorting objects by packfile"),
1295 m->num_objects);
1296 display_progress(progress, 0); /* TODO: Measure QSORT() progress */
1297 QSORT(pairs, m->num_objects, compare_pair_pos_vs_id);
1298 stop_progress(&progress);
1299
1300 if (flags & MIDX_PROGRESS)
1301 progress = start_sparse_progress(_("Verifying object offsets"), m->num_objects);
1302 for (i = 0; i < m->num_objects; i++) {
1303 struct object_id oid;
1304 struct pack_entry e;
1305 off_t m_offset, p_offset;
1306
1307 if (i > 0 && pairs[i-1].pack_int_id != pairs[i].pack_int_id &&
1308 m->packs[pairs[i-1].pack_int_id])
1309 {
1310 close_pack_fd(m->packs[pairs[i-1].pack_int_id]);
1311 close_pack_index(m->packs[pairs[i-1].pack_int_id]);
1312 }
1313
1314 nth_midxed_object_oid(&oid, m, pairs[i].pos);
1315
1316 if (!fill_midx_entry(r, &oid, &e, m)) {
1317 midx_report(_("failed to load pack entry for oid[%d] = %s"),
1318 pairs[i].pos, oid_to_hex(&oid));
1319 continue;
1320 }
1321
1322 if (open_pack_index(e.p)) {
1323 midx_report(_("failed to load pack-index for packfile %s"),
1324 e.p->pack_name);
1325 break;
1326 }
1327
1328 m_offset = e.offset;
1329 p_offset = find_pack_entry_one(oid.hash, e.p);
1330
1331 if (m_offset != p_offset)
1332 midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
1333 pairs[i].pos, oid_to_hex(&oid), m_offset, p_offset);
1334
1335 midx_display_sparse_progress(progress, i + 1);
1336 }
1337 stop_progress(&progress);
1338
1339 free(pairs);
1340
1341 return verify_midx_error;
1342 }
1343
1344 int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
1345 {
1346 uint32_t i, *count, result = 0;
1347 struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
1348 struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
1349 struct progress *progress = NULL;
1350
1351 if (!m)
1352 return 0;
1353
1354 CALLOC_ARRAY(count, m->num_packs);
1355
1356 if (flags & MIDX_PROGRESS)
1357 progress = start_delayed_progress(_("Counting referenced objects"),
1358 m->num_objects);
1359 for (i = 0; i < m->num_objects; i++) {
1360 int pack_int_id = nth_midxed_pack_int_id(m, i);
1361 count[pack_int_id]++;
1362 display_progress(progress, i + 1);
1363 }
1364 stop_progress(&progress);
1365
1366 if (flags & MIDX_PROGRESS)
1367 progress = start_delayed_progress(_("Finding and deleting unreferenced packfiles"),
1368 m->num_packs);
1369 for (i = 0; i < m->num_packs; i++) {
1370 char *pack_name;
1371 display_progress(progress, i + 1);
1372
1373 if (count[i])
1374 continue;
1375
1376 if (prepare_midx_pack(r, m, i))
1377 continue;
1378
1379 if (m->packs[i]->pack_keep)
1380 continue;
1381
1382 pack_name = xstrdup(m->packs[i]->pack_name);
1383 close_pack(m->packs[i]);
1384
1385 string_list_insert(&packs_to_drop, m->pack_names[i]);
1386 unlink_pack_path(pack_name, 0);
1387 free(pack_name);
1388 }
1389 stop_progress(&progress);
1390
1391 free(count);
1392
1393 if (packs_to_drop.nr)
1394 result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags);
1395
1396 string_list_clear(&packs_to_drop, 0);
1397 return result;
1398 }
1399
1400 struct repack_info {
1401 timestamp_t mtime;
1402 uint32_t referenced_objects;
1403 uint32_t pack_int_id;
1404 };
1405
1406 static int compare_by_mtime(const void *a_, const void *b_)
1407 {
1408 const struct repack_info *a, *b;
1409
1410 a = (const struct repack_info *)a_;
1411 b = (const struct repack_info *)b_;
1412
1413 if (a->mtime < b->mtime)
1414 return -1;
1415 if (a->mtime > b->mtime)
1416 return 1;
1417 return 0;
1418 }
1419
1420 static int fill_included_packs_all(struct repository *r,
1421 struct multi_pack_index *m,
1422 unsigned char *include_pack)
1423 {
1424 uint32_t i, count = 0;
1425 int pack_kept_objects = 0;
1426
1427 repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
1428
1429 for (i = 0; i < m->num_packs; i++) {
1430 if (prepare_midx_pack(r, m, i))
1431 continue;
1432 if (!pack_kept_objects && m->packs[i]->pack_keep)
1433 continue;
1434
1435 include_pack[i] = 1;
1436 count++;
1437 }
1438
1439 return count < 2;
1440 }
1441
1442 static int fill_included_packs_batch(struct repository *r,
1443 struct multi_pack_index *m,
1444 unsigned char *include_pack,
1445 size_t batch_size)
1446 {
1447 uint32_t i, packs_to_repack;
1448 size_t total_size;
1449 struct repack_info *pack_info = xcalloc(m->num_packs, sizeof(struct repack_info));
1450 int pack_kept_objects = 0;
1451
1452 repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
1453
1454 for (i = 0; i < m->num_packs; i++) {
1455 pack_info[i].pack_int_id = i;
1456
1457 if (prepare_midx_pack(r, m, i))
1458 continue;
1459
1460 pack_info[i].mtime = m->packs[i]->mtime;
1461 }
1462
1463 for (i = 0; batch_size && i < m->num_objects; i++) {
1464 uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
1465 pack_info[pack_int_id].referenced_objects++;
1466 }
1467
1468 QSORT(pack_info, m->num_packs, compare_by_mtime);
1469
1470 total_size = 0;
1471 packs_to_repack = 0;
1472 for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
1473 int pack_int_id = pack_info[i].pack_int_id;
1474 struct packed_git *p = m->packs[pack_int_id];
1475 size_t expected_size;
1476
1477 if (!p)
1478 continue;
1479 if (!pack_kept_objects && p->pack_keep)
1480 continue;
1481 if (open_pack_index(p) || !p->num_objects)
1482 continue;
1483
1484 expected_size = (size_t)(p->pack_size
1485 * pack_info[i].referenced_objects);
1486 expected_size /= p->num_objects;
1487
1488 if (expected_size >= batch_size)
1489 continue;
1490
1491 packs_to_repack++;
1492 total_size += expected_size;
1493 include_pack[pack_int_id] = 1;
1494 }
1495
1496 free(pack_info);
1497
1498 if (packs_to_repack < 2)
1499 return 1;
1500
1501 return 0;
1502 }
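/*
 * The batch heuristic above scales each pack's size by the fraction of
 * its objects that the MIDX still references: for example, a 100 MB pack
 * with only 10% of its objects referenced contributes an expected ~10 MB
 * toward batch_size, so several mostly-superseded packs can fit into a
 * single repack batch.
 */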
1503
1504 int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
1505 {
1506 int result = 0;
1507 uint32_t i;
1508 unsigned char *include_pack;
1509 struct child_process cmd = CHILD_PROCESS_INIT;
1510 FILE *cmd_in;
1511 struct strbuf base_name = STRBUF_INIT;
1512 struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
1513
1514 /*
1515  * If the defaults for these configuration variables change in
1516  * builtin/repack.c, the initializers below must be adjusted
1517  * to match.
1518 */
1519 int delta_base_offset = 1;
1520 int use_delta_islands = 0;
1521
1522 if (!m)
1523 return 0;
1524
1525 CALLOC_ARRAY(include_pack, m->num_packs);
1526
1527 if (batch_size) {
1528 if (fill_included_packs_batch(r, m, include_pack, batch_size))
1529 goto cleanup;
1530 } else if (fill_included_packs_all(r, m, include_pack))
1531 goto cleanup;
1532
1533 repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
1534 repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);
1535
1536 strvec_push(&cmd.args, "pack-objects");
1537
1538 strbuf_addstr(&base_name, object_dir);
1539 strbuf_addstr(&base_name, "/pack/pack");
1540 strvec_push(&cmd.args, base_name.buf);
1541
1542 if (delta_base_offset)
1543 strvec_push(&cmd.args, "--delta-base-offset");
1544 if (use_delta_islands)
1545 strvec_push(&cmd.args, "--delta-islands");
1546
1547 if (flags & MIDX_PROGRESS)
1548 strvec_push(&cmd.args, "--progress");
1549 else
1550 strvec_push(&cmd.args, "-q");
1551
1552 strbuf_release(&base_name);
1553
1554 cmd.git_cmd = 1;
1555 cmd.in = cmd.out = -1;
1556
1557 if (start_command(&cmd)) {
1558 error(_("could not start pack-objects"));
1559 result = 1;
1560 goto cleanup;
1561 }
1562
1563 cmd_in = xfdopen(cmd.in, "w");
1564
1565 for (i = 0; i < m->num_objects; i++) {
1566 struct object_id oid;
1567 uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
1568
1569 if (!include_pack[pack_int_id])
1570 continue;
1571
1572 nth_midxed_object_oid(&oid, m, i);
1573 fprintf(cmd_in, "%s\n", oid_to_hex(&oid));
1574 }
1575 fclose(cmd_in);
1576
1577 if (finish_command(&cmd)) {
1578 error(_("could not finish pack-objects"));
1579 result = 1;
1580 goto cleanup;
1581 }
1582
1583 result = write_midx_internal(object_dir, m, NULL, NULL, flags);
1584 m = NULL;
1585
1586 cleanup:
1587 if (m)
1588 close_midx(m);
1589 free(include_pack);
1590 return result;
1591 }