#include "git-compat-util.h"
#include "gettext.h"
#include "pack-revindex.h"
#include "object-file.h"
#include "object-store.h"
#include "packfile.h"
#include "trace2.h"
#include "config.h"
#include "midx.h"
#include "csum-file.h"

struct revindex_entry {
	off_t offset;
	unsigned int nr;
};

/*
 * The pack index for an existing pack gives us easy access to the offset in
 * the corresponding pack file where each object's data starts, but the
 * entries do not store the size of the compressed representation (the
 * uncompressed size is easily available by examining the pack entry header).
 * It is also rather expensive to find the sha1 for an object given its
 * offset.
 *
 * The pack index file is sorted by object name mapping to offset; this
 * revindex array is a list of offset/index_nr pairs ordered by offset, so if
 * you know the offset of an object, the next offset is where its packed
 * representation ends and the index_nr can be used to get the object sha1
 * from the main index.
 */

/*
 * This is a least-significant-digit radix sort.
 *
 * It sorts each of the "n" items in "entries" by its offset field. The "max"
 * parameter must be at least as large as the largest offset in the array,
 * and lets us quit the sort early.
 */
static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
{
	/*
	 * We use a "digit" size of 16 bits. That keeps our memory
	 * usage reasonable, and we can generally (for a 4G or smaller
	 * packfile) quit after two rounds of radix-sorting.
	 */
#define DIGIT_SIZE (16)
#define BUCKETS (1 << DIGIT_SIZE)
	/*
	 * We want to know the bucket that a[i] will go into when we are using
	 * the digit that is N bits from the (least significant) end.
	 */
#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))

	/*
	 * We need O(n) temporary storage. Rather than do an extra copy of the
	 * partial results into "entries", we sort back and forth between the
	 * real array and temporary storage. In each iteration of the loop, we
	 * keep track of them with alias pointers, always sorting from "from"
	 * to "to".
	 */
	struct revindex_entry *tmp, *from, *to;
	int bits;
	unsigned *pos;

	ALLOC_ARRAY(pos, BUCKETS);
	ALLOC_ARRAY(tmp, n);
	from = entries;
	to = tmp;

	/*
	 * If (max >> bits) is zero, then we know that the radix digit we are
	 * on (and any higher) will be zero for all entries, and our loop will
	 * be a no-op, as everybody lands in the same zero-th bucket.
	 */
	for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
		unsigned i;

		memset(pos, 0, BUCKETS * sizeof(*pos));

		/*
		 * We want pos[i] to store the index of the last element that
		 * will go in bucket "i" (actually one past the last element).
		 * To do this, we first count the items that will go in each
		 * bucket, which gives us a relative offset from the last
		 * bucket. We can then cumulatively add the index from the
		 * previous bucket to get the true index.
		 */
		for (i = 0; i < n; i++)
			pos[BUCKET_FOR(from, i, bits)]++;
		for (i = 1; i < BUCKETS; i++)
			pos[i] += pos[i-1];

		/*
		 * Now we can drop the elements into their correct buckets (in
		 * our temporary array). We iterate the pos counter backwards
		 * to avoid using an extra index to count up. And since we are
		 * going backwards there, we must also go backwards through the
		 * array itself, to keep the sort stable.
		 *
		 * Note that we use an unsigned iterator to make sure we can
		 * handle 2^32-1 objects, even on a 32-bit system. But this
		 * means we cannot use the more obvious "i >= 0" loop condition
		 * for counting backwards, and must instead check for
		 * wrap-around with UINT_MAX.
		 */
		for (i = n - 1; i != UINT_MAX; i--)
			to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];

		/*
		 * Now "to" contains the most sorted list, so we swap "from" and
		 * "to" for the next iteration.
		 */
		SWAP(from, to);
	}

	/*
	 * If we ended with our data in the original array, great. If not,
	 * we have to move it back from the temporary storage.
	 */
	if (from != entries)
		COPY_ARRAY(entries, tmp, n);
	free(tmp);
	free(pos);

#undef BUCKET_FOR
#undef BUCKETS
#undef DIGIT_SIZE
}

/*
 * Build the ordered list of offsets of objects in the pack from the pack
 * .idx data, with one extra sentinel entry marking the end of the last
 * object's data.
 */
static void create_pack_revindex(struct packed_git *p)
{
	const unsigned num_ent = p->num_objects;
	unsigned i;
	const char *index = p->index_data;
	const unsigned hashsz = the_hash_algo->rawsz;

	ALLOC_ARRAY(p->revindex, num_ent + 1);
	index += 4 * 256;

	if (p->index_version > 1) {
		const uint32_t *off_32 =
			(uint32_t *)(index + 8 + (size_t)p->num_objects * (hashsz + 4));
		const uint32_t *off_64 = off_32 + p->num_objects;
		for (i = 0; i < num_ent; i++) {
			const uint32_t off = ntohl(*off_32++);
			if (!(off & 0x80000000)) {
				p->revindex[i].offset = off;
			} else {
				p->revindex[i].offset = get_be64(off_64);
				off_64 += 2;
			}
			p->revindex[i].nr = i;
		}
	} else {
		for (i = 0; i < num_ent; i++) {
			const uint32_t hl = *((uint32_t *)(index + (hashsz + 4) * i));
			p->revindex[i].offset = ntohl(hl);
			p->revindex[i].nr = i;
		}
	}

	/*
	 * This knows the pack format -- the hash trailer
	 * follows immediately after the last object data.
	 */
	p->revindex[num_ent].offset = p->pack_size - hashsz;
	p->revindex[num_ent].nr = -1;
	sort_revindex(p->revindex, num_ent, p->pack_size);
}

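/*
 * Build the in-memory reverse index from the pack's .idx data. Dies first if
 * GIT_TEST_REV_INDEX_DIE_IN_MEMORY is set, which lets tests assert that this
 * fallback is never reached. Returns 0 on success, or -1 if the pack index
 * cannot be opened.
 */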
static int create_pack_revindex_in_memory(struct packed_git *p)
{
	if (git_env_bool(GIT_TEST_REV_INDEX_DIE_IN_MEMORY, 0))
		die("dying as requested by '%s'",
		    GIT_TEST_REV_INDEX_DIE_IN_MEMORY);
	if (open_pack_index(p))
		return -1;
	create_pack_revindex(p);
	return 0;
}

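/*
 * Return the path of the pack's ".rev" file, i.e. the pack name with its
 * ".pack" suffix replaced by ".rev". The caller is responsible for freeing
 * the returned string.
 */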
static char *pack_revindex_filename(struct packed_git *p)
{
	size_t len;
	if (!strip_suffix(p->pack_name, ".pack", &len))
		BUG("pack_name does not end in .pack");
	return xstrfmt("%.*s.rev", (int)len, p->pack_name);
}

#define RIDX_HEADER_SIZE (12)
#define RIDX_MIN_SIZE (RIDX_HEADER_SIZE + (2 * the_hash_algo->rawsz))

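/*
 * On-disk header of a ".rev" file: a 4-byte signature (RIDX_SIGNATURE), a
 * 4-byte format version, and a 4-byte hash function identifier, each stored
 * in network byte order.
 */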
struct revindex_header {
	uint32_t signature;
	uint32_t version;
	uint32_t hash_id;
};

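/*
 * Open and mmap the reverse index at "revindex_name", validating its size and
 * header against the expected object count. On success, store the mapping and
 * its length in "data_p" and "len_p" and return 0. Return 1 if the file does
 * not exist, or a negative value (after emitting an error) if it is malformed.
 */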
static int load_revindex_from_disk(char *revindex_name,
				   uint32_t num_objects,
				   const uint32_t **data_p, size_t *len_p)
{
	int fd, ret = 0;
	struct stat st;
	void *data = NULL;
	size_t revindex_size;
	struct revindex_header *hdr;

	if (git_env_bool(GIT_TEST_REV_INDEX_DIE_ON_DISK, 0))
		die("dying as requested by '%s'", GIT_TEST_REV_INDEX_DIE_ON_DISK);

	fd = git_open(revindex_name);

	if (fd < 0) {
		/* "No file" means return 1. */
		ret = 1;
		goto cleanup;
	}
	if (fstat(fd, &st)) {
		ret = error_errno(_("failed to read %s"), revindex_name);
		goto cleanup;
	}

	revindex_size = xsize_t(st.st_size);

	if (revindex_size < RIDX_MIN_SIZE) {
		ret = error(_("reverse-index file %s is too small"), revindex_name);
		goto cleanup;
	}

	if (revindex_size - RIDX_MIN_SIZE != st_mult(sizeof(uint32_t), num_objects)) {
		ret = error(_("reverse-index file %s is corrupt"), revindex_name);
		goto cleanup;
	}

	data = xmmap(NULL, revindex_size, PROT_READ, MAP_PRIVATE, fd, 0);
	hdr = data;

	if (ntohl(hdr->signature) != RIDX_SIGNATURE) {
		ret = error(_("reverse-index file %s has unknown signature"), revindex_name);
		goto cleanup;
	}
	if (ntohl(hdr->version) != 1) {
		ret = error(_("reverse-index file %s has unsupported version %"PRIu32),
			    revindex_name, ntohl(hdr->version));
		goto cleanup;
	}
	if (!(ntohl(hdr->hash_id) == 1 || ntohl(hdr->hash_id) == 2)) {
		ret = error(_("reverse-index file %s has unsupported hash id %"PRIu32),
			    revindex_name, ntohl(hdr->hash_id));
		goto cleanup;
	}

cleanup:
	if (ret) {
		if (data)
			munmap(data, revindex_size);
	} else {
		*len_p = revindex_size;
		*data_p = (const uint32_t *)data;
	}

	if (fd >= 0)
		close(fd);
	return ret;
}

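/*
 * Load the pack's on-disk ".rev" file into p->revindex_map and
 * p->revindex_data. Returns 0 on success and non-zero otherwise (1 if the
 * file is absent, negative on error), leaving the pack untouched in that
 * case.
 */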
int load_pack_revindex_from_disk(struct packed_git *p)
{
	char *revindex_name;
	int ret;
	if (open_pack_index(p))
		return -1;

	revindex_name = pack_revindex_filename(p);

	ret = load_revindex_from_disk(revindex_name,
				      p->num_objects,
				      &p->revindex_map,
				      &p->revindex_size);
	if (ret)
		goto cleanup;

	p->revindex_data = (const uint32_t *)((const char *)p->revindex_map + RIDX_HEADER_SIZE);

cleanup:
	free(revindex_name);
	return ret;
}

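/*
 * Ensure a reverse index exists for the pack "p": prefer the on-disk ".rev"
 * file when r->settings.pack_read_reverse_index allows it, and fall back to
 * generating the index in memory from the .idx. Returns 0 on success, -1 on
 * failure.
 */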
int load_pack_revindex(struct repository *r, struct packed_git *p)
{
	if (p->revindex || p->revindex_data)
		return 0;

	prepare_repo_settings(r);

	if (r->settings.pack_read_reverse_index &&
	    !load_pack_revindex_from_disk(p))
		return 0;
	else if (!create_pack_revindex_in_memory(p))
		return 0;
	return -1;
}

/*
 * verify_pack_revindex verifies that the on-disk rev-index for the given
 * pack-file is the same as the one that would be created if written from
 * scratch.
 *
 * A negative number is returned on error.
 */
int verify_pack_revindex(struct packed_git *p)
{
	int res = 0;

	/* Do not bother checking if not initialized. */
	if (!p->revindex_map || !p->revindex_data)
		return res;

	if (!hashfile_checksum_valid((const unsigned char *)p->revindex_map, p->revindex_size)) {
		error(_("invalid checksum"));
		res = -1;
	}

	/* This may fail due to a broken .idx. */
	if (create_pack_revindex_in_memory(p))
		return res;

	for (size_t i = 0; i < p->num_objects; i++) {
		uint32_t nr = p->revindex[i].nr;
		uint32_t rev_val = get_be32(p->revindex_data + i);

		if (nr != rev_val) {
			error(_("invalid rev-index position at %"PRIu64": %"PRIu32" != %"PRIu32""),
			      (uint64_t)i, nr, rev_val);
			res = -1;
		}
	}

	return res;
}

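/*
 * Load the reverse index for the multi-pack-index "m", either from its
 * embedded RIDX chunk or from a separate ".rev" file on disk. Returns 0 on
 * success, non-zero otherwise.
 */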
int load_midx_revindex(struct multi_pack_index *m)
{
	struct strbuf revindex_name = STRBUF_INIT;
	int ret;

	if (m->revindex_data)
		return 0;

	if (m->chunk_revindex) {
		/*
		 * If the MIDX `m` has a `RIDX` chunk, then use its contents for
		 * the reverse index instead of trying to load a separate `.rev`
		 * file.
		 *
		 * Note that we do *not* set `m->revindex_map` here, since we do
		 * not want to accidentally call munmap() in the middle of the
		 * MIDX.
		 */
		trace2_data_string("load_midx_revindex", the_repository,
				   "source", "midx");
		m->revindex_data = (const uint32_t *)m->chunk_revindex;
		return 0;
	}

	trace2_data_string("load_midx_revindex", the_repository,
			   "source", "rev");

	get_midx_rev_filename(&revindex_name, m);

	ret = load_revindex_from_disk(revindex_name.buf,
				      m->num_objects,
				      &m->revindex_map,
				      &m->revindex_len);
	if (ret)
		goto cleanup;

	m->revindex_data = (const uint32_t *)((const char *)m->revindex_map + RIDX_HEADER_SIZE);

cleanup:
	strbuf_release(&revindex_name);
	return ret;
}

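/*
 * Release the MIDX's ".rev" mapping, if any. A reverse index that came from
 * the MIDX's own RIDX chunk has no separate mapping, so there is nothing to
 * unmap in that case.
 */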
int close_midx_revindex(struct multi_pack_index *m)
{
	if (!m || !m->revindex_map)
		return 0;

	munmap((void*)m->revindex_map, m->revindex_len);

	m->revindex_map = NULL;
	m->revindex_data = NULL;
	m->revindex_len = 0;

	return 0;
}

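/*
 * Binary-search the reverse index for the object that starts at pack offset
 * "ofs". On success, store its position in pack order in "pos" and return 0;
 * return -1 if the offset does not name the start of an object (or if the
 * reverse index cannot be loaded).
 */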
int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos)
{
	unsigned lo, hi;

	if (load_pack_revindex(the_repository, p) < 0)
		return -1;

	lo = 0;
	hi = p->num_objects + 1;

	do {
		const unsigned mi = lo + (hi - lo) / 2;
		off_t got = pack_pos_to_offset(p, mi);

		if (got == ofs) {
			*pos = mi;
			return 0;
		} else if (ofs < got)
			hi = mi;
		else
			lo = mi + 1;
	} while (lo < hi);

	error("bad offset for revindex");
	return -1;
}

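/*
 * Map a position in pack order to the corresponding position in the pack
 * index (i.e. in lexicographic object name order), reading either the
 * in-memory revindex or the mmapped ".rev" data.
 */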
uint32_t pack_pos_to_index(struct packed_git *p, uint32_t pos)
{
	if (!(p->revindex || p->revindex_data))
		BUG("pack_pos_to_index: reverse index not yet loaded");
	if (p->num_objects <= pos)
		BUG("pack_pos_to_index: out-of-bounds object at %"PRIu32, pos);

	if (p->revindex)
		return p->revindex[pos].nr;
	else
		return get_be32(p->revindex_data + pos);
}

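/*
 * Map a position in pack order to the offset within the packfile where that
 * object's data begins. The position one past the last object is valid here
 * and yields the offset of the pack trailer.
 */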
off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos)
{
	if (!(p->revindex || p->revindex_data))
		BUG("pack_pos_to_offset: reverse index not yet loaded");
	if (p->num_objects < pos)
		BUG("pack_pos_to_offset: out-of-bounds object at %"PRIu32, pos);

	if (p->revindex)
		return p->revindex[pos].offset;
	else if (pos == p->num_objects)
		return p->pack_size - the_hash_algo->rawsz;
	else
		return nth_packed_object_offset(p, pack_pos_to_index(p, pos));
}

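/*
 * Map a position in the MIDX's pseudo-pack order to the corresponding
 * position in the MIDX's lexicographic object order.
 */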
uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos)
{
	if (!m->revindex_data)
		BUG("pack_pos_to_midx: reverse index not yet loaded");
	if (m->num_objects <= pos)
		BUG("pack_pos_to_midx: out-of-bounds object at %"PRIu32, pos);
	return get_be32(m->revindex_data + pos);
}

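/*
 * Search key for midx_to_pack_pos(): the pack identifier and offset of the
 * object we are looking for, plus the identifier of the preferred pack and
 * the MIDX itself so that the comparison callback can reconstruct the
 * pseudo-pack order.
 */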
struct midx_pack_key {
	uint32_t pack;
	off_t offset;

	uint32_t preferred_pack;
	struct multi_pack_index *midx;
};

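/*
 * bsearch() callback comparing the search key against one entry of the RIDX
 * data: preferred-pack objects sort first, then objects are ordered by pack
 * identifier, and finally by offset within their pack, matching the
 * pseudo-pack order in which the reverse index is written.
 */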
static int midx_pack_order_cmp(const void *va, const void *vb)
{
	const struct midx_pack_key *key = va;
	struct multi_pack_index *midx = key->midx;

	uint32_t versus = pack_pos_to_midx(midx, (uint32_t*)vb - (const uint32_t *)midx->revindex_data);
	uint32_t versus_pack = nth_midxed_pack_int_id(midx, versus);
	off_t versus_offset;

	uint32_t key_preferred = key->pack == key->preferred_pack;
	uint32_t versus_preferred = versus_pack == key->preferred_pack;

	/*
	 * First, compare the preferred-ness, noting that the preferred pack
	 * comes first.
	 */
	if (key_preferred && !versus_preferred)
		return -1;
	else if (!key_preferred && versus_preferred)
		return 1;

	/* Then, break ties first by comparing the pack IDs. */
	if (key->pack < versus_pack)
		return -1;
	else if (key->pack > versus_pack)
		return 1;

	/* Finally, break ties by comparing offsets within a pack. */
	versus_offset = nth_midxed_offset(midx, versus);
	if (key->offset < versus_offset)
		return -1;
	else if (key->offset > versus_offset)
		return 1;

	return 0;
}

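/*
 * Find the pseudo-pack position of the object at lexicographic position "at"
 * in the MIDX by binary-searching the RIDX data. On success, store the
 * position in "pos" and return 0; return a negative value on error.
 */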
int midx_to_pack_pos(struct multi_pack_index *m, uint32_t at, uint32_t *pos)
{
	struct midx_pack_key key;
	uint32_t *found;

	if (!m->revindex_data)
		BUG("midx_to_pack_pos: reverse index not yet loaded");
	if (m->num_objects <= at)
		BUG("midx_to_pack_pos: out-of-bounds object at %"PRIu32, at);

	key.pack = nth_midxed_pack_int_id(m, at);
	key.offset = nth_midxed_offset(m, at);
	key.midx = m;
	/*
	 * The preferred pack sorts first, so determine its identifier by
	 * looking at the first object in pseudo-pack order.
	 *
	 * Note that if no --preferred-pack is explicitly given when writing a
	 * multi-pack index, then whichever pack has the lowest identifier
	 * implicitly is preferred (and includes all its objects, since ties are
	 * broken first by pack identifier).
	 */
	key.preferred_pack = nth_midxed_pack_int_id(m, pack_pos_to_midx(m, 0));

	found = bsearch(&key, m->revindex_data, m->num_objects,
			sizeof(*m->revindex_data), midx_pack_order_cmp);

	if (!found)
		return error("bad offset for revindex");

	*pos = found - m->revindex_data;
	return 0;
}