]> git.ipfire.org Git - thirdparty/git.git/blame - reftable/writer.c
Merge branch 'rj/use-adv-if-enabled'
[thirdparty/git.git] / reftable / writer.c
CommitLineData
f14bd719
HWN
/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
8
9#include "writer.h"
10
11#include "system.h"
12
13#include "block.h"
14#include "constants.h"
15#include "record.h"
16#include "tree.h"
17#include "reftable-error.h"
18
19/* finishes a block, and writes it to storage */
20static int writer_flush_block(struct reftable_writer *w);
21
22/* deallocates memory related to the index */
23static void writer_clear_index(struct reftable_writer *w);
24
25/* finishes writing a 'r' (refs) or 'g' (reflogs) section */
26static int writer_finish_public_section(struct reftable_writer *w);
27
28static struct reftable_block_stats *
29writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
30{
31 switch (typ) {
32 case 'r':
33 return &w->stats.ref_stats;
34 case 'o':
35 return &w->stats.obj_stats;
36 case 'i':
37 return &w->stats.idx_stats;
38 case 'g':
39 return &w->stats.log_stats;
40 }
41 abort();
42 return NULL;
43}
44
45/* write data, queuing the padding for the next write. Returns negative for
46 * error. */
47static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
48 int padding)
49{
50 int n = 0;
51 if (w->pending_padding > 0) {
b4ff12c8 52 uint8_t *zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed));
f14bd719
HWN
53 int n = w->write(w->write_arg, zeroed, w->pending_padding);
54 if (n < 0)
55 return n;
56
57 w->pending_padding = 0;
58 reftable_free(zeroed);
59 }
60
61 w->pending_padding = padding;
62 n = w->write(w->write_arg, data, len);
63 if (n < 0)
64 return n;
65 n += padding;
66 return 0;
67}
68
69static void options_set_defaults(struct reftable_write_options *opts)
70{
71 if (opts->restart_interval == 0) {
72 opts->restart_interval = 16;
73 }
74
75 if (opts->hash_id == 0) {
76 opts->hash_id = GIT_SHA1_FORMAT_ID;
77 }
78 if (opts->block_size == 0) {
79 opts->block_size = DEFAULT_BLOCK_SIZE;
80 }
81}
82
83static int writer_version(struct reftable_writer *w)
84{
85 return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
86 1 :
87 2;
88}
89
90static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
91{
92 memcpy(dest, "REFT", 4);
93
94 dest[4] = writer_version(w);
95
96 put_be24(dest + 5, w->opts.block_size);
97 put_be64(dest + 8, w->min_update_index);
98 put_be64(dest + 16, w->max_update_index);
99 if (writer_version(w) == 2) {
100 put_be32(dest + 24, w->opts.hash_id);
101 }
102 return header_size(writer_version(w));
103}
104
105static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
106{
107 int block_start = 0;
108 if (w->next == 0) {
109 block_start = header_size(writer_version(w));
110 }
111
112 strbuf_release(&w->last_key);
113 block_writer_init(&w->block_writer_data, typ, w->block,
114 w->opts.block_size, block_start,
115 hash_size(w->opts.hash_id));
116 w->block_writer = &w->block_writer_data;
117 w->block_writer->restart_interval = w->opts.restart_interval;
118}
119
120static struct strbuf reftable_empty_strbuf = STRBUF_INIT;
121
122struct reftable_writer *
123reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
1df18a1c 124 int (*flush_func)(void *),
f14bd719
HWN
125 void *writer_arg, struct reftable_write_options *opts)
126{
b4ff12c8 127 struct reftable_writer *wp = reftable_calloc(1, sizeof(*wp));
f14bd719
HWN
128 strbuf_init(&wp->block_writer_data.last_key, 0);
129 options_set_defaults(opts);
130 if (opts->block_size >= (1 << 24)) {
131 /* TODO - error return? */
132 abort();
133 }
134 wp->last_key = reftable_empty_strbuf;
b4ff12c8 135 REFTABLE_CALLOC_ARRAY(wp->block, opts->block_size);
f14bd719
HWN
136 wp->write = writer_func;
137 wp->write_arg = writer_arg;
138 wp->opts = *opts;
1df18a1c 139 wp->flush = flush_func;
f14bd719
HWN
140 writer_reinit_block_writer(wp, BLOCK_TYPE_REF);
141
142 return wp;
143}
144
145void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
146 uint64_t max)
147{
148 w->min_update_index = min;
149 w->max_update_index = max;
150}
151
152void reftable_writer_free(struct reftable_writer *w)
153{
33e92243
HWN
154 if (!w)
155 return;
f14bd719
HWN
156 reftable_free(w->block);
157 reftable_free(w);
158}
159
160struct obj_index_tree_node {
161 struct strbuf hash;
162 uint64_t *offsets;
163 size_t offset_len;
164 size_t offset_cap;
165};
166
167#define OBJ_INDEX_TREE_NODE_INIT \
168 { \
169 .hash = STRBUF_INIT \
170 }
171
172static int obj_index_tree_node_compare(const void *a, const void *b)
173{
174 return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
175 &((const struct obj_index_tree_node *)b)->hash);
176}
177
178static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
179{
180 uint64_t off = w->next;
181
182 struct obj_index_tree_node want = { .hash = *hash };
183
184 struct tree_node *node = tree_search(&want, &w->obj_index_tree,
185 &obj_index_tree_node_compare, 0);
186 struct obj_index_tree_node *key = NULL;
72a4ea71 187 if (!node) {
f14bd719
HWN
188 struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
189 key = reftable_malloc(sizeof(struct obj_index_tree_node));
190 *key = empty;
191
192 strbuf_reset(&key->hash);
193 strbuf_addbuf(&key->hash, hash);
194 tree_search((void *)key, &w->obj_index_tree,
195 &obj_index_tree_node_compare, 1);
196 } else {
197 key = node->key;
198 }
199
200 if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
201 return;
202 }
203
f6b58c1b 204 REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
f14bd719
HWN
205 key->offsets[key->offset_len++] = off;
206}
207
208static int writer_add_record(struct reftable_writer *w,
209 struct reftable_record *rec)
210{
211 struct strbuf key = STRBUF_INIT;
212 int err = -1;
213 reftable_record_key(rec, &key);
214 if (strbuf_cmp(&w->last_key, &key) >= 0) {
215 err = REFTABLE_API_ERROR;
216 goto done;
217 }
218
219 strbuf_reset(&w->last_key);
220 strbuf_addbuf(&w->last_key, &key);
72a4ea71 221 if (!w->block_writer) {
f14bd719
HWN
222 writer_reinit_block_writer(w, reftable_record_type(rec));
223 }
224
225 assert(block_writer_type(w->block_writer) == reftable_record_type(rec));
226
227 if (block_writer_add(w->block_writer, rec) == 0) {
228 err = 0;
229 goto done;
230 }
231
232 err = writer_flush_block(w);
233 if (err < 0) {
234 goto done;
235 }
236
237 writer_reinit_block_writer(w, reftable_record_type(rec));
238 err = block_writer_add(w->block_writer, rec);
45c2fcc2 239 if (err == -1) {
0dd44584
HWN
240 /* we are writing into memory, so an error can only mean it
241 * doesn't fit. */
242 err = REFTABLE_ENTRY_TOO_BIG_ERROR;
f14bd719
HWN
243 goto done;
244 }
245
f14bd719
HWN
246done:
247 strbuf_release(&key);
248 return err;
249}
250
251int reftable_writer_add_ref(struct reftable_writer *w,
252 struct reftable_ref_record *ref)
253{
66c0daba
HWN
254 struct reftable_record rec = {
255 .type = BLOCK_TYPE_REF,
33665d98
ÆAB
256 .u = {
257 .ref = *ref
258 },
66c0daba 259 };
f14bd719
HWN
260 int err = 0;
261
72a4ea71 262 if (!ref->refname)
f14bd719
HWN
263 return REFTABLE_API_ERROR;
264 if (ref->update_index < w->min_update_index ||
265 ref->update_index > w->max_update_index)
266 return REFTABLE_API_ERROR;
267
66c0daba 268 rec.u.ref.update_index -= w->min_update_index;
f14bd719
HWN
269
270 err = writer_add_record(w, &rec);
271 if (err < 0)
272 return err;
273
274 if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
275 struct strbuf h = STRBUF_INIT;
276 strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
277 hash_size(w->opts.hash_id));
278 writer_index_hash(w, &h);
279 strbuf_release(&h);
280 }
281
282 if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
283 struct strbuf h = STRBUF_INIT;
284 strbuf_add(&h, reftable_ref_record_val2(ref),
285 hash_size(w->opts.hash_id));
286 writer_index_hash(w, &h);
287 strbuf_release(&h);
288 }
289 return 0;
290}
291
292int reftable_writer_add_refs(struct reftable_writer *w,
293 struct reftable_ref_record *refs, int n)
294{
295 int err = 0;
296 int i = 0;
297 QSORT(refs, n, reftable_ref_record_compare_name);
298 for (i = 0; err == 0 && i < n; i++) {
299 err = reftable_writer_add_ref(w, &refs[i]);
300 }
301 return err;
302}
303
304static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
305 struct reftable_log_record *log)
306{
66c0daba
HWN
307 struct reftable_record rec = {
308 .type = BLOCK_TYPE_LOG,
33665d98
ÆAB
309 .u = {
310 .log = *log,
311 },
66c0daba 312 };
f14bd719
HWN
313 if (w->block_writer &&
314 block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
315 int err = writer_finish_public_section(w);
316 if (err < 0)
317 return err;
318 }
319
320 w->next -= w->pending_padding;
321 w->pending_padding = 0;
f14bd719
HWN
322 return writer_add_record(w, &rec);
323}
324
325int reftable_writer_add_log(struct reftable_writer *w,
326 struct reftable_log_record *log)
327{
328 char *input_log_message = NULL;
329 struct strbuf cleaned_message = STRBUF_INIT;
330 int err = 0;
331
332 if (log->value_type == REFTABLE_LOG_DELETION)
333 return reftable_writer_add_log_verbatim(w, log);
334
72a4ea71 335 if (!log->refname)
f14bd719
HWN
336 return REFTABLE_API_ERROR;
337
338 input_log_message = log->value.update.message;
339 if (!w->opts.exact_log_message && log->value.update.message) {
340 strbuf_addstr(&cleaned_message, log->value.update.message);
341 while (cleaned_message.len &&
342 cleaned_message.buf[cleaned_message.len - 1] == '\n')
343 strbuf_setlen(&cleaned_message,
344 cleaned_message.len - 1);
345 if (strchr(cleaned_message.buf, '\n')) {
346 /* multiple lines not allowed. */
347 err = REFTABLE_API_ERROR;
348 goto done;
349 }
350 strbuf_addstr(&cleaned_message, "\n");
351 log->value.update.message = cleaned_message.buf;
352 }
353
354 err = reftable_writer_add_log_verbatim(w, log);
355 log->value.update.message = input_log_message;
356done:
357 strbuf_release(&cleaned_message);
358 return err;
359}
360
361int reftable_writer_add_logs(struct reftable_writer *w,
362 struct reftable_log_record *logs, int n)
363{
364 int err = 0;
365 int i = 0;
366 QSORT(logs, n, reftable_log_record_compare_key);
367
368 for (i = 0; err == 0 && i < n; i++) {
369 err = reftable_writer_add_log(w, &logs[i]);
370 }
371 return err;
372}
373
374static int writer_finish_section(struct reftable_writer *w)
375{
9ebb2d7b 376 struct reftable_block_stats *bstats = NULL;
f14bd719
HWN
377 uint8_t typ = block_writer_type(w->block_writer);
378 uint64_t index_start = 0;
379 int max_level = 0;
9ebb2d7b 380 size_t threshold = w->opts.unpadded ? 1 : 3;
f14bd719 381 int before_blocks = w->stats.idx_stats.blocks;
9ebb2d7b
PS
382 int err;
383
384 err = writer_flush_block(w);
f14bd719
HWN
385 if (err < 0)
386 return err;
387
4950acae
PS
388 /*
389 * When the section we are about to index has a lot of blocks then the
390 * index itself may span across multiple blocks, as well. This would
391 * require a linear scan over index blocks only to find the desired
392 * indexed block, which is inefficient. Instead, we write a multi-level
393 * index where index records of level N+1 will refer to index blocks of
394 * level N. This isn't constant time, either, but at least logarithmic.
395 *
396 * This loop handles writing this multi-level index. Note that we write
397 * the lowest-level index pointing to the indexed blocks first. We then
398 * continue writing additional index levels until the current level has
399 * less blocks than the threshold so that the highest level will be at
400 * the end of the index section.
401 *
402 * Readers are thus required to start reading the index section from
403 * its end, which is why we set `index_start` to the beginning of the
404 * last index section.
405 */
f14bd719
HWN
406 while (w->index_len > threshold) {
407 struct reftable_index_record *idx = NULL;
9ebb2d7b 408 size_t i, idx_len;
f14bd719
HWN
409
410 max_level++;
411 index_start = w->next;
412 writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);
413
414 idx = w->index;
415 idx_len = w->index_len;
416
417 w->index = NULL;
418 w->index_len = 0;
419 w->index_cap = 0;
420 for (i = 0; i < idx_len; i++) {
66c0daba
HWN
421 struct reftable_record rec = {
422 .type = BLOCK_TYPE_INDEX,
33665d98
ÆAB
423 .u = {
424 .idx = idx[i],
425 },
66c0daba 426 };
f14bd719 427
b66e006f 428 err = writer_add_record(w, &rec);
f14bd719
HWN
429 if (err < 0)
430 return err;
f14bd719 431 }
b66e006f 432
e7485601
PS
433 err = writer_flush_block(w);
434 if (err < 0)
435 return err;
436
b66e006f 437 for (i = 0; i < idx_len; i++)
f14bd719 438 strbuf_release(&idx[i].last_key);
f14bd719
HWN
439 reftable_free(idx);
440 }
441
4950acae
PS
442 /*
443 * The index may still contain a number of index blocks lower than the
444 * threshold. Clear it so that these entries don't leak into the next
445 * index section.
446 */
ddac9659
PS
447 writer_clear_index(w);
448
f14bd719
HWN
449 bstats = writer_reftable_block_stats(w, typ);
450 bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
451 bstats->index_offset = index_start;
452 bstats->max_index_level = max_level;
453
454 /* Reinit lastKey, as the next section can start with any key. */
455 w->last_key.len = 0;
456
457 return 0;
458}
459
460struct common_prefix_arg {
461 struct strbuf *last;
462 int max;
463};
464
465static void update_common(void *void_arg, void *key)
466{
467 struct common_prefix_arg *arg = void_arg;
468 struct obj_index_tree_node *entry = key;
469 if (arg->last) {
470 int n = common_prefix_size(&entry->hash, arg->last);
471 if (n > arg->max) {
472 arg->max = n;
473 }
474 }
475 arg->last = &entry->hash;
476}
477
478struct write_record_arg {
479 struct reftable_writer *w;
480 int err;
481};
482
483static void write_object_record(void *void_arg, void *key)
484{
485 struct write_record_arg *arg = void_arg;
486 struct obj_index_tree_node *entry = key;
66c0daba
HWN
487 struct reftable_record
488 rec = { .type = BLOCK_TYPE_OBJ,
489 .u.obj = {
490 .hash_prefix = (uint8_t *)entry->hash.buf,
491 .hash_prefix_len = arg->w->stats.object_id_len,
492 .offsets = entry->offsets,
493 .offset_len = entry->offset_len,
494 } };
f14bd719
HWN
495 if (arg->err < 0)
496 goto done;
497
f14bd719
HWN
498 arg->err = block_writer_add(arg->w->block_writer, &rec);
499 if (arg->err == 0)
500 goto done;
501
502 arg->err = writer_flush_block(arg->w);
503 if (arg->err < 0)
504 goto done;
505
506 writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
507 arg->err = block_writer_add(arg->w->block_writer, &rec);
508 if (arg->err == 0)
509 goto done;
66c0daba
HWN
510
511 rec.u.obj.offset_len = 0;
f14bd719
HWN
512 arg->err = block_writer_add(arg->w->block_writer, &rec);
513
514 /* Should be able to write into a fresh block. */
515 assert(arg->err == 0);
516
517done:;
518}
519
520static void object_record_free(void *void_arg, void *key)
521{
522 struct obj_index_tree_node *entry = key;
523
524 FREE_AND_NULL(entry->offsets);
525 strbuf_release(&entry->hash);
526 reftable_free(entry);
527}
528
529static int writer_dump_object_index(struct reftable_writer *w)
530{
531 struct write_record_arg closure = { .w = w };
b4007fcc
HWN
532 struct common_prefix_arg common = {
533 .max = 1, /* obj_id_len should be >= 2. */
534 };
f14bd719
HWN
535 if (w->obj_index_tree) {
536 infix_walk(w->obj_index_tree, &update_common, &common);
537 }
538 w->stats.object_id_len = common.max + 1;
539
540 writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);
541
542 if (w->obj_index_tree) {
543 infix_walk(w->obj_index_tree, &write_object_record, &closure);
544 }
545
546 if (closure.err < 0)
547 return closure.err;
548 return writer_finish_section(w);
549}
550
551static int writer_finish_public_section(struct reftable_writer *w)
552{
553 uint8_t typ = 0;
554 int err = 0;
555
72a4ea71 556 if (!w->block_writer)
f14bd719
HWN
557 return 0;
558
559 typ = block_writer_type(w->block_writer);
560 err = writer_finish_section(w);
561 if (err < 0)
562 return err;
563 if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
564 w->stats.ref_stats.index_blocks > 0) {
565 err = writer_dump_object_index(w);
566 if (err < 0)
567 return err;
568 }
569
570 if (w->obj_index_tree) {
571 infix_walk(w->obj_index_tree, &object_record_free, NULL);
572 tree_free(w->obj_index_tree);
573 w->obj_index_tree = NULL;
574 }
575
576 w->block_writer = NULL;
577 return 0;
578}
579
580int reftable_writer_close(struct reftable_writer *w)
581{
582 uint8_t footer[72];
583 uint8_t *p = footer;
584 int err = writer_finish_public_section(w);
585 int empty_table = w->next == 0;
586 if (err != 0)
587 goto done;
588 w->pending_padding = 0;
589 if (empty_table) {
590 /* Empty tables need a header anyway. */
591 uint8_t header[28];
592 int n = writer_write_header(w, header);
593 err = padded_write(w, header, n, 0);
594 if (err < 0)
595 goto done;
596 }
597
598 p += writer_write_header(w, footer);
599 put_be64(p, w->stats.ref_stats.index_offset);
600 p += 8;
601 put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
602 p += 8;
603 put_be64(p, w->stats.obj_stats.index_offset);
604 p += 8;
605
606 put_be64(p, w->stats.log_stats.offset);
607 p += 8;
608 put_be64(p, w->stats.log_stats.index_offset);
609 p += 8;
610
611 put_be32(p, crc32(0, footer, p - footer));
612 p += 4;
613
1df18a1c
JC
614 err = w->flush(w->write_arg);
615 if (err < 0) {
616 err = REFTABLE_IO_ERROR;
617 goto done;
618 }
619
f14bd719
HWN
620 err = padded_write(w, footer, footer_size(writer_version(w)), 0);
621 if (err < 0)
622 goto done;
623
624 if (empty_table) {
625 err = REFTABLE_EMPTY_TABLE_ERROR;
626 goto done;
627 }
628
629done:
630 /* free up memory. */
631 block_writer_release(&w->block_writer_data);
632 writer_clear_index(w);
633 strbuf_release(&w->last_key);
634 return err;
635}
636
637static void writer_clear_index(struct reftable_writer *w)
638{
9ebb2d7b 639 for (size_t i = 0; i < w->index_len; i++)
f14bd719 640 strbuf_release(&w->index[i].last_key);
f14bd719
HWN
641 FREE_AND_NULL(w->index);
642 w->index_len = 0;
643 w->index_cap = 0;
644}
645
646static const int debug = 0;
647
648static int writer_flush_nonempty_block(struct reftable_writer *w)
649{
650 uint8_t typ = block_writer_type(w->block_writer);
651 struct reftable_block_stats *bstats =
652 writer_reftable_block_stats(w, typ);
653 uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
654 int raw_bytes = block_writer_finish(w->block_writer);
655 int padding = 0;
656 int err = 0;
657 struct reftable_index_record ir = { .last_key = STRBUF_INIT };
658 if (raw_bytes < 0)
659 return raw_bytes;
660
661 if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
662 padding = w->opts.block_size - raw_bytes;
663 }
664
665 if (block_typ_off > 0) {
666 bstats->offset = block_typ_off;
667 }
668
669 bstats->entries += w->block_writer->entries;
670 bstats->restarts += w->block_writer->restart_len;
671 bstats->blocks++;
672 w->stats.blocks++;
673
674 if (debug) {
675 fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
676 w->next, raw_bytes,
677 get_be24(w->block + w->block_writer->header_off + 1));
678 }
679
680 if (w->next == 0) {
681 writer_write_header(w, w->block);
682 }
683
684 err = padded_write(w, w->block, raw_bytes, padding);
685 if (err < 0)
686 return err;
687
f6b58c1b 688 REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
f14bd719
HWN
689
690 ir.offset = w->next;
691 strbuf_reset(&ir.last_key);
692 strbuf_addbuf(&ir.last_key, &w->block_writer->last_key);
693 w->index[w->index_len] = ir;
694
695 w->index_len++;
696 w->next += padding + raw_bytes;
697 w->block_writer = NULL;
698 return 0;
699}
700
701static int writer_flush_block(struct reftable_writer *w)
702{
72a4ea71 703 if (!w->block_writer)
f14bd719
HWN
704 return 0;
705 if (w->block_writer->entries == 0)
706 return 0;
707 return writer_flush_nonempty_block(w);
708}
709
73a4c188 710const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w)
f14bd719
HWN
711{
712 return &w->stats;
713}