/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "writer.h"

#include "system.h"

#include "block.h"
#include "constants.h"
#include "record.h"
#include "tree.h"
#include "reftable-error.h"

/* finishes a block, and writes it to storage */
static int writer_flush_block(struct reftable_writer *w);

/* deallocates memory related to the index */
static void writer_clear_index(struct reftable_writer *w);

/* finishes writing a 'r' (refs) or 'g' (reflogs) section */
static int writer_finish_public_section(struct reftable_writer *w);

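/* returns the per-type statistics bucket for blocks of type 'typ' */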
static struct reftable_block_stats *
writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
{
	switch (typ) {
	case 'r':
		return &w->stats.ref_stats;
	case 'o':
		return &w->stats.obj_stats;
	case 'i':
		return &w->stats.idx_stats;
	case 'g':
		return &w->stats.log_stats;
	}
	abort();
	return NULL;
}

/* write data, queuing the padding for the next write. Returns negative for
 * error. */
static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
			int padding)
{
	int n = 0;
	if (w->pending_padding > 0) {
		uint8_t *zeroed = reftable_calloc(w->pending_padding);
		int n = w->write(w->write_arg, zeroed, w->pending_padding);
		if (n < 0)
			return n;

		w->pending_padding = 0;
		reftable_free(zeroed);
	}

	w->pending_padding = padding;
	n = w->write(w->write_arg, data, len);
	if (n < 0)
		return n;
	n += padding;
	return 0;
}

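/* fills in defaults for any options the caller left at zero */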
static void options_set_defaults(struct reftable_write_options *opts)
{
	if (opts->restart_interval == 0) {
		opts->restart_interval = 16;
	}

	if (opts->hash_id == 0) {
		opts->hash_id = GIT_SHA1_FORMAT_ID;
	}
	if (opts->block_size == 0) {
		opts->block_size = DEFAULT_BLOCK_SIZE;
	}
}

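/* format version to write: 1 for SHA-1 tables, 2 when another hash is used */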
static int writer_version(struct reftable_writer *w)
{
	return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
			1 :
			2;
}

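/* writes the file header into 'dest' and returns the header size */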
static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
{
	memcpy(dest, "REFT", 4);

	dest[4] = writer_version(w);

	put_be24(dest + 5, w->opts.block_size);
	put_be64(dest + 8, w->min_update_index);
	put_be64(dest + 16, w->max_update_index);
	if (writer_version(w) == 2) {
		put_be32(dest + 24, w->opts.hash_id);
	}
	return header_size(writer_version(w));
}

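/* resets the block writer for a fresh block of type 'typ'; the first block
 * of the file reserves room for the file header */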
static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
{
	int block_start = 0;
	if (w->next == 0) {
		block_start = header_size(writer_version(w));
	}

	strbuf_release(&w->last_key);
	block_writer_init(&w->block_writer_data, typ, w->block,
			  w->opts.block_size, block_start,
			  hash_size(w->opts.hash_id));
	w->block_writer = &w->block_writer_data;
	w->block_writer->restart_interval = w->opts.restart_interval;
}

static struct strbuf reftable_empty_strbuf = STRBUF_INIT;

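/*
 * Illustrative usage sketch (not part of the original file): write_to_fd,
 * fd, update_index, ref and err are placeholders for a hypothetical caller,
 * but the reftable_writer_* calls are the public entry points defined below.
 *
 *	static ssize_t write_to_fd(void *arg, const void *data, size_t sz)
 *	{
 *		int *fd = arg;
 *		return write(*fd, data, sz);
 *	}
 *
 *	struct reftable_write_options opts = { 0 };
 *	struct reftable_writer *w =
 *		reftable_new_writer(write_to_fd, &fd, &opts);
 *	reftable_writer_set_limits(w, update_index, update_index);
 *	err = reftable_writer_add_ref(w, &ref); // refs in ascending name order
 *	err = reftable_writer_close(w);
 *	reftable_writer_free(w);
 */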
struct reftable_writer *
reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
		    void *writer_arg, struct reftable_write_options *opts)
{
	struct reftable_writer *wp =
		reftable_calloc(sizeof(struct reftable_writer));
	strbuf_init(&wp->block_writer_data.last_key, 0);
	options_set_defaults(opts);
	if (opts->block_size >= (1 << 24)) {
		/* TODO - error return? */
		abort();
	}
	wp->last_key = reftable_empty_strbuf;
	wp->block = reftable_calloc(opts->block_size);
	wp->write = writer_func;
	wp->write_arg = writer_arg;
	wp->opts = *opts;
	writer_reinit_block_writer(wp, BLOCK_TYPE_REF);

	return wp;
}

void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
				uint64_t max)
{
	w->min_update_index = min;
	w->max_update_index = max;
}

void reftable_writer_free(struct reftable_writer *w)
{
	reftable_free(w->block);
	reftable_free(w);
}

struct obj_index_tree_node {
	struct strbuf hash;
	uint64_t *offsets;
	size_t offset_len;
	size_t offset_cap;
};

#define OBJ_INDEX_TREE_NODE_INIT    \
	{                           \
		.hash = STRBUF_INIT \
	}

static int obj_index_tree_node_compare(const void *a, const void *b)
{
	return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
			  &((const struct obj_index_tree_node *)b)->hash);
}

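/* records that the object with the given hash is referenced by the block
 * starting at the current write offset; used to build the obj ('o') index */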
static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
{
	uint64_t off = w->next;

	struct obj_index_tree_node want = { .hash = *hash };

	struct tree_node *node = tree_search(&want, &w->obj_index_tree,
					     &obj_index_tree_node_compare, 0);
	struct obj_index_tree_node *key = NULL;
	if (node == NULL) {
		struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
		key = reftable_malloc(sizeof(struct obj_index_tree_node));
		*key = empty;

		strbuf_reset(&key->hash);
		strbuf_addbuf(&key->hash, hash);
		tree_search((void *)key, &w->obj_index_tree,
			    &obj_index_tree_node_compare, 1);
	} else {
		key = node->key;
	}

	if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
		return;
	}

	if (key->offset_len == key->offset_cap) {
		key->offset_cap = 2 * key->offset_cap + 1;
		key->offsets = reftable_realloc(
			key->offsets, sizeof(uint64_t) * key->offset_cap);
	}

	key->offsets[key->offset_len++] = off;
}

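/* adds a record to the current block, starting a new block of the same type
 * when it does not fit; keys must be added in strictly ascending order */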
static int writer_add_record(struct reftable_writer *w,
			     struct reftable_record *rec)
{
	struct strbuf key = STRBUF_INIT;
	int err = -1;
	reftable_record_key(rec, &key);
	if (strbuf_cmp(&w->last_key, &key) >= 0) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	strbuf_reset(&w->last_key);
	strbuf_addbuf(&w->last_key, &key);
	if (w->block_writer == NULL) {
		writer_reinit_block_writer(w, reftable_record_type(rec));
	}

	assert(block_writer_type(w->block_writer) == reftable_record_type(rec));

	if (block_writer_add(w->block_writer, rec) == 0) {
		err = 0;
		goto done;
	}

	err = writer_flush_block(w);
	if (err < 0) {
		goto done;
	}

	writer_reinit_block_writer(w, reftable_record_type(rec));
	err = block_writer_add(w->block_writer, rec);
	if (err < 0) {
		goto done;
	}

	err = 0;
done:
	strbuf_release(&key);
	return err;
}

int reftable_writer_add_ref(struct reftable_writer *w,
			    struct reftable_ref_record *ref)
{
	struct reftable_record rec = { NULL };
	struct reftable_ref_record copy = *ref;
	int err = 0;

	if (ref->refname == NULL)
		return REFTABLE_API_ERROR;
	if (ref->update_index < w->min_update_index ||
	    ref->update_index > w->max_update_index)
		return REFTABLE_API_ERROR;

	reftable_record_from_ref(&rec, &copy);
	copy.update_index -= w->min_update_index;

	err = writer_add_record(w, &rec);
	if (err < 0)
		return err;

	if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}

	if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, reftable_ref_record_val2(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}
	return 0;
}

int reftable_writer_add_refs(struct reftable_writer *w,
			     struct reftable_ref_record *refs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(refs, n, reftable_ref_record_compare_name);
	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_ref(w, &refs[i]);
	}
	return err;
}

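/* adds a log record as-is, without message normalization; finishes the ref
 * section first if one is still open */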
static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
					    struct reftable_log_record *log)
{
	struct reftable_record rec = { NULL };
	if (w->block_writer &&
	    block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
		int err = writer_finish_public_section(w);
		if (err < 0)
			return err;
	}

	w->next -= w->pending_padding;
	w->pending_padding = 0;

	reftable_record_from_log(&rec, log);
	return writer_add_record(w, &rec);
}

int reftable_writer_add_log(struct reftable_writer *w,
			    struct reftable_log_record *log)
{
	char *input_log_message = NULL;
	struct strbuf cleaned_message = STRBUF_INIT;
	int err = 0;

	if (log->value_type == REFTABLE_LOG_DELETION)
		return reftable_writer_add_log_verbatim(w, log);

	if (log->refname == NULL)
		return REFTABLE_API_ERROR;

	input_log_message = log->value.update.message;
	if (!w->opts.exact_log_message && log->value.update.message) {
		strbuf_addstr(&cleaned_message, log->value.update.message);
		while (cleaned_message.len &&
		       cleaned_message.buf[cleaned_message.len - 1] == '\n')
			strbuf_setlen(&cleaned_message,
				      cleaned_message.len - 1);
		if (strchr(cleaned_message.buf, '\n')) {
			/* multiple lines not allowed. */
			err = REFTABLE_API_ERROR;
			goto done;
		}
		strbuf_addstr(&cleaned_message, "\n");
		log->value.update.message = cleaned_message.buf;
	}

	err = reftable_writer_add_log_verbatim(w, log);
	log->value.update.message = input_log_message;
done:
	strbuf_release(&cleaned_message);
	return err;
}

int reftable_writer_add_logs(struct reftable_writer *w,
			     struct reftable_log_record *logs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(logs, n, reftable_log_record_compare_key);

	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_log(w, &logs[i]);
	}
	return err;
}

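/* flushes the current section and, if it spans enough blocks, writes one or
 * more levels of index blocks pointing into it */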
static int writer_finish_section(struct reftable_writer *w)
{
	uint8_t typ = block_writer_type(w->block_writer);
	uint64_t index_start = 0;
	int max_level = 0;
	int threshold = w->opts.unpadded ? 1 : 3;
	int before_blocks = w->stats.idx_stats.blocks;
	int err = writer_flush_block(w);
	int i = 0;
	struct reftable_block_stats *bstats = NULL;
	if (err < 0)
		return err;

	while (w->index_len > threshold) {
		struct reftable_index_record *idx = NULL;
		int idx_len = 0;

		max_level++;
		index_start = w->next;
		writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

		idx = w->index;
		idx_len = w->index_len;

		w->index = NULL;
		w->index_len = 0;
		w->index_cap = 0;
		for (i = 0; i < idx_len; i++) {
			struct reftable_record rec = { NULL };
			reftable_record_from_index(&rec, idx + i);
			if (block_writer_add(w->block_writer, &rec) == 0) {
				continue;
			}

			err = writer_flush_block(w);
			if (err < 0)
				return err;

			writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

			err = block_writer_add(w->block_writer, &rec);
			if (err != 0) {
				/* write into fresh block should always succeed
				 */
				abort();
			}
		}
		for (i = 0; i < idx_len; i++) {
			strbuf_release(&idx[i].last_key);
		}
		reftable_free(idx);
	}

	writer_clear_index(w);

	err = writer_flush_block(w);
	if (err < 0)
		return err;

	bstats = writer_reftable_block_stats(w, typ);
	bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
	bstats->index_offset = index_start;
	bstats->max_index_level = max_level;

	/* Reinit lastKey, as the next section can start with any key. */
	w->last_key.len = 0;

	return 0;
}

struct common_prefix_arg {
	struct strbuf *last;
	int max;
};

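/* tree walk callback: tracks the longest common prefix between consecutive
 * object hashes, which determines how far object IDs can be abbreviated */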
static void update_common(void *void_arg, void *key)
{
	struct common_prefix_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	if (arg->last) {
		int n = common_prefix_size(&entry->hash, arg->last);
		if (n > arg->max) {
			arg->max = n;
		}
	}
	arg->last = &entry->hash;
}

struct write_record_arg {
	struct reftable_writer *w;
	int err;
};

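/* tree walk callback: emits one obj record per indexed hash, flushing to a
 * fresh block when the current one is full */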
static void write_object_record(void *void_arg, void *key)
{
	struct write_record_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	struct reftable_obj_record obj_rec = {
		.hash_prefix = (uint8_t *)entry->hash.buf,
		.hash_prefix_len = arg->w->stats.object_id_len,
		.offsets = entry->offsets,
		.offset_len = entry->offset_len,
	};
	struct reftable_record rec = { NULL };
	if (arg->err < 0)
		goto done;

	reftable_record_from_obj(&rec, &obj_rec);
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	arg->err = writer_flush_block(arg->w);
	if (arg->err < 0)
		goto done;

	writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;
	obj_rec.offset_len = 0;
	arg->err = block_writer_add(arg->w->block_writer, &rec);

	/* Should be able to write into a fresh block. */
	assert(arg->err == 0);

done:;
}

static void object_record_free(void *void_arg, void *key)
{
	struct obj_index_tree_node *entry = key;

	FREE_AND_NULL(entry->offsets);
	strbuf_release(&entry->hash);
	reftable_free(entry);
}

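/* writes the obj ('o') section, which maps object IDs to the blocks that
 * reference them */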
static int writer_dump_object_index(struct reftable_writer *w)
{
	struct write_record_arg closure = { .w = w };
	struct common_prefix_arg common = { NULL };
	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &update_common, &common);
	}
	w->stats.object_id_len = common.max + 1;

	writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &write_object_record, &closure);
	}

	if (closure.err < 0)
		return closure.err;
	return writer_finish_section(w);
}

static int writer_finish_public_section(struct reftable_writer *w)
{
	uint8_t typ = 0;
	int err = 0;

	if (w->block_writer == NULL)
		return 0;

	typ = block_writer_type(w->block_writer);
	err = writer_finish_section(w);
	if (err < 0)
		return err;
	if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
	    w->stats.ref_stats.index_blocks > 0) {
		err = writer_dump_object_index(w);
		if (err < 0)
			return err;
	}

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &object_record_free, NULL);
		tree_free(w->obj_index_tree);
		w->obj_index_tree = NULL;
	}

	w->block_writer = NULL;
	return 0;
}

int reftable_writer_close(struct reftable_writer *w)
{
	uint8_t footer[72];
	uint8_t *p = footer;
	int err = writer_finish_public_section(w);
	int empty_table = w->next == 0;
	if (err != 0)
		goto done;
	w->pending_padding = 0;
	if (empty_table) {
		/* Empty tables need a header anyway. */
		uint8_t header[28];
		int n = writer_write_header(w, header);
		err = padded_write(w, header, n, 0);
		if (err < 0)
			goto done;
	}

	p += writer_write_header(w, footer);
	put_be64(p, w->stats.ref_stats.index_offset);
	p += 8;
	put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
	p += 8;
	put_be64(p, w->stats.obj_stats.index_offset);
	p += 8;

	put_be64(p, w->stats.log_stats.offset);
	p += 8;
	put_be64(p, w->stats.log_stats.index_offset);
	p += 8;

	put_be32(p, crc32(0, footer, p - footer));
	p += 4;

	err = padded_write(w, footer, footer_size(writer_version(w)), 0);
	if (err < 0)
		goto done;

	if (empty_table) {
		err = REFTABLE_EMPTY_TABLE_ERROR;
		goto done;
	}

done:
	/* free up memory. */
	block_writer_release(&w->block_writer_data);
	writer_clear_index(w);
	strbuf_release(&w->last_key);
	return err;
}

static void writer_clear_index(struct reftable_writer *w)
{
	int i = 0;
	for (i = 0; i < w->index_len; i++) {
		strbuf_release(&w->index[i].last_key);
	}

	FREE_AND_NULL(w->index);
	w->index_len = 0;
	w->index_cap = 0;
}

static const int debug = 0;

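/* finishes the current block, writes it out (queuing padding up to the block
 * size for the next write), and records an index entry pointing at it */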
static int writer_flush_nonempty_block(struct reftable_writer *w)
{
	uint8_t typ = block_writer_type(w->block_writer);
	struct reftable_block_stats *bstats =
		writer_reftable_block_stats(w, typ);
	uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
	int raw_bytes = block_writer_finish(w->block_writer);
	int padding = 0;
	int err = 0;
	struct reftable_index_record ir = { .last_key = STRBUF_INIT };
	if (raw_bytes < 0)
		return raw_bytes;

	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
		padding = w->opts.block_size - raw_bytes;
	}

	if (block_typ_off > 0) {
		bstats->offset = block_typ_off;
	}

	bstats->entries += w->block_writer->entries;
	bstats->restarts += w->block_writer->restart_len;
	bstats->blocks++;
	w->stats.blocks++;

	if (debug) {
		fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
			w->next, raw_bytes,
			get_be24(w->block + w->block_writer->header_off + 1));
	}

	if (w->next == 0) {
		writer_write_header(w, w->block);
	}

	err = padded_write(w, w->block, raw_bytes, padding);
	if (err < 0)
		return err;

	if (w->index_cap == w->index_len) {
		w->index_cap = 2 * w->index_cap + 1;
		w->index = reftable_realloc(
			w->index,
			sizeof(struct reftable_index_record) * w->index_cap);
	}

	ir.offset = w->next;
	strbuf_reset(&ir.last_key);
	strbuf_addbuf(&ir.last_key, &w->block_writer->last_key);
	w->index[w->index_len] = ir;

	w->index_len++;
	w->next += padding + raw_bytes;
	w->block_writer = NULL;
	return 0;
}

static int writer_flush_block(struct reftable_writer *w)
{
	if (w->block_writer == NULL)
		return 0;
	if (w->block_writer->entries == 0)
		return 0;
	return writer_flush_nonempty_block(w);
}

const struct reftable_stats *writer_stats(struct reftable_writer *w)
{
	return &w->stats;
}