/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "stack.h"

#include "system.h"
#include "merged.h"
#include "reader.h"
#include "refname.h"
#include "reftable-error.h"
#include "reftable-record.h"
#include "reftable-merged.h"
#include "writer.h"

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg);
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config);
static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name);
static void reftable_addition_close(struct reftable_addition *add);
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					      int reuse_open);

static void stack_filename(struct strbuf *dest, struct reftable_stack *st,
			   const char *name)
{
	strbuf_reset(dest);
	strbuf_addstr(dest, st->reftable_dir);
	strbuf_addstr(dest, "/");
	strbuf_addstr(dest, name);
}

static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
{
	int *fdp = (int *)arg;
	return write(*fdp, data, sz);
}

int reftable_new_stack(struct reftable_stack **dest, const char *dir,
		       struct reftable_write_options config)
{
	struct reftable_stack *p =
		reftable_calloc(sizeof(struct reftable_stack));
	struct strbuf list_file_name = STRBUF_INIT;
	int err = 0;

	if (config.hash_id == 0) {
		config.hash_id = GIT_SHA1_FORMAT_ID;
	}

	*dest = NULL;

	strbuf_reset(&list_file_name);
	strbuf_addstr(&list_file_name, dir);
	strbuf_addstr(&list_file_name, "/tables.list");

	p->list_file = strbuf_detach(&list_file_name, NULL);
	p->reftable_dir = xstrdup(dir);
	p->config = config;

	err = reftable_stack_reload_maybe_reuse(p, 1);
	if (err < 0) {
		reftable_stack_destroy(p);
	} else {
		*dest = p;
	}
	return err;
}

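/* Reads the file pointed to by `fd` into memory and splits it into a
   NULL-terminated vector of lines, returned via `*namesp`. */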
static int fd_read_lines(int fd, char ***namesp)
{
	off_t size = lseek(fd, 0, SEEK_END);
	char *buf = NULL;
	int err = 0;
	if (size < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	err = lseek(fd, 0, SEEK_SET);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	buf = reftable_malloc(size + 1);
	if (read(fd, buf, size) != size) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	buf[size] = 0;

	parse_names(buf, size, namesp);

done:
	reftable_free(buf);
	return err;
}

int read_lines(const char *filename, char ***namesp)
{
	int fd = open(filename, O_RDONLY);
	int err = 0;
	if (fd < 0) {
		if (errno == ENOENT) {
			*namesp = reftable_calloc(sizeof(char *));
			return 0;
		}

		return REFTABLE_IO_ERROR;
	}
	err = fd_read_lines(fd, namesp);
	close(fd);
	return err;
}

struct reftable_merged_table *
reftable_stack_merged_table(struct reftable_stack *st)
{
	return st->merged;
}

static int has_name(char **names, const char *name)
{
	while (*names) {
		if (!strcmp(*names, name))
			return 1;
		names++;
	}
	return 0;
}

/* Close and free the stack */
void reftable_stack_destroy(struct reftable_stack *st)
{
	char **names = NULL;
	int err = 0;
	if (st->merged) {
		reftable_merged_table_free(st->merged);
		st->merged = NULL;
	}

	err = read_lines(st->list_file, &names);
	if (err < 0) {
		FREE_AND_NULL(names);
	}

	if (st->readers) {
		int i = 0;
		struct strbuf filename = STRBUF_INIT;
		for (i = 0; i < st->readers_len; i++) {
			const char *name = reader_name(st->readers[i]);
			strbuf_reset(&filename);
			if (names && !has_name(names, name)) {
				stack_filename(&filename, st, name);
			}
			reftable_reader_free(st->readers[i]);

			if (filename.len) {
				/* On Windows, can only unlink after closing. */
				unlink(filename.buf);
			}
		}
		strbuf_release(&filename);
		st->readers_len = 0;
		FREE_AND_NULL(st->readers);
	}
	FREE_AND_NULL(st->list_file);
	FREE_AND_NULL(st->reftable_dir);
	reftable_free(st);
	free_names(names);
}

static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
						   int cur_len)
{
	struct reftable_reader **cur =
		reftable_calloc(sizeof(struct reftable_reader *) * cur_len);
	int i = 0;
	for (i = 0; i < cur_len; i++) {
		cur[i] = st->readers[i];
	}
	return cur;
}

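/* Opens the tables listed in `names` and installs them as the new stack,
   reusing readers that are already open when `reuse_open` is set. Readers
   that drop out of the list are closed and their files unlinked. */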
static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
				      int reuse_open)
{
	int cur_len = !st->merged ? 0 : st->merged->stack_len;
	struct reftable_reader **cur = stack_copy_readers(st, cur_len);
	int err = 0;
	int names_len = names_length(names);
	struct reftable_reader **new_readers =
		reftable_calloc(sizeof(struct reftable_reader *) * names_len);
	struct reftable_table *new_tables =
		reftable_calloc(sizeof(struct reftable_table) * names_len);
	int new_readers_len = 0;
	struct reftable_merged_table *new_merged = NULL;
	int i;

	while (*names) {
		struct reftable_reader *rd = NULL;
		char *name = *names++;

		/* this is linear; we assume compaction keeps the number of
		   tables under control so this is not quadratic. */
		int j = 0;
		for (j = 0; reuse_open && j < cur_len; j++) {
			if (cur[j] && 0 == strcmp(cur[j]->name, name)) {
				rd = cur[j];
				cur[j] = NULL;
				break;
			}
		}

		if (!rd) {
			struct reftable_block_source src = { NULL };
			struct strbuf table_path = STRBUF_INIT;
			stack_filename(&table_path, st, name);

			err = reftable_block_source_from_file(&src,
							      table_path.buf);
			strbuf_release(&table_path);

			if (err < 0)
				goto done;

			err = reftable_new_reader(&rd, &src, name);
			if (err < 0)
				goto done;
		}

		new_readers[new_readers_len] = rd;
		reftable_table_from_reader(&new_tables[new_readers_len], rd);
		new_readers_len++;
	}

	/* success! */
	err = reftable_new_merged_table(&new_merged, new_tables,
					new_readers_len, st->config.hash_id);
	if (err < 0)
		goto done;

	new_tables = NULL;
	st->readers_len = new_readers_len;
	if (st->merged) {
		merged_table_release(st->merged);
		reftable_merged_table_free(st->merged);
	}
	if (st->readers) {
		reftable_free(st->readers);
	}
	st->readers = new_readers;
	new_readers = NULL;
	new_readers_len = 0;

	new_merged->suppress_deletions = 1;
	st->merged = new_merged;
	for (i = 0; i < cur_len; i++) {
		if (cur[i]) {
			const char *name = reader_name(cur[i]);
			struct strbuf filename = STRBUF_INIT;
			stack_filename(&filename, st, name);

			reader_close(cur[i]);
			reftable_reader_free(cur[i]);

			/* On Windows, can only unlink after closing. */
			unlink(filename.buf);

			strbuf_release(&filename);
		}
	}

done:
	for (i = 0; i < new_readers_len; i++) {
		reader_close(new_readers[i]);
		reftable_reader_free(new_readers[i]);
	}
	reftable_free(new_readers);
	reftable_free(new_tables);
	reftable_free(cur);
	return err;
}

/* return negative if a before b. */
static int tv_cmp(struct timeval *a, struct timeval *b)
{
	time_t diff = a->tv_sec - b->tv_sec;
	int udiff = a->tv_usec - b->tv_usec;

	if (diff != 0)
		return diff;

	return udiff;
}

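/* Reloads the stack from tables.list, retrying with a randomized, growing
   delay for up to a few seconds when a concurrent writer replaces tables
   between reading the list and opening the files it names. */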
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open)
{
	struct timeval deadline = { 0 };
	int err = gettimeofday(&deadline, NULL);
	int64_t delay = 0;
	int tries = 0;
	if (err < 0)
		return err;

	deadline.tv_sec += 3;
	while (1) {
		char **names = NULL;
		char **names_after = NULL;
		struct timeval now = { 0 };
		int err = gettimeofday(&now, NULL);
		int err2 = 0;
		if (err < 0) {
			return err;
		}

		/* Only look at deadlines after the first few times. This
		   simplifies debugging in GDB */
		tries++;
		if (tries > 3 && tv_cmp(&now, &deadline) >= 0) {
			break;
		}

		err = read_lines(st->list_file, &names);
		if (err < 0) {
			free_names(names);
			return err;
		}
		err = reftable_stack_reload_once(st, names, reuse_open);
		if (err == 0) {
			free_names(names);
			break;
		}
		if (err != REFTABLE_NOT_EXIST_ERROR) {
			free_names(names);
			return err;
		}

		/* err == REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent
		   writer. Check if there was one by checking if the name list
		   changed.
		*/
		err2 = read_lines(st->list_file, &names_after);
		if (err2 < 0) {
			free_names(names);
			return err2;
		}

		if (names_equal(names_after, names)) {
			free_names(names);
			free_names(names_after);
			return err;
		}
		free_names(names);
		free_names(names_after);

		delay = delay + (delay * rand()) / RAND_MAX + 1;
		sleep_millisec(delay);
	}

	return 0;
}

/* -1 = error
    0 = up to date
    1 = changed. */
static int stack_uptodate(struct reftable_stack *st)
{
	char **names = NULL;
	int err = read_lines(st->list_file, &names);
	int i = 0;
	if (err < 0)
		return err;

	for (i = 0; i < st->readers_len; i++) {
		if (!names[i]) {
			err = 1;
			goto done;
		}

		if (strcmp(st->readers[i]->name, names[i])) {
			err = 1;
			goto done;
		}
	}

	if (names[st->merged->stack_len]) {
		err = 1;
		goto done;
	}

done:
	free_names(names);
	return err;
}

int reftable_stack_reload(struct reftable_stack *st)
{
	int err = stack_uptodate(st);
	if (err > 0)
		return reftable_stack_reload_maybe_reuse(st, 1);
	return err;
}

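/*
 * reftable_stack_add() adds a single new table to the stack by invoking the
 * caller-supplied `write` callback, then auto-compacts unless
 * disable_auto_compact is set. A minimal caller might look like the sketch
 * below; the callback and the record it writes are illustrative, not part of
 * this file:
 *
 *	static int write_one_ref(struct reftable_writer *wr, void *arg)
 *	{
 *		struct reftable_ref_record *rec = arg;
 *		reftable_writer_set_limits(wr, rec->update_index,
 *					   rec->update_index);
 *		return reftable_writer_add_ref(wr, rec);
 *	}
 *
 *	rec.update_index = reftable_stack_next_update_index(st);
 *	err = reftable_stack_add(st, &write_one_ref, &rec);
 */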
int reftable_stack_add(struct reftable_stack *st,
		       int (*write)(struct reftable_writer *wr, void *arg),
		       void *arg)
{
	int err = stack_try_add(st, write, arg);
	if (err < 0) {
		if (err == REFTABLE_LOCK_ERROR) {
			/* Ignore error return, we want to propagate
			   REFTABLE_LOCK_ERROR.
			*/
			reftable_stack_reload(st);
		}
		return err;
	}

	if (!st->disable_auto_compact)
		return reftable_stack_auto_compact(st);

	return 0;
}

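/* Formats a table name that encodes the update-index range covered by the
   table, plus a random suffix to avoid collisions between writers. */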
static void format_name(struct strbuf *dest, uint64_t min, uint64_t max)
{
	char buf[100];
	uint32_t rnd = (uint32_t)rand();
	snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
		 min, max, rnd);
	strbuf_reset(dest);
	strbuf_addstr(dest, buf);
}

struct reftable_addition {
	int lock_file_fd;
	struct strbuf lock_file_name;
	struct reftable_stack *stack;

	char **new_tables;
	int new_tables_len;
	uint64_t next_update_index;
};

#define REFTABLE_ADDITION_INIT                \
	{                                     \
		.lock_file_name = STRBUF_INIT \
	}

static int reftable_stack_init_addition(struct reftable_addition *add,
					struct reftable_stack *st)
{
	int err = 0;
	add->stack = st;

	strbuf_reset(&add->lock_file_name);
	strbuf_addstr(&add->lock_file_name, st->list_file);
	strbuf_addstr(&add->lock_file_name, ".lock");

	add->lock_file_fd = open(add->lock_file_name.buf,
				 O_EXCL | O_CREAT | O_WRONLY, 0644);
	if (add->lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = REFTABLE_LOCK_ERROR;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	err = stack_uptodate(st);
	if (err < 0)
		goto done;

	if (err > 1) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	add->next_update_index = reftable_stack_next_update_index(st);
done:
	if (err) {
		reftable_addition_close(add);
	}
	return err;
}

static void reftable_addition_close(struct reftable_addition *add)
{
	int i = 0;
	struct strbuf nm = STRBUF_INIT;
	for (i = 0; i < add->new_tables_len; i++) {
		stack_filename(&nm, add->stack, add->new_tables[i]);
		unlink(nm.buf);
		reftable_free(add->new_tables[i]);
		add->new_tables[i] = NULL;
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	if (add->lock_file_fd > 0) {
		close(add->lock_file_fd);
		add->lock_file_fd = 0;
	}
	if (add->lock_file_name.len > 0) {
		unlink(add->lock_file_name.buf);
		strbuf_release(&add->lock_file_name);
	}

	strbuf_release(&nm);
}

void reftable_addition_destroy(struct reftable_addition *add)
{
	if (!add) {
		return;
	}
	reftable_addition_close(add);
	reftable_free(add);
}

int reftable_addition_commit(struct reftable_addition *add)
{
	struct strbuf table_list = STRBUF_INIT;
	int i = 0;
	int err = 0;
	if (add->new_tables_len == 0)
		goto done;

	for (i = 0; i < add->stack->merged->stack_len; i++) {
		strbuf_addstr(&table_list, add->stack->readers[i]->name);
		strbuf_addstr(&table_list, "\n");
	}
	for (i = 0; i < add->new_tables_len; i++) {
		strbuf_addstr(&table_list, add->new_tables[i]);
		strbuf_addstr(&table_list, "\n");
	}

	err = write(add->lock_file_fd, table_list.buf, table_list.len);
	strbuf_release(&table_list);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = close(add->lock_file_fd);
	add->lock_file_fd = 0;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = rename(add->lock_file_name.buf, add->stack->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	/* success, no more state to clean up. */
	strbuf_release(&add->lock_file_name);
	for (i = 0; i < add->new_tables_len; i++) {
		reftable_free(add->new_tables[i]);
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	err = reftable_stack_reload(add->stack);
done:
	reftable_addition_close(add);
	return err;
}

int reftable_stack_new_addition(struct reftable_addition **dest,
				struct reftable_stack *st)
{
	int err = 0;
	struct reftable_addition empty = REFTABLE_ADDITION_INIT;
	*dest = reftable_calloc(sizeof(**dest));
	**dest = empty;
	err = reftable_stack_init_addition(*dest, st);
	if (err) {
		reftable_free(*dest);
		*dest = NULL;
	}
	return err;
}

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg)
{
	struct reftable_addition add = REFTABLE_ADDITION_INIT;
	int err = reftable_stack_init_addition(&add, st);
	if (err < 0)
		goto done;
	if (err > 0) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	err = reftable_addition_add(&add, write_table, arg);
	if (err < 0)
		goto done;

	err = reftable_addition_commit(&add);
done:
	reftable_addition_close(&add);
	return err;
}

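/* Writes one new table for this addition: the `write_table` callback fills a
   temporary file, which is then validated, renamed to its final name, and
   remembered so that a later commit can append it to tables.list. */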
int reftable_addition_add(struct reftable_addition *add,
			  int (*write_table)(struct reftable_writer *wr,
					     void *arg),
			  void *arg)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf tab_file_name = STRBUF_INIT;
	struct strbuf next_name = STRBUF_INIT;
	struct reftable_writer *wr = NULL;
	int err = 0;
	int tab_fd = 0;

	strbuf_reset(&next_name);
	format_name(&next_name, add->next_update_index, add->next_update_index);

	stack_filename(&temp_tab_file_name, add->stack, next_name.buf);
	strbuf_addstr(&temp_tab_file_name, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab_file_name.buf);
	if (tab_fd < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	wr = reftable_new_writer(reftable_fd_write, &tab_fd,
				 &add->stack->config);
	err = write_table(wr, arg);
	if (err < 0)
		goto done;

	err = reftable_writer_close(wr);
	if (err == REFTABLE_EMPTY_TABLE_ERROR) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = stack_check_addition(add->stack, temp_tab_file_name.buf);
	if (err < 0)
		goto done;

	if (wr->min_update_index < add->next_update_index) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	format_name(&next_name, wr->min_update_index, wr->max_update_index);
	strbuf_addstr(&next_name, ".ref");

	stack_filename(&tab_file_name, add->stack, next_name.buf);

	/*
	  On Windows, this relies on rand() picking a unique destination name.
	  Maybe we should do a retry loop as well?
	*/
	err = rename(temp_tab_file_name.buf, tab_file_name.buf);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	add->new_tables = reftable_realloc(add->new_tables,
					   sizeof(*add->new_tables) *
						   (add->new_tables_len + 1));
	add->new_tables[add->new_tables_len] = strbuf_detach(&next_name, NULL);
	add->new_tables_len++;
done:
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (temp_tab_file_name.len > 0) {
		unlink(temp_tab_file_name.buf);
	}

	strbuf_release(&temp_tab_file_name);
	strbuf_release(&tab_file_name);
	strbuf_release(&next_name);
	reftable_writer_free(wr);
	return err;
}

uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
{
	int sz = st->merged->stack_len;
	if (sz > 0)
		return reftable_reader_max_update_index(st->readers[sz - 1]) +
		       1;
	return 1;
}

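/* Writes the compacted contents of tables [first, last] into a temporary
   file whose path is returned via `temp_tab`. Assumes the caller already
   holds the relevant locks. */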
static int stack_compact_locked(struct reftable_stack *st, int first, int last,
				struct strbuf *temp_tab,
				struct reftable_log_expiry_config *config)
{
	struct strbuf next_name = STRBUF_INIT;
	int tab_fd = -1;
	struct reftable_writer *wr = NULL;
	int err = 0;

	format_name(&next_name,
		    reftable_reader_min_update_index(st->readers[first]),
		    reftable_reader_max_update_index(st->readers[last]));

	stack_filename(temp_tab, st, next_name.buf);
	strbuf_addstr(temp_tab, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab->buf);
	wr = reftable_new_writer(reftable_fd_write, &tab_fd, &st->config);

	err = stack_write_compact(st, wr, first, last, config);
	if (err < 0)
		goto done;
	err = reftable_writer_close(wr);
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;

done:
	reftable_writer_free(wr);
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (err != 0 && temp_tab->len > 0) {
		unlink(temp_tab->buf);
		strbuf_release(temp_tab);
	}
	strbuf_release(&next_name);
	return err;
}

static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config)
{
	int subtabs_len = last - first + 1;
	struct reftable_table *subtabs = reftable_calloc(
		sizeof(struct reftable_table) * (last - first + 1));
	struct reftable_merged_table *mt = NULL;
	int err = 0;
	struct reftable_iterator it = { NULL };
	struct reftable_ref_record ref = { NULL };
	struct reftable_log_record log = { NULL };

	uint64_t entries = 0;

	int i = 0, j = 0;
	for (i = first, j = 0; i <= last; i++) {
		struct reftable_reader *t = st->readers[i];
		reftable_table_from_reader(&subtabs[j++], t);
		st->stats.bytes += t->size;
	}
	reftable_writer_set_limits(wr, st->readers[first]->min_update_index,
				   st->readers[last]->max_update_index);

	err = reftable_new_merged_table(&mt, subtabs, subtabs_len,
					st->config.hash_id);
	if (err < 0) {
		reftable_free(subtabs);
		goto done;
	}

	err = reftable_merged_table_seek_ref(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0) {
			break;
		}

		if (first == 0 && reftable_ref_record_is_deletion(&ref)) {
			continue;
		}

		err = reftable_writer_add_ref(wr, &ref);
		if (err < 0) {
			break;
		}
		entries++;
	}
	reftable_iterator_destroy(&it);

	err = reftable_merged_table_seek_log(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_log(&it, &log);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0) {
			break;
		}
		if (first == 0 && reftable_log_record_is_deletion(&log)) {
			continue;
		}

		if (config && config->min_update_index > 0 &&
		    log.update_index < config->min_update_index) {
			continue;
		}

		if (config && config->time > 0 &&
		    log.value.update.time < config->time) {
			continue;
		}

		err = reftable_writer_add_log(wr, &log);
		if (err < 0) {
			break;
		}
		entries++;
	}

done:
	reftable_iterator_destroy(&it);
	if (mt) {
		merged_table_release(mt);
		reftable_merged_table_free(mt);
	}
	reftable_ref_record_release(&ref);
	reftable_log_record_release(&log);
	st->stats.entries_written += entries;
	return err;
}

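/* Compacts tables [first, last] into a single new table: takes the
   tables.list lock, locks each affected table, writes the merged contents
   to a new file, rewrites tables.list, and finally deletes the replaced
   tables after reloading. */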
/* < 0: error. 0 == OK, > 0 attempt failed; could retry. */
static int stack_compact_range(struct reftable_stack *st, int first, int last,
			       struct reftable_log_expiry_config *expiry)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf new_table_name = STRBUF_INIT;
	struct strbuf lock_file_name = STRBUF_INIT;
	struct strbuf ref_list_contents = STRBUF_INIT;
	struct strbuf new_table_path = STRBUF_INIT;
	int err = 0;
	int have_lock = 0;
	int lock_file_fd = -1;
	int compact_count = last - first + 1;
	char **listp = NULL;
	char **delete_on_success =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	char **subtable_locks =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	int i = 0;
	int j = 0;
	int is_empty_table = 0;

	if (first > last || (!expiry && first == last)) {
		err = 0;
		goto done;
	}

	st->stats.attempts++;

	strbuf_reset(&lock_file_name);
	strbuf_addstr(&lock_file_name, st->list_file);
	strbuf_addstr(&lock_file_name, ".lock");

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0644);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	/* Don't want to write to the lock for now. */
	close(lock_file_fd);
	lock_file_fd = -1;

	have_lock = 1;
	err = stack_uptodate(st);
	if (err != 0)
		goto done;

	for (i = first, j = 0; i <= last; i++) {
		struct strbuf subtab_file_name = STRBUF_INIT;
		struct strbuf subtab_lock = STRBUF_INIT;
		int sublock_file_fd = -1;

		stack_filename(&subtab_file_name, st,
			       reader_name(st->readers[i]));

		strbuf_reset(&subtab_lock);
		strbuf_addbuf(&subtab_lock, &subtab_file_name);
		strbuf_addstr(&subtab_lock, ".lock");

		sublock_file_fd = open(subtab_lock.buf,
				       O_EXCL | O_CREAT | O_WRONLY, 0644);
		if (sublock_file_fd > 0) {
			close(sublock_file_fd);
		} else if (sublock_file_fd < 0) {
			if (errno == EEXIST) {
				err = 1;
			} else {
				err = REFTABLE_IO_ERROR;
			}
		}

		subtable_locks[j] = subtab_lock.buf;
		delete_on_success[j] = subtab_file_name.buf;
		j++;

		if (err != 0)
			goto done;
	}

	err = unlink(lock_file_name.buf);
	if (err < 0)
		goto done;
	have_lock = 0;

	err = stack_compact_locked(st, first, last, &temp_tab_file_name,
				   expiry);
	/* Compaction + tombstones can create an empty table out of non-empty
	 * tables. */
	is_empty_table = (err == REFTABLE_EMPTY_TABLE_ERROR);
	if (is_empty_table) {
		err = 0;
	}
	if (err < 0)
		goto done;

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0644);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	have_lock = 1;

	format_name(&new_table_name, st->readers[first]->min_update_index,
		    st->readers[last]->max_update_index);
	strbuf_addstr(&new_table_name, ".ref");

	stack_filename(&new_table_path, st, new_table_name.buf);

	if (!is_empty_table) {
		/* retry? */
		err = rename(temp_tab_file_name.buf, new_table_path.buf);
		if (err < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	for (i = 0; i < first; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	if (!is_empty_table) {
		strbuf_addbuf(&ref_list_contents, &new_table_name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	for (i = last + 1; i < st->merged->stack_len; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}

	err = write(lock_file_fd, ref_list_contents.buf, ref_list_contents.len);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	err = close(lock_file_fd);
	lock_file_fd = -1;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}

	err = rename(lock_file_name.buf, st->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	have_lock = 0;

	/* Reload the stack before deleting. On Windows, we can only delete the
	   files after we have closed them.
	*/
	err = reftable_stack_reload_maybe_reuse(st, first < last);

	listp = delete_on_success;
	while (*listp) {
		if (strcmp(*listp, new_table_path.buf)) {
			unlink(*listp);
		}
		listp++;
	}

done:
	free_names(delete_on_success);

	listp = subtable_locks;
	while (*listp) {
		unlink(*listp);
		listp++;
	}
	free_names(subtable_locks);
	if (lock_file_fd >= 0) {
		close(lock_file_fd);
		lock_file_fd = -1;
	}
	if (have_lock) {
		unlink(lock_file_name.buf);
	}
	strbuf_release(&new_table_name);
	strbuf_release(&new_table_path);
	strbuf_release(&ref_list_contents);
	strbuf_release(&temp_tab_file_name);
	strbuf_release(&lock_file_name);
	return err;
}

int reftable_stack_compact_all(struct reftable_stack *st,
			       struct reftable_log_expiry_config *config)
{
	return stack_compact_range(st, 0, st->merged->stack_len - 1, config);
}

static int stack_compact_range_stats(struct reftable_stack *st, int first,
				     int last,
				     struct reftable_log_expiry_config *config)
{
	int err = stack_compact_range(st, first, last, config);
	if (err > 0) {
		st->stats.failures++;
	}
	return err;
}

static int segment_size(struct segment *s)
{
	return s->end - s->start;
}

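/* Returns the position of the highest set bit, i.e. floor(log2(sz)), or 0
   when sz is 0. */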
int fastlog2(uint64_t sz)
{
	int l = 0;
	if (sz == 0)
		return 0;
	for (; sz; sz /= 2) {
		l++;
	}
	return l - 1;
}

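/* Groups consecutive tables whose sizes fall into the same power-of-two
   bucket into segments; returns the segment array and stores its length in
   `*seglen`. */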
struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n)
{
	struct segment *segs = reftable_calloc(sizeof(struct segment) * n);
	int next = 0;
	struct segment cur = { 0 };
	int i = 0;

	if (n == 0) {
		*seglen = 0;
		return segs;
	}
	for (i = 0; i < n; i++) {
		int log = fastlog2(sizes[i]);
		if (cur.log != log && cur.bytes > 0) {
			struct segment fresh = {
				.start = i,
			};

			segs[next++] = cur;
			cur = fresh;
		}

		cur.log = log;
		cur.end = i + 1;
		cur.bytes += sizes[i];
	}
	segs[next++] = cur;
	*seglen = next;
	return segs;
}

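/* Picks the lowest-bucket segment spanning more than one table as the
   compaction candidate, then extends it towards the start of the stack as
   long as the combined size's power-of-two bucket is not smaller than that
   of the preceding table. */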
struct segment suggest_compaction_segment(uint64_t *sizes, int n)
{
	int seglen = 0;
	struct segment *segs = sizes_to_segments(&seglen, sizes, n);
	struct segment min_seg = {
		.log = 64,
	};
	int i = 0;
	for (i = 0; i < seglen; i++) {
		if (segment_size(&segs[i]) == 1) {
			continue;
		}

		if (segs[i].log < min_seg.log) {
			min_seg = segs[i];
		}
	}

	while (min_seg.start > 0) {
		int prev = min_seg.start - 1;
		if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev])) {
			break;
		}

		min_seg.start = prev;
		min_seg.bytes += sizes[prev];
	}

	reftable_free(segs);
	return min_seg;
}

static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
{
	uint64_t *sizes =
		reftable_calloc(sizeof(uint64_t) * st->merged->stack_len);
	int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
	int overhead = header_size(version) - 1;
	int i = 0;
	for (i = 0; i < st->merged->stack_len; i++) {
		sizes[i] = st->readers[i]->size - overhead;
	}
	return sizes;
}

int reftable_stack_auto_compact(struct reftable_stack *st)
{
	uint64_t *sizes = stack_table_sizes_for_compaction(st);
	struct segment seg =
		suggest_compaction_segment(sizes, st->merged->stack_len);
	reftable_free(sizes);
	if (segment_size(&seg) > 0)
		return stack_compact_range_stats(st, seg.start, seg.end - 1,
						 NULL);

	return 0;
}

struct reftable_compaction_stats *
reftable_stack_compaction_stats(struct reftable_stack *st)
{
	return &st->stats;
}

int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
			    struct reftable_ref_record *ref)
{
	struct reftable_table tab = { NULL };
	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
	return reftable_table_read_ref(&tab, refname, ref);
}

int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
			    struct reftable_log_record *log)
{
	struct reftable_iterator it = { NULL };
	struct reftable_merged_table *mt = reftable_stack_merged_table(st);
	int err = reftable_merged_table_seek_log(mt, &it, refname);
	if (err)
		goto done;

	err = reftable_iterator_next_log(&it, log);
	if (err)
		goto done;

	if (strcmp(log->refname, refname) ||
	    reftable_log_record_is_deletion(log)) {
		err = 1;
		goto done;
	}

done:
	if (err) {
		reftable_log_record_release(log);
	}
	reftable_iterator_destroy(&it);
	return err;
}

static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name)
{
	int err = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct reftable_table tab = { NULL };
	struct reftable_ref_record *refs = NULL;
	struct reftable_iterator it = { NULL };
	int cap = 0;
	int len = 0;
	int i = 0;

	if (st->config.skip_name_check)
		return 0;

	err = reftable_block_source_from_file(&src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_reader_seek_ref(rd, &it, "");
	if (err > 0) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	while (1) {
		struct reftable_ref_record ref = { NULL };
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			break;
		}
		if (err < 0)
			goto done;

		if (len >= cap) {
			cap = 2 * cap + 1;
			refs = reftable_realloc(refs, cap * sizeof(refs[0]));
		}

		refs[len++] = ref;
	}

	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));

	err = validate_ref_record_addition(tab, refs, len);

done:
	for (i = 0; i < len; i++) {
		reftable_ref_record_release(&refs[i]);
	}

	free(refs);
	reftable_iterator_destroy(&it);
	reftable_reader_free(rd);
	return err;
}

static int is_table_name(const char *s)
{
	const char *dot = strrchr(s, '.');
	return dot && !strcmp(dot, ".ref");
}

static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max,
				     const char *name)
{
	int err = 0;
	uint64_t update_idx = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct strbuf table_path = STRBUF_INIT;
	stack_filename(&table_path, st, name);

	err = reftable_block_source_from_file(&src, table_path.buf);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, name);
	if (err < 0)
		goto done;

	update_idx = reftable_reader_max_update_index(rd);
	reftable_reader_free(rd);

	if (update_idx <= max) {
		unlink(table_path.buf);
	}
done:
	strbuf_release(&table_path);
}

static int reftable_stack_clean_locked(struct reftable_stack *st)
{
	uint64_t max = reftable_merged_table_max_update_index(
		reftable_stack_merged_table(st));
	DIR *dir = opendir(st->reftable_dir);
	struct dirent *d = NULL;
	if (!dir) {
		return REFTABLE_IO_ERROR;
	}

	while ((d = readdir(dir))) {
		int i = 0;
		int found = 0;
		if (!is_table_name(d->d_name))
			continue;

		for (i = 0; !found && i < st->readers_len; i++) {
			found = !strcmp(reader_name(st->readers[i]), d->d_name);
		}
		if (found)
			continue;

		remove_maybe_stale_table(st, max, d->d_name);
	}

	closedir(dir);
	return 0;
}

int reftable_stack_clean(struct reftable_stack *st)
{
	struct reftable_addition *add = NULL;
	int err = reftable_stack_new_addition(&add, st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_reload(st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_clean_locked(st);

done:
	reftable_addition_destroy(add);
	return err;
}

int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id)
{
	struct reftable_stack *stack = NULL;
	struct reftable_write_options cfg = { .hash_id = hash_id };
	struct reftable_merged_table *merged = NULL;
	struct reftable_table table = { NULL };

	int err = reftable_new_stack(&stack, stackdir, cfg);
	if (err < 0)
		goto done;

	merged = reftable_stack_merged_table(stack);
	reftable_table_from_merged_table(&table, merged);
	err = reftable_table_print(&table);
done:
	if (stack)
		reftable_stack_destroy(stack);
	return err;
}