]> git.ipfire.org Git - thirdparty/git.git/blame - reftable/stack.c
Merge branch 'jc/local-extern-shell-rules'
[thirdparty/git.git] / reftable / stack.c
CommitLineData
e48d4272
HWN
1/*
2Copyright 2020 Google LLC
3
4Use of this source code is governed by a BSD-style
5license that can be found in the LICENSE file or at
6https://developers.google.com/open-source/licenses/bsd
7*/
8
9#include "stack.h"
10
1df18a1c 11#include "../write-or-die.h"
e48d4272
HWN
12#include "system.h"
13#include "merged.h"
14#include "reader.h"
15#include "refname.h"
16#include "reftable-error.h"
17#include "reftable-record.h"
18#include "reftable-merged.h"
19#include "writer.h"
3054fbd9
PS
20#include "tempfile.h"
21
e48d4272
HWN
22static int stack_try_add(struct reftable_stack *st,
23 int (*write_table)(struct reftable_writer *wr,
24 void *arg),
25 void *arg);
26static int stack_write_compact(struct reftable_stack *st,
47616c43
PS
27 struct reftable_writer *wr,
28 size_t first, size_t last,
e48d4272
HWN
29 struct reftable_log_expiry_config *config);
30static int stack_check_addition(struct reftable_stack *st,
31 const char *new_tab_name);
32static void reftable_addition_close(struct reftable_addition *add);
33static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
34 int reuse_open);
35
36static void stack_filename(struct strbuf *dest, struct reftable_stack *st,
37 const char *name)
38{
39 strbuf_reset(dest);
40 strbuf_addstr(dest, st->reftable_dir);
41 strbuf_addstr(dest, "/");
42 strbuf_addstr(dest, name);
43}
44
45static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
46{
47 int *fdp = (int *)arg;
85a8c899 48 return write_in_full(*fdp, data, sz);
e48d4272
HWN
49}
50
1df18a1c
JC
51static int reftable_fd_flush(void *arg)
52{
53 int *fdp = (int *)arg;
54
55 return fsync_component(FSYNC_COMPONENT_REFERENCE, *fdp);
56}
57
e48d4272
HWN
58int reftable_new_stack(struct reftable_stack **dest, const char *dir,
59 struct reftable_write_options config)
60{
b4ff12c8 61 struct reftable_stack *p = reftable_calloc(1, sizeof(*p));
e48d4272
HWN
62 struct strbuf list_file_name = STRBUF_INIT;
63 int err = 0;
64
65 if (config.hash_id == 0) {
66 config.hash_id = GIT_SHA1_FORMAT_ID;
67 }
68
69 *dest = NULL;
70
71 strbuf_reset(&list_file_name);
72 strbuf_addstr(&list_file_name, dir);
73 strbuf_addstr(&list_file_name, "/tables.list");
74
75 p->list_file = strbuf_detach(&list_file_name, NULL);
4f36b859 76 p->list_fd = -1;
e48d4272
HWN
77 p->reftable_dir = xstrdup(dir);
78 p->config = config;
79
80 err = reftable_stack_reload_maybe_reuse(p, 1);
81 if (err < 0) {
82 reftable_stack_destroy(p);
83 } else {
84 *dest = p;
85 }
86 return err;
87}
88
89static int fd_read_lines(int fd, char ***namesp)
90{
91 off_t size = lseek(fd, 0, SEEK_END);
92 char *buf = NULL;
93 int err = 0;
94 if (size < 0) {
95 err = REFTABLE_IO_ERROR;
96 goto done;
97 }
98 err = lseek(fd, 0, SEEK_SET);
99 if (err < 0) {
100 err = REFTABLE_IO_ERROR;
101 goto done;
102 }
103
b4ff12c8 104 REFTABLE_ALLOC_ARRAY(buf, size + 1);
917a2b3c 105 if (read_in_full(fd, buf, size) != size) {
e48d4272
HWN
106 err = REFTABLE_IO_ERROR;
107 goto done;
108 }
109 buf[size] = 0;
110
111 parse_names(buf, size, namesp);
112
113done:
114 reftable_free(buf);
115 return err;
116}
117
118int read_lines(const char *filename, char ***namesp)
119{
120 int fd = open(filename, O_RDONLY);
121 int err = 0;
122 if (fd < 0) {
123 if (errno == ENOENT) {
b4ff12c8 124 REFTABLE_CALLOC_ARRAY(*namesp, 1);
e48d4272
HWN
125 return 0;
126 }
127
128 return REFTABLE_IO_ERROR;
129 }
130 err = fd_read_lines(fd, namesp);
131 close(fd);
132 return err;
133}
/*
 * Return the stack's merged view over all of its tables. The returned
 * table is owned by the stack (it is freed in reftable_stack_destroy());
 * callers must not free it.
 */
struct reftable_merged_table *
reftable_stack_merged_table(struct reftable_stack *st)
{
	return st->merged;
}
/* Return 1 if `name` occurs in the NULL-terminated array `names`, else 0. */
static int has_name(char **names, const char *name)
{
	size_t i;

	for (i = 0; names[i]; i++) {
		if (!strcmp(names[i], name))
			return 1;
	}
	return 0;
}
/* Close and free the stack */
void reftable_stack_destroy(struct reftable_stack *st)
{
	char **names = NULL;
	int err = 0;
	if (st->merged) {
		reftable_merged_table_free(st->merged);
		st->merged = NULL;
	}

	/*
	 * Re-read "tables.list" so we can tell which of our open readers
	 * refer to tables that are no longer listed; those table files get
	 * unlinked below. On error we proceed without a name list, which
	 * simply means no files are deleted.
	 */
	err = read_lines(st->list_file, &names);
	if (err < 0) {
		FREE_AND_NULL(names);
	}

	if (st->readers) {
		int i = 0;
		struct strbuf filename = STRBUF_INIT;
		for (i = 0; i < st->readers_len; i++) {
			const char *name = reader_name(st->readers[i]);
			strbuf_reset(&filename);
			/* A non-empty filename marks the table for deletion. */
			if (names && !has_name(names, name)) {
				stack_filename(&filename, st, name);
			}
			reftable_reader_free(st->readers[i]);

			if (filename.len) {
				/* On Windows, can only unlink after closing. */
				unlink(filename.buf);
			}
		}
		strbuf_release(&filename);
		st->readers_len = 0;
		FREE_AND_NULL(st->readers);
	}

	/* Drop the cached "tables.list" fd used for stat-cache validation. */
	if (st->list_fd >= 0) {
		close(st->list_fd);
		st->list_fd = -1;
	}

	FREE_AND_NULL(st->list_file);
	FREE_AND_NULL(st->reftable_dir);
	reftable_free(st);
	free_names(names);
}
197
198static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
199 int cur_len)
200{
b4ff12c8 201 struct reftable_reader **cur = reftable_calloc(cur_len, sizeof(*cur));
e48d4272
HWN
202 int i = 0;
203 for (i = 0; i < cur_len; i++) {
204 cur[i] = st->readers[i];
205 }
206 return cur;
207}
/*
 * Rebuild the stack's reader list and merged table from the table names in
 * `names`. When `reuse_open` is set, readers already open for a listed name
 * are reused instead of reopened. Readers that are no longer listed are
 * closed and their files unlinked. On failure, partially-created readers
 * are cleaned up and the stack is left with its previous state.
 */
static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
				      int reuse_open)
{
	size_t cur_len = !st->merged ? 0 : st->merged->stack_len;
	struct reftable_reader **cur = stack_copy_readers(st, cur_len);
	size_t names_len = names_length(names);
	struct reftable_reader **new_readers =
		reftable_calloc(names_len, sizeof(*new_readers));
	struct reftable_table *new_tables =
		reftable_calloc(names_len, sizeof(*new_tables));
	size_t new_readers_len = 0;
	struct reftable_merged_table *new_merged = NULL;
	struct strbuf table_path = STRBUF_INIT;
	int err = 0;
	size_t i;

	while (*names) {
		struct reftable_reader *rd = NULL;
		char *name = *names++;

		/* this is linear; we assume compaction keeps the number of
		   tables under control so this is not quadratic. */
		for (i = 0; reuse_open && i < cur_len; i++) {
			if (cur[i] && 0 == strcmp(cur[i]->name, name)) {
				/* Take ownership; NULL marks it as reused. */
				rd = cur[i];
				cur[i] = NULL;
				break;
			}
		}

		if (!rd) {
			struct reftable_block_source src = { NULL };
			stack_filename(&table_path, st, name);

			err = reftable_block_source_from_file(&src,
							      table_path.buf);
			if (err < 0)
				goto done;

			err = reftable_new_reader(&rd, &src, name);
			if (err < 0)
				goto done;
		}

		new_readers[new_readers_len] = rd;
		reftable_table_from_reader(&new_tables[new_readers_len], rd);
		new_readers_len++;
	}

	/* success! */
	err = reftable_new_merged_table(&new_merged, new_tables,
					new_readers_len, st->config.hash_id);
	if (err < 0)
		goto done;

	/*
	 * Ownership of `new_tables` and `new_readers` transfers to the merged
	 * table / stack; zero the locals so the cleanup below is a no-op.
	 */
	new_tables = NULL;
	st->readers_len = new_readers_len;
	if (st->merged) {
		merged_table_release(st->merged);
		reftable_merged_table_free(st->merged);
	}
	if (st->readers) {
		reftable_free(st->readers);
	}
	st->readers = new_readers;
	new_readers = NULL;
	new_readers_len = 0;

	new_merged->suppress_deletions = 1;
	st->merged = new_merged;

	/* Close and delete readers that were not reused above. */
	for (i = 0; i < cur_len; i++) {
		if (cur[i]) {
			const char *name = reader_name(cur[i]);
			stack_filename(&table_path, st, name);

			reader_close(cur[i]);
			reftable_reader_free(cur[i]);

			/* On Windows, can only unlink after closing. */
			unlink(table_path.buf);
		}
	}

done:
	/* Only non-NULL on the error path: tear down what we half-built. */
	for (i = 0; i < new_readers_len; i++) {
		reader_close(new_readers[i]);
		reftable_reader_free(new_readers[i]);
	}
	reftable_free(new_readers);
	reftable_free(new_tables);
	reftable_free(cur);
	strbuf_release(&table_path);
	return err;
}
/*
 * Three-way comparison of two timevals: negative if a is before b, zero if
 * equal, positive if after. Only the sign of the result is meaningful.
 *
 * Fix: the previous version returned the raw seconds difference, a time_t,
 * through an int return value. For deltas exceeding INT_MAX seconds the
 * truncation could flip the sign. Clamp the seconds comparison to -1/1 so
 * the sign is always correct.
 */
static int tv_cmp(struct timeval *a, struct timeval *b)
{
	time_t diff = a->tv_sec - b->tv_sec;
	int udiff = a->tv_usec - b->tv_usec;

	if (diff != 0)
		return diff < 0 ? -1 : 1;

	return udiff;
}
/*
 * (Re-)load the stack from "tables.list", retrying with randomized backoff
 * for up to ~3 seconds when a concurrent writer changes the list under us.
 * When `reuse_open` is set, already-open readers are reused for unchanged
 * tables. On success the fd of "tables.list" is cached together with its
 * stat info so stack_uptodate() can cheaply detect rewrites.
 */
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open)
{
	char **names = NULL, **names_after = NULL;
	struct timeval deadline;
	int64_t delay = 0;
	int tries = 0, err;
	int fd = -1;

	err = gettimeofday(&deadline, NULL);
	if (err < 0)
		goto out;
	deadline.tv_sec += 3;

	while (1) {
		struct timeval now;

		err = gettimeofday(&now, NULL);
		if (err < 0)
			goto out;

		/*
		 * Only look at deadlines after the first few times. This
		 * simplifies debugging in GDB.
		 */
		tries++;
		if (tries > 3 && tv_cmp(&now, &deadline) >= 0)
			goto out;

		fd = open(st->list_file, O_RDONLY);
		if (fd < 0) {
			if (errno != ENOENT) {
				err = REFTABLE_IO_ERROR;
				goto out;
			}

			/* Missing "tables.list" is treated as an empty list. */
			REFTABLE_CALLOC_ARRAY(names, 1);
		} else {
			err = fd_read_lines(fd, &names);
			if (err < 0)
				goto out;
		}

		err = reftable_stack_reload_once(st, names, reuse_open);
		if (!err)
			break;
		if (err != REFTABLE_NOT_EXIST_ERROR)
			goto out;

		/*
		 * REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent
		 * writer. Check if there was one by checking if the name list
		 * changed.
		 */
		err = read_lines(st->list_file, &names_after);
		if (err < 0)
			goto out;
		if (names_equal(names_after, names)) {
			/* No concurrent writer: the table really is gone. */
			err = REFTABLE_NOT_EXIST_ERROR;
			goto out;
		}

		free_names(names);
		names = NULL;
		free_names(names_after);
		names_after = NULL;
		close(fd);
		fd = -1;

		/* Randomized, growing backoff before retrying. */
		delay = delay + (delay * rand()) / RAND_MAX + 1;
		sleep_millisec(delay);
	}

out:
	/*
	 * Invalidate the stat cache. It is sufficient to only close the file
	 * descriptor and keep the cached stat info because we never use the
	 * latter when the former is negative.
	 */
	if (st->list_fd >= 0) {
		close(st->list_fd);
		st->list_fd = -1;
	}

	/*
	 * Cache stat information in case it provides a useful signal to us.
	 * According to POSIX, "The st_ino and st_dev fields taken together
	 * uniquely identify the file within the system." That being said,
	 * Windows is not POSIX compliant and we do not have these fields
	 * available. So the information we have there is insufficient to
	 * determine whether two file descriptors point to the same file.
	 *
	 * While we could fall back to using other signals like the file's
	 * mtime, those are not sufficient to avoid races. We thus refrain from
	 * using the stat cache on such systems and fall back to the secondary
	 * caching mechanism, which is to check whether contents of the file
	 * have changed.
	 *
	 * On other systems which are POSIX compliant we must keep the file
	 * descriptor open. This is to avoid a race condition where two
	 * processes access the reftable stack at the same point in time:
	 *
	 *   1. A reads the reftable stack and caches its stat info.
	 *
	 *   2. B updates the stack, appending a new table to "tables.list".
	 *      This will both use a new inode and result in a different file
	 *      size, thus invalidating A's cache in theory.
	 *
	 *   3. B decides to auto-compact the stack and merges two tables. The
	 *      file size now matches what A has cached again. Furthermore, the
	 *      filesystem may decide to recycle the inode number of the file
	 *      we have replaced in (2) because it is not in use anymore.
	 *
	 *   4. A reloads the reftable stack. Neither the inode number nor the
	 *      file size changed. If the timestamps did not change either then
	 *      we think the cached copy of our stack is up-to-date.
	 *
	 * By keeping the file descriptor open the inode number cannot be
	 * recycled, mitigating the race.
	 */
	if (!err && fd >= 0 && !fstat(fd, &st->list_st) &&
	    st->list_st.st_dev && st->list_st.st_ino) {
		st->list_fd = fd;
		fd = -1;
	}

	if (fd >= 0)
		close(fd);
	free_names(names);
	free_names(names_after);
	return err;
}
/* -1 = error
   0 = up to date
   1 = changed. */
static int stack_uptodate(struct reftable_stack *st)
{
	char **names = NULL;
	int err;
	int i = 0;

	/*
	 * When we have cached stat information available then we use it to
	 * verify whether the file has been rewritten.
	 *
	 * Note that we explicitly do not want to use `stat_validity_check()`
	 * and friends here because they may end up not comparing the `st_dev`
	 * and `st_ino` fields. These functions thus cannot guarantee that we
	 * indeed still have the same file.
	 */
	if (st->list_fd >= 0) {
		struct stat list_st;

		if (stat(st->list_file, &list_st) < 0) {
			/*
			 * It's fine for "tables.list" to not exist. In that
			 * case, we have to refresh when the loaded stack has
			 * any readers.
			 */
			if (errno == ENOENT)
				return !!st->readers_len;
			return REFTABLE_IO_ERROR;
		}

		/*
		 * When "tables.list" refers to the same file we can assume
		 * that it didn't change. This is because we always use
		 * rename(3P) to update the file and never write to it
		 * directly.
		 */
		if (st->list_st.st_dev == list_st.st_dev &&
		    st->list_st.st_ino == list_st.st_ino)
			return 0;
	}

	/* Fall back to comparing the on-disk list against our readers. */
	err = read_lines(st->list_file, &names);
	if (err < 0)
		return err;

	for (i = 0; i < st->readers_len; i++) {
		if (!names[i]) {
			err = 1;
			goto done;
		}

		if (strcmp(st->readers[i]->name, names[i])) {
			err = 1;
			goto done;
		}
	}

	/* The file lists more tables than we have loaded. */
	if (names[st->merged->stack_len]) {
		err = 1;
		goto done;
	}

done:
	free_names(names);
	return err;
}
/*
 * Refresh the stack from disk if "tables.list" changed since we last
 * loaded it; a no-op when the stack is already up to date.
 */
int reftable_stack_reload(struct reftable_stack *st)
{
	int changed = stack_uptodate(st);

	if (changed <= 0)
		return changed;
	return reftable_stack_reload_maybe_reuse(st, 1);
}
525
526int reftable_stack_add(struct reftable_stack *st,
527 int (*write)(struct reftable_writer *wr, void *arg),
528 void *arg)
529{
530 int err = stack_try_add(st, write, arg);
531 if (err < 0) {
af18098c 532 if (err == REFTABLE_OUTDATED_ERROR) {
e48d4272 533 /* Ignore error return, we want to propagate
af18098c 534 REFTABLE_OUTDATED_ERROR.
e48d4272
HWN
535 */
536 reftable_stack_reload(st);
537 }
538 return err;
539 }
540
e48d4272
HWN
541 return 0;
542}
543
544static void format_name(struct strbuf *dest, uint64_t min, uint64_t max)
545{
546 char buf[100];
9abda981 547 uint32_t rnd = (uint32_t)git_rand();
e48d4272
HWN
548 snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
549 min, max, rnd);
550 strbuf_reset(dest);
551 strbuf_addstr(dest, buf);
552}
/*
 * An in-progress transaction that adds new tables to a stack. It holds the
 * "tables.list" lock for its whole lifetime; reftable_addition_commit()
 * publishes the accumulated tables, reftable_addition_close() tears down
 * any uncommitted state.
 */
struct reftable_addition {
	/* Lock on "<list_file>.lock", held until commit or close. */
	struct tempfile *lock_file;
	/* The stack the new tables will be appended to. */
	struct reftable_stack *stack;

	/* Names of newly written tables, appended to the list on commit. */
	char **new_tables;
	size_t new_tables_len, new_tables_cap;
	/* Minimum update index that freshly added tables must use. */
	uint64_t next_update_index;
};

#define REFTABLE_ADDITION_INIT {0}
/*
 * Begin an addition transaction on `st`: take the "tables.list" lock,
 * verify the loaded stack is current, and record the next update index.
 * Returns REFTABLE_LOCK_ERROR if the lock is already held,
 * REFTABLE_OUTDATED_ERROR if the stack changed on disk since it was
 * loaded, or REFTABLE_IO_ERROR on other failures.
 */
static int reftable_stack_init_addition(struct reftable_addition *add,
					struct reftable_stack *st)
{
	struct strbuf lock_file_name = STRBUF_INIT;
	int err = 0;
	add->stack = st;

	strbuf_addf(&lock_file_name, "%s.lock", st->list_file);

	add->lock_file = create_tempfile(lock_file_name.buf);
	if (!add->lock_file) {
		/* EEXIST means another writer holds the lock. */
		if (errno == EEXIST) {
			err = REFTABLE_LOCK_ERROR;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	if (st->config.default_permissions) {
		if (chmod(add->lock_file->filename.buf, st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	/* Refuse to build on a stale view of the stack. */
	err = stack_uptodate(st);
	if (err < 0)
		goto done;
	if (err > 0) {
		err = REFTABLE_OUTDATED_ERROR;
		goto done;
	}

	add->next_update_index = reftable_stack_next_update_index(st);
done:
	if (err) {
		reftable_addition_close(add);
	}
	strbuf_release(&lock_file_name);
	return err;
}
/*
 * Abort/finish an addition: unlink any tables that were written but not
 * committed, free the bookkeeping arrays and drop the "tables.list" lock.
 * Safe to call on an already-committed or freshly-initialized addition.
 */
static void reftable_addition_close(struct reftable_addition *add)
{
	struct strbuf nm = STRBUF_INIT;
	size_t i;

	for (i = 0; i < add->new_tables_len; i++) {
		stack_filename(&nm, add->stack, add->new_tables[i]);
		unlink(nm.buf);
		reftable_free(add->new_tables[i]);
		add->new_tables[i] = NULL;
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;
	add->new_tables_cap = 0;

	delete_tempfile(&add->lock_file);
	strbuf_release(&nm);
}
/* Release all state held by `add` and free it; a NULL `add` is a no-op. */
void reftable_addition_destroy(struct reftable_addition *add)
{
	if (!add)
		return;

	reftable_addition_close(add);
	reftable_free(add);
}
/*
 * Publish the addition: write the combined list of existing and new table
 * names into the held lock file, fsync it, and rename it over
 * "tables.list". On success the stack is reloaded and, unless disabled,
 * auto-compacted. An addition with no new tables commits trivially.
 */
int reftable_addition_commit(struct reftable_addition *add)
{
	struct strbuf table_list = STRBUF_INIT;
	int lock_file_fd = get_tempfile_fd(add->lock_file);
	int err = 0;
	size_t i;

	if (add->new_tables_len == 0)
		goto done;

	/* Existing tables first, then the freshly added ones, one per line. */
	for (i = 0; i < add->stack->merged->stack_len; i++) {
		strbuf_addstr(&table_list, add->stack->readers[i]->name);
		strbuf_addstr(&table_list, "\n");
	}
	for (i = 0; i < add->new_tables_len; i++) {
		strbuf_addstr(&table_list, add->new_tables[i]);
		strbuf_addstr(&table_list, "\n");
	}

	err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
	strbuf_release(&table_list);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	fsync_component_or_die(FSYNC_COMPONENT_REFERENCE, lock_file_fd,
			       get_tempfile_path(add->lock_file));

	/* Atomically replace "tables.list"; this also releases the lock. */
	err = rename_tempfile(&add->lock_file, add->stack->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	/* success, no more state to clean up. */
	for (i = 0; i < add->new_tables_len; i++)
		reftable_free(add->new_tables[i]);
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;
	add->new_tables_cap = 0;

	err = reftable_stack_reload_maybe_reuse(add->stack, 1);
	if (err)
		goto done;

	if (!add->stack->disable_auto_compact) {
		/*
		 * Auto-compact the stack to keep the number of tables in
		 * control. It is possible that a concurrent writer is already
		 * trying to compact parts of the stack, which would lead to a
		 * `REFTABLE_LOCK_ERROR` because parts of the stack are locked
		 * already. This is a benign error though, so we ignore it.
		 */
		err = reftable_stack_auto_compact(add->stack);
		if (err < 0 && err != REFTABLE_LOCK_ERROR)
			goto done;
		err = 0;
	}

done:
	reftable_addition_close(add);
	return err;
}
701
702int reftable_stack_new_addition(struct reftable_addition **dest,
703 struct reftable_stack *st)
704{
705 int err = 0;
706 struct reftable_addition empty = REFTABLE_ADDITION_INIT;
b4ff12c8 707 REFTABLE_CALLOC_ARRAY(*dest, 1);
e48d4272
HWN
708 **dest = empty;
709 err = reftable_stack_init_addition(*dest, st);
710 if (err) {
711 reftable_free(*dest);
712 *dest = NULL;
713 }
714 return err;
715}
716
717static int stack_try_add(struct reftable_stack *st,
718 int (*write_table)(struct reftable_writer *wr,
719 void *arg),
720 void *arg)
721{
722 struct reftable_addition add = REFTABLE_ADDITION_INIT;
723 int err = reftable_stack_init_addition(&add, st);
724 if (err < 0)
725 goto done;
e48d4272
HWN
726
727 err = reftable_addition_add(&add, write_table, arg);
728 if (err < 0)
729 goto done;
730
731 err = reftable_addition_commit(&add);
732done:
733 reftable_addition_close(&add);
734 return err;
735}
/*
 * Write one new table into the addition's stack directory: create a
 * temporary file, let `write_table` emit its records through a writer,
 * then rename the result to its final "<min>-<max>-<rnd>.ref" name and
 * remember it for commit. A table that ends up empty is silently dropped.
 */
int reftable_addition_add(struct reftable_addition *add,
			  int (*write_table)(struct reftable_writer *wr,
					     void *arg),
			  void *arg)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf tab_file_name = STRBUF_INIT;
	struct strbuf next_name = STRBUF_INIT;
	struct reftable_writer *wr = NULL;
	struct tempfile *tab_file = NULL;
	int err = 0;
	int tab_fd;

	strbuf_reset(&next_name);
	format_name(&next_name, add->next_update_index, add->next_update_index);

	stack_filename(&temp_tab_file_name, add->stack, next_name.buf);
	strbuf_addstr(&temp_tab_file_name, ".temp.XXXXXX");

	tab_file = mks_tempfile(temp_tab_file_name.buf);
	if (!tab_file) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	if (add->stack->config.default_permissions) {
		if (chmod(get_tempfile_path(tab_file),
			  add->stack->config.default_permissions)) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}
	tab_fd = get_tempfile_fd(tab_file);

	wr = reftable_new_writer(reftable_fd_write, reftable_fd_flush, &tab_fd,
				 &add->stack->config);
	err = write_table(wr, arg);
	if (err < 0)
		goto done;

	err = reftable_writer_close(wr);
	if (err == REFTABLE_EMPTY_TABLE_ERROR) {
		/* Nothing was written; treat as success and add no table. */
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	err = close_tempfile_gently(tab_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = stack_check_addition(add->stack, get_tempfile_path(tab_file));
	if (err < 0)
		goto done;

	/* Tables must not rewind below the index reserved at init time. */
	if (wr->min_update_index < add->next_update_index) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	/* Final name reflects the actual update-index range written. */
	format_name(&next_name, wr->min_update_index, wr->max_update_index);
	strbuf_addstr(&next_name, ".ref");
	stack_filename(&tab_file_name, add->stack, next_name.buf);

	/*
	  On windows, this relies on rand() picking a unique destination name.
	  Maybe we should do retry loop as well?
	 */
	err = rename_tempfile(&tab_file, tab_file_name.buf);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	REFTABLE_ALLOC_GROW(add->new_tables, add->new_tables_len + 1,
			    add->new_tables_cap);
	add->new_tables[add->new_tables_len++] = strbuf_detach(&next_name, NULL);
done:
	delete_tempfile(&tab_file);
	strbuf_release(&temp_tab_file_name);
	strbuf_release(&tab_file_name);
	strbuf_release(&next_name);
	reftable_writer_free(wr);
	return err;
}
824
825uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
826{
827 int sz = st->merged->stack_len;
828 if (sz > 0)
829 return reftable_reader_max_update_index(st->readers[sz - 1]) +
830 1;
831 return 1;
832}
/*
 * Compact the (already locked) tables `[first, last]` into a single new
 * table written to a temporary file. On success ownership of the closed
 * tempfile is transferred to `*tab_file_out`; the caller is responsible
 * for renaming it into place. `config`, if non-NULL, controls reflog
 * expiry during the rewrite.
 */
static int stack_compact_locked(struct reftable_stack *st,
				size_t first, size_t last,
				struct reftable_log_expiry_config *config,
				struct tempfile **tab_file_out)
{
	struct strbuf next_name = STRBUF_INIT;
	struct strbuf tab_file_path = STRBUF_INIT;
	struct reftable_writer *wr = NULL;
	struct tempfile *tab_file;
	int tab_fd, err = 0;

	format_name(&next_name,
		    reftable_reader_min_update_index(st->readers[first]),
		    reftable_reader_max_update_index(st->readers[last]));
	stack_filename(&tab_file_path, st, next_name.buf);
	strbuf_addstr(&tab_file_path, ".temp.XXXXXX");

	tab_file = mks_tempfile(tab_file_path.buf);
	if (!tab_file) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	tab_fd = get_tempfile_fd(tab_file);

	if (st->config.default_permissions &&
	    chmod(get_tempfile_path(tab_file), st->config.default_permissions) < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	wr = reftable_new_writer(reftable_fd_write, reftable_fd_flush,
				 &tab_fd, &st->config);
	err = stack_write_compact(st, wr, first, last, config);
	if (err < 0)
		goto done;

	err = reftable_writer_close(wr);
	if (err < 0)
		goto done;

	err = close_tempfile_gently(tab_file);
	if (err < 0)
		goto done;

	/* Hand the finished tempfile to the caller; NULL disarms cleanup. */
	*tab_file_out = tab_file;
	tab_file = NULL;

done:
	delete_tempfile(&tab_file);
	reftable_writer_free(wr);
	strbuf_release(&next_name);
	strbuf_release(&tab_file_path);
	return err;
}
/*
 * Stream the merged contents of tables `[first, last]` into `wr`. Deletion
 * tombstones are dropped when the range starts at the bottom of the stack
 * (first == 0), since there is nothing below them left to shadow. Log
 * records older than the expiry thresholds in `config` (if given) are
 * skipped. Updates st->stats with bytes read and entries written.
 */
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr,
			       size_t first, size_t last,
			       struct reftable_log_expiry_config *config)
{
	size_t subtabs_len = last - first + 1;
	struct reftable_table *subtabs = reftable_calloc(
		last - first + 1, sizeof(*subtabs));
	struct reftable_merged_table *mt = NULL;
	struct reftable_iterator it = { NULL };
	struct reftable_ref_record ref = { NULL };
	struct reftable_log_record log = { NULL };
	uint64_t entries = 0;
	int err = 0;

	for (size_t i = first, j = 0; i <= last; i++) {
		struct reftable_reader *t = st->readers[i];
		reftable_table_from_reader(&subtabs[j++], t);
		st->stats.bytes += t->size;
	}
	reftable_writer_set_limits(wr, st->readers[first]->min_update_index,
				   st->readers[last]->max_update_index);

	err = reftable_new_merged_table(&mt, subtabs, subtabs_len,
					st->config.hash_id);
	if (err < 0) {
		/* `mt` did not take ownership of `subtabs` on failure. */
		reftable_free(subtabs);
		goto done;
	}

	/* First pass: all ref records. */
	err = reftable_merged_table_seek_ref(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			/* Positive return means iteration is exhausted. */
			err = 0;
			break;
		}
		if (err < 0)
			goto done;

		if (first == 0 && reftable_ref_record_is_deletion(&ref)) {
			continue;
		}

		err = reftable_writer_add_ref(wr, &ref);
		if (err < 0)
			goto done;
		entries++;
	}
	reftable_iterator_destroy(&it);

	/* Second pass: all log records, applying expiry filters. */
	err = reftable_merged_table_seek_log(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_log(&it, &log);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0)
			goto done;
		if (first == 0 && reftable_log_record_is_deletion(&log)) {
			continue;
		}

		if (config && config->min_update_index > 0 &&
		    log.update_index < config->min_update_index) {
			continue;
		}

		if (config && config->time > 0 &&
		    log.value.update.time < config->time) {
			continue;
		}

		err = reftable_writer_add_log(wr, &log);
		if (err < 0)
			goto done;
		entries++;
	}

done:
	reftable_iterator_destroy(&it);
	if (mt) {
		merged_table_release(mt);
		reftable_merged_table_free(mt);
	}
	reftable_ref_record_release(&ref);
	reftable_log_record_release(&log);
	st->stats.entries_written += entries;
	return err;
}
986
33358350
PS
987/*
988 * Compact all tables in the range `[first, last)` into a single new table.
989 *
990 * This function returns `0` on success or a code `< 0` on failure. When the
991 * stack or any of the tables in the specified range are already locked then
992 * this function returns `REFTABLE_LOCK_ERROR`. This is a benign error that
993 * callers can either ignore, or they may choose to retry compaction after some
994 * amount of time.
995 */
47616c43
PS
996static int stack_compact_range(struct reftable_stack *st,
997 size_t first, size_t last,
e48d4272
HWN
998 struct reftable_log_expiry_config *expiry)
999{
3a60f6a2 1000 struct strbuf tables_list_buf = STRBUF_INIT;
e48d4272 1001 struct strbuf new_table_name = STRBUF_INIT;
e48d4272 1002 struct strbuf new_table_path = STRBUF_INIT;
3a60f6a2
PS
1003 struct strbuf table_name = STRBUF_INIT;
1004 struct lock_file tables_list_lock = LOCK_INIT;
1005 struct lock_file *table_locks = NULL;
60c4c425 1006 struct tempfile *new_table = NULL;
3a60f6a2
PS
1007 int is_empty_table = 0, err = 0;
1008 size_t i;
e48d4272
HWN
1009
1010 if (first > last || (!expiry && first == last)) {
1011 err = 0;
1012 goto done;
1013 }
1014
1015 st->stats.attempts++;
1016
3a60f6a2
PS
1017 /*
1018 * Hold the lock so that we can read "tables.list" and lock all tables
1019 * which are part of the user-specified range.
1020 */
1021 err = hold_lock_file_for_update(&tables_list_lock, st->list_file,
1022 LOCK_NO_DEREF);
1023 if (err < 0) {
1024 if (errno == EEXIST)
33358350 1025 err = REFTABLE_LOCK_ERROR;
3a60f6a2 1026 else
e48d4272 1027 err = REFTABLE_IO_ERROR;
e48d4272
HWN
1028 goto done;
1029 }
e48d4272 1030
e48d4272 1031 err = stack_uptodate(st);
3a60f6a2 1032 if (err)
e48d4272
HWN
1033 goto done;
1034
3a60f6a2
PS
1035 /*
1036 * Lock all tables in the user-provided range. This is the slice of our
1037 * stack which we'll compact.
1038 */
1039 REFTABLE_CALLOC_ARRAY(table_locks, last - first + 1);
1040 for (i = first; i <= last; i++) {
1041 stack_filename(&table_name, st, reader_name(st->readers[i]));
e48d4272 1042
3a60f6a2
PS
1043 err = hold_lock_file_for_update(&table_locks[i - first],
1044 table_name.buf, LOCK_NO_DEREF);
1045 if (err < 0) {
1046 if (errno == EEXIST)
33358350 1047 err = REFTABLE_LOCK_ERROR;
3a60f6a2 1048 else
e48d4272 1049 err = REFTABLE_IO_ERROR;
3a60f6a2 1050 goto done;
e48d4272
HWN
1051 }
1052
3a60f6a2
PS
1053 /*
1054 * We need to close the lockfiles as we might otherwise easily
1055 * run into file descriptor exhaustion when we compress a lot
1056 * of tables.
1057 */
1058 err = close_lock_file_gently(&table_locks[i - first]);
1059 if (err < 0) {
1060 err = REFTABLE_IO_ERROR;
e48d4272 1061 goto done;
3a60f6a2 1062 }
e48d4272
HWN
1063 }
1064
3a60f6a2
PS
1065 /*
1066 * We have locked all tables in our range and can thus release the
1067 * "tables.list" lock while compacting the locked tables. This allows
1068 * concurrent updates to the stack to proceed.
1069 */
1070 err = rollback_lock_file(&tables_list_lock);
1071 if (err < 0) {
1072 err = REFTABLE_IO_ERROR;
e48d4272 1073 goto done;
e48d4272 1074 }
e48d4272 1075
3a60f6a2
PS
1076 /*
1077 * Compact the now-locked tables into a new table. Note that compacting
1078 * these tables may end up with an empty new table in case tombstones
1079 * end up cancelling out all refs in that range.
1080 */
60c4c425 1081 err = stack_compact_locked(st, first, last, expiry, &new_table);
3a60f6a2
PS
1082 if (err < 0) {
1083 if (err != REFTABLE_EMPTY_TABLE_ERROR)
1084 goto done;
1085 is_empty_table = 1;
1086 }
1087
1088 /*
1089 * Now that we have written the new, compacted table we need to re-lock
1090 * "tables.list". We'll then replace the compacted range of tables with
1091 * the new table.
1092 */
1093 err = hold_lock_file_for_update(&tables_list_lock, st->list_file,
1094 LOCK_NO_DEREF);
1095 if (err < 0) {
1096 if (errno == EEXIST)
33358350 1097 err = REFTABLE_LOCK_ERROR;
3a60f6a2 1098 else
e48d4272 1099 err = REFTABLE_IO_ERROR;
e48d4272
HWN
1100 goto done;
1101 }
3a60f6a2 1102
cd1799de 1103 if (st->config.default_permissions) {
3a60f6a2
PS
1104 if (chmod(get_lock_file_path(&tables_list_lock),
1105 st->config.default_permissions) < 0) {
cd1799de
HWN
1106 err = REFTABLE_IO_ERROR;
1107 goto done;
1108 }
1109 }
e48d4272 1110
3a60f6a2
PS
1111 /*
1112 * If the resulting compacted table is not empty, then we need to move
1113 * it into place now.
1114 */
e48d4272 1115 if (!is_empty_table) {
3a60f6a2
PS
1116 format_name(&new_table_name, st->readers[first]->min_update_index,
1117 st->readers[last]->max_update_index);
1118 strbuf_addstr(&new_table_name, ".ref");
1119 stack_filename(&new_table_path, st, new_table_name.buf);
1120
60c4c425 1121 err = rename_tempfile(&new_table, new_table_path.buf);
e48d4272
HWN
1122 if (err < 0) {
1123 err = REFTABLE_IO_ERROR;
1124 goto done;
1125 }
1126 }
1127
3a60f6a2
PS
1128 /*
1129 * Write the new "tables.list" contents with the compacted table we
1130 * have just written. In case the compacted table became empty we
1131 * simply skip writing it.
1132 */
1133 for (i = 0; i < first; i++)
1134 strbuf_addf(&tables_list_buf, "%s\n", st->readers[i]->name);
1135 if (!is_empty_table)
1136 strbuf_addf(&tables_list_buf, "%s\n", new_table_name.buf);
1137 for (i = last + 1; i < st->merged->stack_len; i++)
1138 strbuf_addf(&tables_list_buf, "%s\n", st->readers[i]->name);
1139
1140 err = write_in_full(get_lock_file_fd(&tables_list_lock),
1141 tables_list_buf.buf, tables_list_buf.len);
7fa52fda
PS
1142 if (err < 0) {
1143 err = REFTABLE_IO_ERROR;
1144 unlink(new_table_path.buf);
1145 goto done;
1146 }
1147
3a60f6a2 1148 err = fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&tables_list_lock));
e48d4272
HWN
1149 if (err < 0) {
1150 err = REFTABLE_IO_ERROR;
1151 unlink(new_table_path.buf);
1152 goto done;
1153 }
1154
3a60f6a2 1155 err = commit_lock_file(&tables_list_lock);
e48d4272
HWN
1156 if (err < 0) {
1157 err = REFTABLE_IO_ERROR;
1158 unlink(new_table_path.buf);
1159 goto done;
1160 }
e48d4272 1161
3a60f6a2
PS
1162 /*
1163 * Reload the stack before deleting the compacted tables. We can only
1164 * delete the files after we closed them on Windows, so this needs to
1165 * happen first.
1166 */
e48d4272 1167 err = reftable_stack_reload_maybe_reuse(st, first < last);
3a60f6a2
PS
1168 if (err < 0)
1169 goto done;
e48d4272 1170
3a60f6a2
PS
1171 /*
1172 * Delete the old tables. They may still be in use by concurrent
1173 * readers, so it is expected that unlinking tables may fail.
1174 */
1175 for (i = first; i <= last; i++) {
1176 struct lock_file *table_lock = &table_locks[i - first];
1177 char *table_path = get_locked_file_path(table_lock);
1178 unlink(table_path);
1179 free(table_path);
e48d4272
HWN
1180 }
1181
1182done:
3a60f6a2
PS
1183 rollback_lock_file(&tables_list_lock);
1184 for (i = first; table_locks && i <= last; i++)
1185 rollback_lock_file(&table_locks[i - first]);
1186 reftable_free(table_locks);
e48d4272 1187
60c4c425 1188 delete_tempfile(&new_table);
e48d4272
HWN
1189 strbuf_release(&new_table_name);
1190 strbuf_release(&new_table_path);
60c4c425 1191
3a60f6a2
PS
1192 strbuf_release(&tables_list_buf);
1193 strbuf_release(&table_name);
e48d4272
HWN
1194 return err;
1195}
1196
1197int reftable_stack_compact_all(struct reftable_stack *st,
1198 struct reftable_log_expiry_config *config)
1199{
47616c43
PS
1200 return stack_compact_range(st, 0, st->merged->stack_len ?
1201 st->merged->stack_len - 1 : 0, config);
e48d4272
HWN
1202}
1203
47616c43
PS
1204static int stack_compact_range_stats(struct reftable_stack *st,
1205 size_t first, size_t last,
e48d4272
HWN
1206 struct reftable_log_expiry_config *config)
1207{
1208 int err = stack_compact_range(st, first, last, config);
33358350 1209 if (err == REFTABLE_LOCK_ERROR)
e48d4272 1210 st->stats.failures++;
e48d4272
HWN
1211 return err;
1212}
1213
1214static int segment_size(struct segment *s)
1215{
1216 return s->end - s->start;
1217}
1218
/*
 * Position of the most significant set bit, i.e. floor(log2(sz)).
 * Returns 0 for sz == 0 as a special case.
 */
int fastlog2(uint64_t sz)
{
	int msb = -1;

	if (!sz)
		return 0;

	while (sz) {
		sz >>= 1;
		msb++;
	}

	return msb;
}
1229
6d5e80fb 1230struct segment *sizes_to_segments(size_t *seglen, uint64_t *sizes, size_t n)
e48d4272 1231{
b4ff12c8 1232 struct segment *segs = reftable_calloc(n, sizeof(*segs));
e48d4272 1233 struct segment cur = { 0 };
6d5e80fb 1234 size_t next = 0, i;
e48d4272
HWN
1235
1236 if (n == 0) {
1237 *seglen = 0;
1238 return segs;
1239 }
1240 for (i = 0; i < n; i++) {
1241 int log = fastlog2(sizes[i]);
1242 if (cur.log != log && cur.bytes > 0) {
1243 struct segment fresh = {
1244 .start = i,
1245 };
1246
1247 segs[next++] = cur;
1248 cur = fresh;
1249 }
1250
1251 cur.log = log;
1252 cur.end = i + 1;
1253 cur.bytes += sizes[i];
1254 }
1255 segs[next++] = cur;
1256 *seglen = next;
1257 return segs;
1258}
1259
6d5e80fb 1260struct segment suggest_compaction_segment(uint64_t *sizes, size_t n)
e48d4272 1261{
e48d4272
HWN
1262 struct segment min_seg = {
1263 .log = 64,
1264 };
6d5e80fb
PS
1265 struct segment *segs;
1266 size_t seglen = 0, i;
1267
1268 segs = sizes_to_segments(&seglen, sizes, n);
e48d4272 1269 for (i = 0; i < seglen; i++) {
6d5e80fb 1270 if (segment_size(&segs[i]) == 1)
e48d4272 1271 continue;
e48d4272 1272
6d5e80fb 1273 if (segs[i].log < min_seg.log)
e48d4272 1274 min_seg = segs[i];
e48d4272
HWN
1275 }
1276
1277 while (min_seg.start > 0) {
6d5e80fb
PS
1278 size_t prev = min_seg.start - 1;
1279 if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev]))
e48d4272 1280 break;
e48d4272
HWN
1281
1282 min_seg.start = prev;
1283 min_seg.bytes += sizes[prev];
1284 }
1285
1286 reftable_free(segs);
1287 return min_seg;
1288}
1289
1290static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
1291{
1292 uint64_t *sizes =
b4ff12c8 1293 reftable_calloc(st->merged->stack_len, sizeof(*sizes));
e48d4272
HWN
1294 int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
1295 int overhead = header_size(version) - 1;
1296 int i = 0;
1297 for (i = 0; i < st->merged->stack_len; i++) {
1298 sizes[i] = st->readers[i]->size - overhead;
1299 }
1300 return sizes;
1301}
1302
1303int reftable_stack_auto_compact(struct reftable_stack *st)
1304{
1305 uint64_t *sizes = stack_table_sizes_for_compaction(st);
1306 struct segment seg =
1307 suggest_compaction_segment(sizes, st->merged->stack_len);
1308 reftable_free(sizes);
1309 if (segment_size(&seg) > 0)
1310 return stack_compact_range_stats(st, seg.start, seg.end - 1,
1311 NULL);
1312
1313 return 0;
1314}
1315
/* Expose the stack's compaction statistics (e.g. lock-failure count). */
struct reftable_compaction_stats *
reftable_stack_compaction_stats(struct reftable_stack *st)
{
	return &st->stats;
}
1321
1322int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
1323 struct reftable_ref_record *ref)
1324{
1325 struct reftable_table tab = { NULL };
1326 reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
1327 return reftable_table_read_ref(&tab, refname, ref);
1328}
1329
1330int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
1331 struct reftable_log_record *log)
1332{
1333 struct reftable_iterator it = { NULL };
1334 struct reftable_merged_table *mt = reftable_stack_merged_table(st);
1335 int err = reftable_merged_table_seek_log(mt, &it, refname);
1336 if (err)
1337 goto done;
1338
1339 err = reftable_iterator_next_log(&it, log);
1340 if (err)
1341 goto done;
1342
1343 if (strcmp(log->refname, refname) ||
1344 reftable_log_record_is_deletion(log)) {
1345 err = 1;
1346 goto done;
1347 }
1348
1349done:
1350 if (err) {
1351 reftable_log_record_release(log);
1352 }
1353 reftable_iterator_destroy(&it);
1354 return err;
1355}
1356
/*
 * Verify that the refs in the newly written table `new_tab_name` are a
 * valid addition to the current stack, as decided by
 * validate_ref_record_addition().  Returns 0 on success (or when the
 * check is disabled via config.skip_name_check), a negative error code
 * otherwise.
 */
static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name)
{
	int err = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct reftable_table tab = { NULL };
	struct reftable_ref_record *refs = NULL;
	struct reftable_iterator it = { NULL };
	int cap = 0;
	int len = 0;
	int i = 0;

	if (st->config.skip_name_check)
		return 0;

	/* Open the new table so we can enumerate the refs it contains. */
	err = reftable_block_source_from_file(&src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_reader_seek_ref(rd, &it, "");
	if (err > 0) {
		/* Positive result from seek: nothing to iterate over. */
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	/* Collect all refs of the new table into a flat array. */
	while (1) {
		struct reftable_ref_record ref = { NULL };
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0)
			break;
		if (err < 0)
			goto done;

		REFTABLE_ALLOC_GROW(refs, len + 1, cap);
		refs[len++] = ref;
	}

	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));

	err = validate_ref_record_addition(tab, refs, len);

done:
	for (i = 0; i < len; i++) {
		reftable_ref_record_release(&refs[i]);
	}

	free(refs);
	reftable_iterator_destroy(&it);
	reftable_reader_free(rd);
	return err;
}
1415
/* Does the file name look like a reftable table, i.e. end in ".ref"? */
static int is_table_name(const char *s)
{
	const char *suffix = strrchr(s, '.');

	return suffix != NULL && strcmp(suffix, ".ref") == 0;
}
1421
/*
 * Unlink the named table file if it is stale, i.e. if its maximum update
 * index is already covered by the stack ("max").  Errors while probing
 * the file are ignored and leave the table in place.
 */
static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max,
				     const char *name)
{
	int err = 0;
	uint64_t update_idx = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct strbuf table_path = STRBUF_INIT;
	stack_filename(&table_path, st, name);

	err = reftable_block_source_from_file(&src, table_path.buf);
	if (err < 0)
		goto done;

	/*
	 * NOTE(review): on reftable_new_reader() failure it is not visible
	 * from here whether `src` is closed by the callee — verify against
	 * its contract.
	 */
	err = reftable_new_reader(&rd, &src, name);
	if (err < 0)
		goto done;

	update_idx = reftable_reader_max_update_index(rd);
	reftable_reader_free(rd);

	if (update_idx <= max) {
		unlink(table_path.buf);
	}
done:
	strbuf_release(&table_path);
}
1449
1450static int reftable_stack_clean_locked(struct reftable_stack *st)
1451{
1452 uint64_t max = reftable_merged_table_max_update_index(
1453 reftable_stack_merged_table(st));
1454 DIR *dir = opendir(st->reftable_dir);
1455 struct dirent *d = NULL;
1456 if (!dir) {
1457 return REFTABLE_IO_ERROR;
1458 }
1459
1460 while ((d = readdir(dir))) {
1461 int i = 0;
1462 int found = 0;
1463 if (!is_table_name(d->d_name))
1464 continue;
1465
1466 for (i = 0; !found && i < st->readers_len; i++) {
1467 found = !strcmp(reader_name(st->readers[i]), d->d_name);
1468 }
1469 if (found)
1470 continue;
1471
1472 remove_maybe_stale_table(st, max, d->d_name);
1473 }
1474
1475 closedir(dir);
1476 return 0;
1477}
1478
1479int reftable_stack_clean(struct reftable_stack *st)
1480{
1481 struct reftable_addition *add = NULL;
1482 int err = reftable_stack_new_addition(&add, st);
1483 if (err < 0) {
1484 goto done;
1485 }
1486
1487 err = reftable_stack_reload(st);
1488 if (err < 0) {
1489 goto done;
1490 }
1491
1492 err = reftable_stack_clean_locked(st);
1493
1494done:
1495 reftable_addition_destroy(add);
1496 return err;
1497}
1498
1499int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id)
1500{
1501 struct reftable_stack *stack = NULL;
1502 struct reftable_write_options cfg = { .hash_id = hash_id };
1503 struct reftable_merged_table *merged = NULL;
1504 struct reftable_table table = { NULL };
1505
1506 int err = reftable_new_stack(&stack, stackdir, cfg);
1507 if (err < 0)
1508 goto done;
1509
1510 merged = reftable_stack_merged_table(stack);
1511 reftable_table_from_merged_table(&table, merged);
1512 err = reftable_table_print(&table);
1513done:
1514 if (stack)
1515 reftable_stack_destroy(stack);
1516 return err;
1517}