#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

/* Pick the strategy at compile time based on platform capabilities. */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
38 struct packed_ref_store
;
40 struct packed_ref_cache
{
42 * A back-pointer to the packed_ref_store with which this
43 * cache is associated:
45 struct packed_ref_store
*refs
;
47 /* Is the `packed-refs` file currently mmapped? */
51 * The contents of the `packed-refs` file. If the file was
52 * already sorted, this points at the mmapped contents of the
53 * file. If not, this points at heap-allocated memory
54 * containing the contents, sorted. If there were no contents
55 * (e.g., because the file didn't exist), `buf` and `eof` are
60 /* The size of the header line, if any; otherwise, 0: */
64 * What is the peeled state of this cache? (This is usually
65 * determined from the header of the "packed-refs" file.)
67 enum { PEELED_NONE
, PEELED_TAGS
, PEELED_FULLY
} peeled
;
70 * Count of references to the data structure in this instance,
71 * including the pointer from files_ref_store::packed if any.
72 * The data will not be freed as long as the reference count
75 unsigned int referrers
;
77 /* The metadata from when this packed-refs cache was read */
78 struct stat_validity validity
;
82 * A container for `packed-refs`-related data. It is not (yet) a
85 struct packed_ref_store
{
86 struct ref_store base
;
88 unsigned int store_flags
;
90 /* The path of the "packed-refs" file: */
94 * A cache of the values read from the `packed-refs` file, if
95 * it might still be current; otherwise, NULL.
97 struct packed_ref_cache
*cache
;
100 * Lock used for the "packed-refs" file. Note that this (and
101 * thus the enclosing `packed_ref_store`) must not be freed.
103 struct lock_file lock
;
106 * Temporary file used when rewriting new contents to the
107 * "packed-refs" file. Note that this (and thus the enclosing
108 * `packed_ref_store`) must not be freed.
110 struct tempfile tempfile
;
114 * Increment the reference count of *packed_refs.
116 static void acquire_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
118 packed_refs
->referrers
++;
122 * If the buffer in `packed_refs` is active, then either munmap the
123 * memory and close the file, or free the memory. Then set the buffer
126 static void release_packed_ref_buffer(struct packed_ref_cache
*packed_refs
)
128 if (packed_refs
->mmapped
) {
129 if (munmap(packed_refs
->buf
,
130 packed_refs
->eof
- packed_refs
->buf
))
131 die_errno("error ummapping packed-refs file %s",
132 packed_refs
->refs
->path
);
133 packed_refs
->mmapped
= 0;
135 free(packed_refs
->buf
);
137 packed_refs
->buf
= packed_refs
->eof
= NULL
;
138 packed_refs
->header_len
= 0;
142 * Decrease the reference count of *packed_refs. If it goes to zero,
143 * free *packed_refs and return true; otherwise return false.
145 static int release_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
147 if (!--packed_refs
->referrers
) {
148 stat_validity_clear(&packed_refs
->validity
);
149 release_packed_ref_buffer(packed_refs
);
157 struct ref_store
*packed_ref_store_create(const char *path
,
158 unsigned int store_flags
)
160 struct packed_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
161 struct ref_store
*ref_store
= (struct ref_store
*)refs
;
163 base_ref_store_init(ref_store
, &refs_be_packed
);
164 refs
->store_flags
= store_flags
;
166 refs
->path
= xstrdup(path
);
171 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
172 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
173 * support at least the flags specified in `required_flags`. `caller`
174 * is used in any necessary error messages.
176 static struct packed_ref_store
*packed_downcast(struct ref_store
*ref_store
,
177 unsigned int required_flags
,
180 struct packed_ref_store
*refs
;
182 if (ref_store
->be
!= &refs_be_packed
)
183 die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
184 ref_store
->be
->name
, caller
);
186 refs
= (struct packed_ref_store
*)ref_store
;
188 if ((refs
->store_flags
& required_flags
) != required_flags
)
189 die("BUG: unallowed operation (%s), requires %x, has %x\n",
190 caller
, required_flags
, refs
->store_flags
);
195 static void clear_packed_ref_cache(struct packed_ref_store
*refs
)
198 struct packed_ref_cache
*cache
= refs
->cache
;
201 release_packed_ref_cache(cache
);
205 static NORETURN
void die_unterminated_line(const char *path
,
206 const char *p
, size_t len
)
209 die("unterminated line in %s: %.*s", path
, (int)len
, p
);
211 die("unterminated line in %s: %.75s...", path
, p
);
214 static NORETURN
void die_invalid_line(const char *path
,
215 const char *p
, size_t len
)
217 const char *eol
= memchr(p
, '\n', len
);
220 die_unterminated_line(path
, p
, len
);
221 else if (eol
- p
< 80)
222 die("unexpected line in %s: %.*s", path
, (int)(eol
- p
), p
);
224 die("unexpected line in %s: %.75s...", path
, p
);
/*
 * One record (a reference line plus any following peeled "^" line) in
 * an unsorted `packed-refs` buffer, used while sorting.
 */
struct packed_ref_entry {
	const char *start;
	size_t len;
};
233 static int cmp_packed_ref_entries(const void *v1
, const void *v2
)
235 const struct packed_ref_entry
*e1
= v1
, *e2
= v2
;
236 const char *r1
= e1
->start
+ GIT_SHA1_HEXSZ
+ 1;
237 const char *r2
= e2
->start
+ GIT_SHA1_HEXSZ
+ 1;
241 return *r2
== '\n' ? 0 : -1;
246 return (unsigned char)*r1
< (unsigned char)*r2
? -1 : +1;
254 * Compare a packed-refs record pointed to by `rec` to the specified
255 * NUL-terminated refname.
257 static int cmp_entry_to_refname(const char *rec
, const char *refname
)
259 const char *r1
= rec
+ GIT_SHA1_HEXSZ
+ 1;
260 const char *r2
= refname
;
268 return (unsigned char)*r1
< (unsigned char)*r2
? -1 : +1;
275 * `packed_refs->buf` is not known to be sorted. Check whether it is,
276 * and if not, sort it into new memory and munmap/free the old
279 static void sort_packed_refs(struct packed_ref_cache
*packed_refs
)
281 struct packed_ref_entry
*entries
= NULL
;
282 size_t alloc
= 0, nr
= 0;
284 const char *pos
, *eof
, *eol
;
286 char *new_buffer
, *dst
;
288 pos
= packed_refs
->buf
+ packed_refs
->header_len
;
289 eof
= packed_refs
->eof
;
296 * Initialize entries based on a crude estimate of the number
297 * of references in the file (we'll grow it below if needed):
299 ALLOC_GROW(entries
, len
/ 80 + 20, alloc
);
302 eol
= memchr(pos
, '\n', eof
- pos
);
304 /* The safety check should prevent this. */
305 BUG("unterminated line found in packed-refs");
306 if (eol
- pos
< GIT_SHA1_HEXSZ
+ 2)
307 die_invalid_line(packed_refs
->refs
->path
,
310 if (eol
< eof
&& *eol
== '^') {
312 * Keep any peeled line together with its
315 const char *peeled_start
= eol
;
317 eol
= memchr(peeled_start
, '\n', eof
- peeled_start
);
319 /* The safety check should prevent this. */
320 BUG("unterminated peeled line found in packed-refs");
324 ALLOC_GROW(entries
, nr
+ 1, alloc
);
325 entries
[nr
].start
= pos
;
326 entries
[nr
].len
= eol
- pos
;
331 cmp_packed_ref_entries(&entries
[nr
- 2],
332 &entries
[nr
- 1]) >= 0)
341 /* We need to sort the memory. First we sort the entries array: */
342 QSORT(entries
, nr
, cmp_packed_ref_entries
);
345 * Allocate a new chunk of memory, and copy the old memory to
346 * the new in the order indicated by `entries` (not bothering
347 * with the header line):
349 new_buffer
= xmalloc(len
);
350 for (dst
= new_buffer
, i
= 0; i
< nr
; i
++) {
351 memcpy(dst
, entries
[i
].start
, entries
[i
].len
);
352 dst
+= entries
[i
].len
;
356 * Now munmap the old buffer and use the sorted buffer in its
359 release_packed_ref_buffer(packed_refs
);
360 packed_refs
->buf
= new_buffer
;
361 packed_refs
->eof
= new_buffer
+ len
;
362 packed_refs
->header_len
= 0;
/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`. A peeled line (starting with
 * '^') belongs to the record of the preceding reference line.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}
/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 * A peeled '^' line is skipped along with its reference line.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}
392 * We want to be able to compare mmapped reference records quickly,
393 * without totally parsing them. We can do so because the records are
394 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
395 * + 1) bytes past the beginning of the record.
397 * But what if the `packed-refs` file contains garbage? We're willing
398 * to tolerate not detecting the problem, as long as we don't produce
399 * totally garbled output (we can't afford to check the integrity of
400 * the whole file during every Git invocation). But we do want to be
401 * sure that we never read past the end of the buffer in memory and
402 * perform an illegal memory access.
404 * Guarantee that minimum level of safety by verifying that the last
405 * record in the file is LF-terminated, and that it has at least
406 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
407 * these checks fails.
409 static void verify_buffer_safe(struct packed_ref_cache
*packed_refs
)
411 const char *buf
= packed_refs
->buf
+ packed_refs
->header_len
;
412 const char *eof
= packed_refs
->eof
;
413 const char *last_line
;
418 last_line
= find_start_of_record(buf
, eof
- 1);
419 if (*(eof
- 1) != '\n' || eof
- last_line
< GIT_SHA1_HEXSZ
+ 2)
420 die_invalid_line(packed_refs
->refs
->path
,
421 last_line
, eof
- last_line
);
425 * Depending on `mmap_strategy`, either mmap or read the contents of
426 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
427 * the file existed and was read, or 0 if the file was absent. Die on
430 static int load_contents(struct packed_ref_cache
*packed_refs
)
437 fd
= open(packed_refs
->refs
->path
, O_RDONLY
);
439 if (errno
== ENOENT
) {
441 * This is OK; it just means that no
442 * "packed-refs" file has been written yet,
443 * which is equivalent to it being empty,
444 * which is its state when initialized with
449 die_errno("couldn't read %s", packed_refs
->refs
->path
);
453 stat_validity_update(&packed_refs
->validity
, fd
);
455 if (fstat(fd
, &st
) < 0)
456 die_errno("couldn't stat %s", packed_refs
->refs
->path
);
457 size
= xsize_t(st
.st_size
);
459 switch (mmap_strategy
) {
461 packed_refs
->buf
= xmalloc(size
);
462 bytes_read
= read_in_full(fd
, packed_refs
->buf
, size
);
463 if (bytes_read
< 0 || bytes_read
!= size
)
464 die_errno("couldn't read %s", packed_refs
->refs
->path
);
465 packed_refs
->eof
= packed_refs
->buf
+ size
;
466 packed_refs
->mmapped
= 0;
470 packed_refs
->buf
= xmmap(NULL
, size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
471 packed_refs
->eof
= packed_refs
->buf
+ size
;
472 packed_refs
->mmapped
= 1;
481 * Find the place in `cache->buf` where the start of the record for
482 * `refname` starts. If `mustexist` is true and the reference doesn't
483 * exist, then return NULL. If `mustexist` is false and the reference
484 * doesn't exist, then return the point where that reference would be
485 * inserted. In the latter mode, `refname` doesn't have to be a proper
486 * reference name; for example, one could search for "refs/replace/"
487 * to find the start of any replace references.
489 * The record is sought using a binary search, so `cache->buf` must be
492 static const char *find_reference_location(struct packed_ref_cache
*cache
,
493 const char *refname
, int mustexist
)
496 * This is not *quite* a garden-variety binary search, because
497 * the data we're searching is made up of records, and we
498 * always need to find the beginning of a record to do a
499 * comparison. A "record" here is one line for the reference
500 * itself and zero or one peel lines that start with '^'. Our
501 * loop invariant is described in the next two comments.
505 * A pointer to the character at the start of a record whose
506 * preceding records all have reference names that come
507 * *before* `refname`.
509 const char *lo
= cache
->buf
+ cache
->header_len
;
512 * A pointer to a the first character of a record whose
513 * reference name comes *after* `refname`.
515 const char *hi
= cache
->eof
;
518 const char *mid
, *rec
;
521 mid
= lo
+ (hi
- lo
) / 2;
522 rec
= find_start_of_record(lo
, mid
);
523 cmp
= cmp_entry_to_refname(rec
, refname
);
525 lo
= find_end_of_record(mid
, hi
);
526 } else if (cmp
> 0) {
540 * Read from the `packed-refs` file into a newly-allocated
541 * `packed_ref_cache` and return it. The return value will already
542 * have its reference count incremented.
544 * A comment line of the form "# pack-refs with: " may contain zero or
545 * more traits. We interpret the traits as follows:
547 * Neither `peeled` nor `fully-peeled`:
549 * Probably no references are peeled. But if the file contains a
550 * peeled value for a reference, we will use it.
554 * References under "refs/tags/", if they *can* be peeled, *are*
555 * peeled in this file. References outside of "refs/tags/" are
556 * probably not peeled even if they could have been, but if we find
557 * a peeled value for such a reference we will use it.
561 * All references in the file that can be peeled are peeled.
562 * Inversely (and this is more important), any references in the
563 * file for which no peeled value is recorded is not peelable. This
564 * trait should typically be written alongside "peeled" for
565 * compatibility with older clients, but we do not require it
566 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
570 * The references in this file are known to be sorted by refname.
572 static struct packed_ref_cache
*read_packed_refs(struct packed_ref_store
*refs
)
574 struct packed_ref_cache
*packed_refs
= xcalloc(1, sizeof(*packed_refs
));
577 packed_refs
->refs
= refs
;
578 acquire_packed_ref_cache(packed_refs
);
579 packed_refs
->peeled
= PEELED_NONE
;
581 if (!load_contents(packed_refs
))
584 /* If the file has a header line, process it: */
585 if (packed_refs
->buf
< packed_refs
->eof
&& *packed_refs
->buf
== '#') {
586 struct strbuf tmp
= STRBUF_INIT
;
589 struct string_list traits
= STRING_LIST_INIT_NODUP
;
591 eol
= memchr(packed_refs
->buf
, '\n',
592 packed_refs
->eof
- packed_refs
->buf
);
594 die_unterminated_line(refs
->path
,
596 packed_refs
->eof
- packed_refs
->buf
);
598 strbuf_add(&tmp
, packed_refs
->buf
, eol
- packed_refs
->buf
);
600 if (!skip_prefix(tmp
.buf
, "# pack-refs with:", (const char **)&p
))
601 die_invalid_line(refs
->path
,
603 packed_refs
->eof
- packed_refs
->buf
);
605 string_list_split_in_place(&traits
, p
, ' ', -1);
607 if (unsorted_string_list_has_string(&traits
, "fully-peeled"))
608 packed_refs
->peeled
= PEELED_FULLY
;
609 else if (unsorted_string_list_has_string(&traits
, "peeled"))
610 packed_refs
->peeled
= PEELED_TAGS
;
612 sorted
= unsorted_string_list_has_string(&traits
, "sorted");
614 /* perhaps other traits later as well */
616 /* The "+ 1" is for the LF character. */
617 packed_refs
->header_len
= eol
+ 1 - packed_refs
->buf
;
619 string_list_clear(&traits
, 0);
620 strbuf_release(&tmp
);
623 verify_buffer_safe(packed_refs
);
626 sort_packed_refs(packed_refs
);
629 * Reordering the records might have moved a short one
630 * to the end of the buffer, so verify the buffer's
633 verify_buffer_safe(packed_refs
);
636 if (mmap_strategy
!= MMAP_OK
&& packed_refs
->mmapped
) {
638 * We don't want to leave the file mmapped, so we are
639 * forced to make a copy now:
641 size_t size
= packed_refs
->eof
-
642 (packed_refs
->buf
+ packed_refs
->header_len
);
643 char *buf_copy
= xmalloc(size
);
645 memcpy(buf_copy
, packed_refs
->buf
+ packed_refs
->header_len
, size
);
646 release_packed_ref_buffer(packed_refs
);
647 packed_refs
->buf
= buf_copy
;
648 packed_refs
->eof
= buf_copy
+ size
;
655 * Check that the packed refs cache (if any) still reflects the
656 * contents of the file. If not, clear the cache.
658 static void validate_packed_ref_cache(struct packed_ref_store
*refs
)
661 !stat_validity_check(&refs
->cache
->validity
, refs
->path
))
662 clear_packed_ref_cache(refs
);
666 * Get the packed_ref_cache for the specified packed_ref_store,
667 * creating and populating it if it hasn't been read before or if the
668 * file has been changed (according to its `validity` field) since it
669 * was last read. On the other hand, if we hold the lock, then assume
670 * that the file hasn't been changed out from under us, so skip the
671 * extra `stat()` call in `stat_validity_check()`.
673 static struct packed_ref_cache
*get_packed_ref_cache(struct packed_ref_store
*refs
)
675 if (!is_lock_file_locked(&refs
->lock
))
676 validate_packed_ref_cache(refs
);
679 refs
->cache
= read_packed_refs(refs
);
684 static int packed_read_raw_ref(struct ref_store
*ref_store
,
685 const char *refname
, unsigned char *sha1
,
686 struct strbuf
*referent
, unsigned int *type
)
688 struct packed_ref_store
*refs
=
689 packed_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
690 struct packed_ref_cache
*packed_refs
= get_packed_ref_cache(refs
);
695 rec
= find_reference_location(packed_refs
, refname
, 1);
698 /* refname is not a packed reference. */
703 if (get_sha1_hex(rec
, sha1
))
704 die_invalid_line(refs
->path
, rec
, packed_refs
->eof
- rec
);
706 *type
= REF_ISPACKED
;
711 * This value is set in `base.flags` if the peeled value of the
712 * current reference is known. In that case, `peeled` contains the
713 * correct peeled value for the reference, which might be `null_sha1`
714 * if the reference is not a tag or if it is broken.
716 #define REF_KNOWS_PEELED 0x40
719 * An iterator over a packed-refs file that is currently mmapped.
721 struct packed_ref_iterator
{
722 struct ref_iterator base
;
724 struct packed_ref_cache
*packed_refs
;
726 /* The current position in the mmapped file: */
729 /* The end of the mmapped file: */
732 struct object_id oid
, peeled
;
734 struct strbuf refname_buf
;
739 static int next_record(struct packed_ref_iterator
*iter
)
741 const char *p
= iter
->pos
, *eol
;
743 strbuf_reset(&iter
->refname_buf
);
745 if (iter
->pos
== iter
->eof
)
748 iter
->base
.flags
= REF_ISPACKED
;
750 if (iter
->eof
- p
< GIT_SHA1_HEXSZ
+ 2 ||
751 parse_oid_hex(p
, &iter
->oid
, &p
) ||
753 die_invalid_line(iter
->packed_refs
->refs
->path
,
754 iter
->pos
, iter
->eof
- iter
->pos
);
756 eol
= memchr(p
, '\n', iter
->eof
- p
);
758 die_unterminated_line(iter
->packed_refs
->refs
->path
,
759 iter
->pos
, iter
->eof
- iter
->pos
);
761 strbuf_add(&iter
->refname_buf
, p
, eol
- p
);
762 iter
->base
.refname
= iter
->refname_buf
.buf
;
764 if (check_refname_format(iter
->base
.refname
, REFNAME_ALLOW_ONELEVEL
)) {
765 if (!refname_is_safe(iter
->base
.refname
))
766 die("packed refname is dangerous: %s",
769 iter
->base
.flags
|= REF_BAD_NAME
| REF_ISBROKEN
;
771 if (iter
->packed_refs
->peeled
== PEELED_FULLY
||
772 (iter
->packed_refs
->peeled
== PEELED_TAGS
&&
773 starts_with(iter
->base
.refname
, "refs/tags/")))
774 iter
->base
.flags
|= REF_KNOWS_PEELED
;
778 if (iter
->pos
< iter
->eof
&& *iter
->pos
== '^') {
780 if (iter
->eof
- p
< GIT_SHA1_HEXSZ
+ 1 ||
781 parse_oid_hex(p
, &iter
->peeled
, &p
) ||
783 die_invalid_line(iter
->packed_refs
->refs
->path
,
784 iter
->pos
, iter
->eof
- iter
->pos
);
788 * Regardless of what the file header said, we
789 * definitely know the value of *this* reference. But
790 * we suppress it if the reference is broken:
792 if ((iter
->base
.flags
& REF_ISBROKEN
)) {
793 oidclr(&iter
->peeled
);
794 iter
->base
.flags
&= ~REF_KNOWS_PEELED
;
796 iter
->base
.flags
|= REF_KNOWS_PEELED
;
799 oidclr(&iter
->peeled
);
805 static int packed_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
807 struct packed_ref_iterator
*iter
=
808 (struct packed_ref_iterator
*)ref_iterator
;
811 while ((ok
= next_record(iter
)) == ITER_OK
) {
812 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
813 ref_type(iter
->base
.refname
) != REF_TYPE_PER_WORKTREE
)
816 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
817 !ref_resolves_to_object(iter
->base
.refname
, &iter
->oid
,
824 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
830 static int packed_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
831 struct object_id
*peeled
)
833 struct packed_ref_iterator
*iter
=
834 (struct packed_ref_iterator
*)ref_iterator
;
836 if ((iter
->base
.flags
& REF_KNOWS_PEELED
)) {
837 oidcpy(peeled
, &iter
->peeled
);
838 return is_null_oid(&iter
->peeled
) ? -1 : 0;
839 } else if ((iter
->base
.flags
& (REF_ISBROKEN
| REF_ISSYMREF
))) {
842 return !!peel_object(iter
->oid
.hash
, peeled
->hash
);
846 static int packed_ref_iterator_abort(struct ref_iterator
*ref_iterator
)
848 struct packed_ref_iterator
*iter
=
849 (struct packed_ref_iterator
*)ref_iterator
;
852 strbuf_release(&iter
->refname_buf
);
853 release_packed_ref_cache(iter
->packed_refs
);
854 base_ref_iterator_free(ref_iterator
);
858 static struct ref_iterator_vtable packed_ref_iterator_vtable
= {
859 packed_ref_iterator_advance
,
860 packed_ref_iterator_peel
,
861 packed_ref_iterator_abort
864 static struct ref_iterator
*packed_ref_iterator_begin(
865 struct ref_store
*ref_store
,
866 const char *prefix
, unsigned int flags
)
868 struct packed_ref_store
*refs
;
869 struct packed_ref_cache
*packed_refs
;
871 struct packed_ref_iterator
*iter
;
872 struct ref_iterator
*ref_iterator
;
873 unsigned int required_flags
= REF_STORE_READ
;
875 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
876 required_flags
|= REF_STORE_ODB
;
877 refs
= packed_downcast(ref_store
, required_flags
, "ref_iterator_begin");
879 packed_refs
= get_packed_ref_cache(refs
);
881 if (!packed_refs
->buf
)
882 return empty_ref_iterator_begin();
884 iter
= xcalloc(1, sizeof(*iter
));
885 ref_iterator
= &iter
->base
;
886 base_ref_iterator_init(ref_iterator
, &packed_ref_iterator_vtable
, 1);
889 * Note that get_packed_ref_cache() internally checks whether
890 * the packed-ref cache is up to date with what is on disk,
891 * and re-reads it if not.
893 iter
->packed_refs
= packed_refs
;
894 acquire_packed_ref_cache(packed_refs
);
896 if (prefix
&& *prefix
)
897 start
= find_reference_location(packed_refs
, prefix
, 0);
899 start
= packed_refs
->buf
+ packed_refs
->header_len
;
902 iter
->eof
= packed_refs
->eof
;
903 strbuf_init(&iter
->refname_buf
, 0);
905 iter
->base
.oid
= &iter
->oid
;
909 if (prefix
&& *prefix
)
910 /* Stop iteration after we've gone *past* prefix: */
911 ref_iterator
= prefix_ref_iterator_begin(ref_iterator
, prefix
, 0);
/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}
933 int packed_refs_lock(struct ref_store
*ref_store
, int flags
, struct strbuf
*err
)
935 struct packed_ref_store
*refs
=
936 packed_downcast(ref_store
, REF_STORE_WRITE
| REF_STORE_MAIN
,
938 static int timeout_configured
= 0;
939 static int timeout_value
= 1000;
941 if (!timeout_configured
) {
942 git_config_get_int("core.packedrefstimeout", &timeout_value
);
943 timeout_configured
= 1;
947 * Note that we close the lockfile immediately because we
948 * don't write new content to it, but rather to a separate
951 if (hold_lock_file_for_update_timeout(
954 flags
, timeout_value
) < 0) {
955 unable_to_lock_message(refs
->path
, errno
, err
);
959 if (close_lock_file(&refs
->lock
)) {
960 strbuf_addf(err
, "unable to close %s: %s", refs
->path
, strerror(errno
));
965 * Now that we hold the `packed-refs` lock, make sure that our
966 * cache matches the current version of the file. Normally
967 * `get_packed_ref_cache()` does that for us, but that
968 * function assumes that when the file is locked, any existing
969 * cache is still valid. We've just locked the file, but it
970 * might have changed the moment *before* we locked it.
972 validate_packed_ref_cache(refs
);
975 * Now make sure that the packed-refs file as it exists in the
976 * locked state is loaded into the cache:
978 get_packed_ref_cache(refs
);
982 void packed_refs_unlock(struct ref_store
*ref_store
)
984 struct packed_ref_store
*refs
= packed_downcast(
986 REF_STORE_READ
| REF_STORE_WRITE
,
987 "packed_refs_unlock");
989 if (!is_lock_file_locked(&refs
->lock
))
990 die("BUG: packed_refs_unlock() called when not locked");
991 rollback_lock_file(&refs
->lock
);
994 int packed_refs_is_locked(struct ref_store
*ref_store
)
996 struct packed_ref_store
*refs
= packed_downcast(
998 REF_STORE_READ
| REF_STORE_WRITE
,
999 "packed_refs_is_locked");
1001 return is_lock_file_locked(&refs
->lock
);
/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";
/*
 * `init_db` backend method. The packed-refs file needs no
 * initialization, so this always succeeds.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
1022 * Write the packed-refs from the cache to the packed-refs tempfile,
1023 * incorporating any changes from `updates`. `updates` must be a
1024 * sorted string list whose keys are the refnames and whose util
1025 * values are `struct ref_update *`. On error, rollback the tempfile,
1026 * write an error message to `err`, and return a nonzero value.
1028 * The packfile must be locked before calling this function and will
1029 * remain locked when it is done.
1031 static int write_with_updates(struct packed_ref_store
*refs
,
1032 struct string_list
*updates
,
1035 struct ref_iterator
*iter
= NULL
;
1039 struct strbuf sb
= STRBUF_INIT
;
1040 char *packed_refs_path
;
1042 if (!is_lock_file_locked(&refs
->lock
))
1043 die("BUG: write_with_updates() called while unlocked");
1046 * If packed-refs is a symlink, we want to overwrite the
1047 * symlinked-to file, not the symlink itself. Also, put the
1048 * staging file next to it:
1050 packed_refs_path
= get_locked_file_path(&refs
->lock
);
1051 strbuf_addf(&sb
, "%s.new", packed_refs_path
);
1052 free(packed_refs_path
);
1053 if (create_tempfile(&refs
->tempfile
, sb
.buf
) < 0) {
1054 strbuf_addf(err
, "unable to create file %s: %s",
1055 sb
.buf
, strerror(errno
));
1056 strbuf_release(&sb
);
1059 strbuf_release(&sb
);
1061 out
= fdopen_tempfile(&refs
->tempfile
, "w");
1063 strbuf_addf(err
, "unable to fdopen packed-refs tempfile: %s",
1068 if (fprintf(out
, "%s", PACKED_REFS_HEADER
) < 0)
1072 * We iterate in parallel through the current list of refs and
1073 * the list of updates, processing an entry from at least one
1074 * of the lists each time through the loop. When the current
1075 * list of refs is exhausted, set iter to NULL. When the list
1076 * of updates is exhausted, leave i set to updates->nr.
1078 iter
= packed_ref_iterator_begin(&refs
->base
, "",
1079 DO_FOR_EACH_INCLUDE_BROKEN
);
1080 if ((ok
= ref_iterator_advance(iter
)) != ITER_OK
)
1085 while (iter
|| i
< updates
->nr
) {
1086 struct ref_update
*update
= NULL
;
1089 if (i
>= updates
->nr
) {
1092 update
= updates
->items
[i
].util
;
1097 cmp
= strcmp(iter
->refname
, update
->refname
);
1102 * There is both an old value and an update
1103 * for this reference. Check the old value if
1106 if ((update
->flags
& REF_HAVE_OLD
)) {
1107 if (is_null_oid(&update
->old_oid
)) {
1108 strbuf_addf(err
, "cannot update ref '%s': "
1109 "reference already exists",
1112 } else if (oidcmp(&update
->old_oid
, iter
->oid
)) {
1113 strbuf_addf(err
, "cannot update ref '%s': "
1114 "is at %s but expected %s",
1116 oid_to_hex(iter
->oid
),
1117 oid_to_hex(&update
->old_oid
));
1122 /* Now figure out what to use for the new value: */
1123 if ((update
->flags
& REF_HAVE_NEW
)) {
1125 * The update takes precedence. Skip
1126 * the iterator over the unneeded
1129 if ((ok
= ref_iterator_advance(iter
)) != ITER_OK
)
1134 * The update doesn't actually want to
1135 * change anything. We're done with it.
1140 } else if (cmp
> 0) {
1142 * There is no old value but there is an
1143 * update for this reference. Make sure that
1144 * the update didn't expect an existing value:
1146 if ((update
->flags
& REF_HAVE_OLD
) &&
1147 !is_null_oid(&update
->old_oid
)) {
1148 strbuf_addf(err
, "cannot update ref '%s': "
1149 "reference is missing but expected %s",
1151 oid_to_hex(&update
->old_oid
));
1157 /* Pass the old reference through. */
1159 struct object_id peeled
;
1160 int peel_error
= ref_iterator_peel(iter
, &peeled
);
1162 if (write_packed_entry(out
, iter
->refname
,
1164 peel_error
? NULL
: peeled
.hash
))
1167 if ((ok
= ref_iterator_advance(iter
)) != ITER_OK
)
1169 } else if (is_null_oid(&update
->new_oid
)) {
1171 * The update wants to delete the reference,
1172 * and the reference either didn't exist or we
1173 * have already skipped it. So we're done with
1174 * the update (and don't have to write
1179 struct object_id peeled
;
1180 int peel_error
= peel_object(update
->new_oid
.hash
,
1183 if (write_packed_entry(out
, update
->refname
,
1184 update
->new_oid
.hash
,
1185 peel_error
? NULL
: peeled
.hash
))
1192 if (ok
!= ITER_DONE
) {
1193 strbuf_addf(err
, "unable to write packed-refs file: "
1194 "error iterating over old contents");
1198 if (close_tempfile(&refs
->tempfile
)) {
1199 strbuf_addf(err
, "error closing file %s: %s",
1200 get_tempfile_path(&refs
->tempfile
),
1202 strbuf_release(&sb
);
1209 strbuf_addf(err
, "error writing to %s: %s",
1210 get_tempfile_path(&refs
->tempfile
), strerror(errno
));
1214 ref_iterator_abort(iter
);
1216 delete_tempfile(&refs
->tempfile
);
1220 struct packed_transaction_backend_data
{
1221 /* True iff the transaction owns the packed-refs lock. */
1224 struct string_list updates
;
1227 static void packed_transaction_cleanup(struct packed_ref_store
*refs
,
1228 struct ref_transaction
*transaction
)
1230 struct packed_transaction_backend_data
*data
= transaction
->backend_data
;
1233 string_list_clear(&data
->updates
, 0);
1235 if (is_tempfile_active(&refs
->tempfile
))
1236 delete_tempfile(&refs
->tempfile
);
1238 if (data
->own_lock
&& is_lock_file_locked(&refs
->lock
)) {
1239 packed_refs_unlock(&refs
->base
);
1244 transaction
->backend_data
= NULL
;
1247 transaction
->state
= REF_TRANSACTION_CLOSED
;
/*
 * Implement the "transaction_prepare" method for the packed-refs
 * backend: sort the queued updates by refname, reject duplicates,
 * acquire the "packed-refs" lock if the caller does not already hold
 * it, and write the proposed new contents to a tempfile via
 * write_with_updates(). On success the transaction is left in
 * REF_TRANSACTION_PREPARED state and 0 is returned; on failure the
 * transaction is cleaned up, a message is appended to `err`, and
 * TRANSACTION_GENERIC_ERROR is returned.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		/* Remember that cleanup must release this lock: */
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1309 static int packed_transaction_abort(struct ref_store
*ref_store
,
1310 struct ref_transaction
*transaction
,
1313 struct packed_ref_store
*refs
= packed_downcast(
1315 REF_STORE_READ
| REF_STORE_WRITE
| REF_STORE_ODB
,
1316 "ref_transaction_abort");
1318 packed_transaction_cleanup(refs
, transaction
);
1322 static int packed_transaction_finish(struct ref_store
*ref_store
,
1323 struct ref_transaction
*transaction
,
1326 struct packed_ref_store
*refs
= packed_downcast(
1328 REF_STORE_READ
| REF_STORE_WRITE
| REF_STORE_ODB
,
1329 "ref_transaction_finish");
1330 int ret
= TRANSACTION_GENERIC_ERROR
;
1331 char *packed_refs_path
;
1333 clear_packed_ref_cache(refs
);
1335 packed_refs_path
= get_locked_file_path(&refs
->lock
);
1336 if (rename_tempfile(&refs
->tempfile
, packed_refs_path
)) {
1337 strbuf_addf(err
, "error replacing %s: %s",
1338 refs
->path
, strerror(errno
));
1345 free(packed_refs_path
);
1346 packed_transaction_cleanup(refs
, transaction
);
/*
 * Implement the "initial_transaction_commit" method. The packed
 * backend needs no special initial-commit handling, so simply
 * delegate to the normal transaction-commit path. `ref_store` is
 * unused here beyond satisfying the method signature.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
/*
 * Implement the "delete_refs" method: delete all of `refnames` from
 * the packed-refs store in a single transaction. A failure to queue
 * one deletion is reported via warning() but does not stop the rest;
 * the return value is the result of committing the transaction (with
 * an error() message emitted on failure).
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			/* Reset err so later messages don't accumulate: */
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}
/*
 * Implement the "pack_refs" method: a no-op for this backend (always
 * returns 0), for the reason explained below.
 */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
/*
 * Implement the "create_symref" method: unsupported by this backend;
 * reaching this function indicates a bug in the caller.
 */
static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
/*
 * Implement the "rename_ref" method: unsupported by this backend;
 * reaching this function indicates a bug in the caller.
 */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
/*
 * Implement the "reflog_iterator_begin" method: this backend keeps no
 * reflogs, so hand back an iterator over the empty set.
 */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
/*
 * Implement the "for_each_reflog_ent" method: no reflogs exist in
 * this backend, so there is nothing to iterate over; trivially
 * succeed without invoking `fn`.
 */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}
/*
 * Implement the "for_each_reflog_ent_reverse" method: no reflogs
 * exist in this backend, so trivially succeed without invoking `fn`.
 */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}
/*
 * Implement the "reflog_exists" method: this backend keeps no
 * reflogs, so the answer is always "no" (0).
 */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
/*
 * Implement the "create_reflog" method: unsupported by this backend;
 * reaching this function indicates a bug in the caller.
 */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
/*
 * Implement the "delete_reflog" method: there is never a reflog to
 * delete in this backend, so trivially succeed.
 */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
/*
 * Implement the "reflog_expire" method: there are no reflog entries
 * to expire in this backend, so trivially succeed without calling any
 * of the expiry callbacks.
 */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1480 struct ref_storage_be refs_be_packed
= {
1483 packed_ref_store_create
,
1485 packed_transaction_prepare
,
1486 packed_transaction_finish
,
1487 packed_transaction_abort
,
1488 packed_initial_transaction_commit
,
1491 packed_create_symref
,
1495 packed_ref_iterator_begin
,
1496 packed_read_raw_ref
,
1498 packed_reflog_iterator_begin
,
1499 packed_for_each_reflog_ent
,
1500 packed_for_each_reflog_ent_reverse
,
1501 packed_reflog_exists
,
1502 packed_create_reflog
,
1503 packed_delete_reflog
,
1504 packed_reflog_expire