4 #include "refs-internal.h"
6 #include "packed-backend.h"
7 #include "../iterator.h"
8 #include "../lockfile.h"
10 struct packed_ref_cache
{
11 struct ref_cache
*cache
;
14 * Count of references to the data structure in this instance,
15 * including the pointer from files_ref_store::packed if any.
16 * The data will not be freed as long as the reference count
19 unsigned int referrers
;
21 /* The metadata from when this packed-refs cache was read */
22 struct stat_validity validity
;
26 * Increment the reference count of *packed_refs.
28 static void acquire_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
30 packed_refs
->referrers
++;
34 * Decrease the reference count of *packed_refs. If it goes to zero,
35 * free *packed_refs and return true; otherwise return false.
37 static int release_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
39 if (!--packed_refs
->referrers
) {
40 free_ref_cache(packed_refs
->cache
);
41 stat_validity_clear(&packed_refs
->validity
);
50 * A container for `packed-refs`-related data. It is not (yet) a
53 struct packed_ref_store
{
54 struct ref_store base
;
56 unsigned int store_flags
;
58 /* The path of the "packed-refs" file: */
62 * A cache of the values read from the `packed-refs` file, if
63 * it might still be current; otherwise, NULL.
65 struct packed_ref_cache
*cache
;
68 * Lock used for the "packed-refs" file. Note that this (and
69 * thus the enclosing `packed_ref_store`) must not be freed.
71 struct lock_file lock
;
74 * Temporary file used when rewriting new contents to the
75 * "packed-refs" file. Note that this (and thus the enclosing
76 * `packed_ref_store`) must not be freed.
78 struct tempfile
*tempfile
;
81 struct ref_store
*packed_ref_store_create(const char *path
,
82 unsigned int store_flags
)
84 struct packed_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
85 struct ref_store
*ref_store
= (struct ref_store
*)refs
;
87 base_ref_store_init(ref_store
, &refs_be_packed
);
88 refs
->store_flags
= store_flags
;
90 refs
->path
= xstrdup(path
);
95 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
96 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
97 * support at least the flags specified in `required_flags`. `caller`
98 * is used in any necessary error messages.
100 static struct packed_ref_store
*packed_downcast(struct ref_store
*ref_store
,
101 unsigned int required_flags
,
104 struct packed_ref_store
*refs
;
106 if (ref_store
->be
!= &refs_be_packed
)
107 die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
108 ref_store
->be
->name
, caller
);
110 refs
= (struct packed_ref_store
*)ref_store
;
112 if ((refs
->store_flags
& required_flags
) != required_flags
)
113 die("BUG: unallowed operation (%s), requires %x, has %x\n",
114 caller
, required_flags
, refs
->store_flags
);
119 static void clear_packed_ref_cache(struct packed_ref_store
*refs
)
122 struct packed_ref_cache
*cache
= refs
->cache
;
125 release_packed_ref_cache(cache
);
129 /* The length of a peeled reference line in packed-refs, including EOL: */
130 #define PEELED_LINE_LENGTH 42
133 * Parse one line from a packed-refs file. Write the SHA1 to sha1.
134 * Return a pointer to the refname within the line (null-terminated),
135 * or NULL if there was a problem.
137 static const char *parse_ref_line(struct strbuf
*line
, struct object_id
*oid
)
141 if (parse_oid_hex(line
->buf
, oid
, &ref
) < 0)
143 if (!isspace(*ref
++))
149 if (line
->buf
[line
->len
- 1] != '\n')
151 line
->buf
[--line
->len
] = 0;
157 * Read from `packed_refs_file` into a newly-allocated
158 * `packed_ref_cache` and return it. The return value will already
159 * have its reference count incremented.
161 * A comment line of the form "# pack-refs with: " may contain zero or
162 * more traits. We interpret the traits as follows:
166 * Probably no references are peeled. But if the file contains a
167 * peeled value for a reference, we will use it.
171 * References under "refs/tags/", if they *can* be peeled, *are*
172 * peeled in this file. References outside of "refs/tags/" are
173 * probably not peeled even if they could have been, but if we find
174 * a peeled value for such a reference we will use it.
178 * All references in the file that can be peeled are peeled.
179 * Inversely (and this is more important), any references in the
180 * file for which no peeled value is recorded is not peelable. This
181 * trait should typically be written alongside "peeled" for
182 * compatibility with older clients, but we do not require it
183 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
185 static struct packed_ref_cache
*read_packed_refs(const char *packed_refs_file
)
188 struct packed_ref_cache
*packed_refs
= xcalloc(1, sizeof(*packed_refs
));
189 struct ref_entry
*last
= NULL
;
190 struct strbuf line
= STRBUF_INIT
;
191 enum { PEELED_NONE
, PEELED_TAGS
, PEELED_FULLY
} peeled
= PEELED_NONE
;
194 acquire_packed_ref_cache(packed_refs
);
195 packed_refs
->cache
= create_ref_cache(NULL
, NULL
);
196 packed_refs
->cache
->root
->flag
&= ~REF_INCOMPLETE
;
198 f
= fopen(packed_refs_file
, "r");
200 if (errno
== ENOENT
) {
202 * This is OK; it just means that no
203 * "packed-refs" file has been written yet,
204 * which is equivalent to it being empty.
208 die_errno("couldn't read %s", packed_refs_file
);
212 stat_validity_update(&packed_refs
->validity
, fileno(f
));
214 dir
= get_ref_dir(packed_refs
->cache
->root
);
215 while (strbuf_getwholeline(&line
, f
, '\n') != EOF
) {
216 struct object_id oid
;
220 if (!line
.len
|| line
.buf
[line
.len
- 1] != '\n')
221 die("unterminated line in %s: %s", packed_refs_file
, line
.buf
);
223 if (skip_prefix(line
.buf
, "# pack-refs with:", &traits
)) {
224 if (strstr(traits
, " fully-peeled "))
225 peeled
= PEELED_FULLY
;
226 else if (strstr(traits
, " peeled "))
227 peeled
= PEELED_TAGS
;
228 /* perhaps other traits later as well */
232 refname
= parse_ref_line(&line
, &oid
);
234 int flag
= REF_ISPACKED
;
236 if (check_refname_format(refname
, REFNAME_ALLOW_ONELEVEL
)) {
237 if (!refname_is_safe(refname
))
238 die("packed refname is dangerous: %s", refname
);
240 flag
|= REF_BAD_NAME
| REF_ISBROKEN
;
242 last
= create_ref_entry(refname
, &oid
, flag
);
243 if (peeled
== PEELED_FULLY
||
244 (peeled
== PEELED_TAGS
&& starts_with(refname
, "refs/tags/")))
245 last
->flag
|= REF_KNOWS_PEELED
;
246 add_ref_entry(dir
, last
);
248 line
.buf
[0] == '^' &&
249 line
.len
== PEELED_LINE_LENGTH
&&
250 line
.buf
[PEELED_LINE_LENGTH
- 1] == '\n' &&
251 !get_oid_hex(line
.buf
+ 1, &oid
)) {
252 oidcpy(&last
->u
.value
.peeled
, &oid
);
254 * Regardless of what the file header said,
255 * we definitely know the value of *this*
258 last
->flag
|= REF_KNOWS_PEELED
;
260 strbuf_setlen(&line
, line
.len
- 1);
261 die("unexpected line in %s: %s", packed_refs_file
, line
.buf
);
266 strbuf_release(&line
);
272 * Check that the packed refs cache (if any) still reflects the
273 * contents of the file. If not, clear the cache.
275 static void validate_packed_ref_cache(struct packed_ref_store
*refs
)
278 !stat_validity_check(&refs
->cache
->validity
, refs
->path
))
279 clear_packed_ref_cache(refs
);
283 * Get the packed_ref_cache for the specified packed_ref_store,
284 * creating and populating it if it hasn't been read before or if the
285 * file has been changed (according to its `validity` field) since it
286 * was last read. On the other hand, if we hold the lock, then assume
287 * that the file hasn't been changed out from under us, so skip the
288 * extra `stat()` call in `stat_validity_check()`.
290 static struct packed_ref_cache
*get_packed_ref_cache(struct packed_ref_store
*refs
)
292 if (!is_lock_file_locked(&refs
->lock
))
293 validate_packed_ref_cache(refs
);
296 refs
->cache
= read_packed_refs(refs
->path
);
301 static struct ref_dir
*get_packed_ref_dir(struct packed_ref_cache
*packed_ref_cache
)
303 return get_ref_dir(packed_ref_cache
->cache
->root
);
/* Return the (up-to-date) ref_dir for the store's packed refs. */
static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}
/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}
321 static int packed_read_raw_ref(struct ref_store
*ref_store
,
322 const char *refname
, unsigned char *sha1
,
323 struct strbuf
*referent
, unsigned int *type
)
325 struct packed_ref_store
*refs
=
326 packed_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
328 struct ref_entry
*entry
;
332 entry
= get_packed_ref(refs
, refname
);
338 hashcpy(sha1
, entry
->u
.value
.oid
.hash
);
339 *type
= REF_ISPACKED
;
343 static int packed_peel_ref(struct ref_store
*ref_store
,
344 const char *refname
, unsigned char *sha1
)
346 struct packed_ref_store
*refs
=
347 packed_downcast(ref_store
, REF_STORE_READ
| REF_STORE_ODB
,
349 struct ref_entry
*r
= get_packed_ref(refs
, refname
);
351 if (!r
|| peel_entry(r
, 0))
354 hashcpy(sha1
, r
->u
.value
.peeled
.hash
);
358 struct packed_ref_iterator
{
359 struct ref_iterator base
;
361 struct packed_ref_cache
*cache
;
362 struct ref_iterator
*iter0
;
366 static int packed_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
368 struct packed_ref_iterator
*iter
=
369 (struct packed_ref_iterator
*)ref_iterator
;
372 while ((ok
= ref_iterator_advance(iter
->iter0
)) == ITER_OK
) {
373 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
374 ref_type(iter
->iter0
->refname
) != REF_TYPE_PER_WORKTREE
)
377 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
378 !ref_resolves_to_object(iter
->iter0
->refname
,
383 iter
->base
.refname
= iter
->iter0
->refname
;
384 iter
->base
.oid
= iter
->iter0
->oid
;
385 iter
->base
.flags
= iter
->iter0
->flags
;
390 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
396 static int packed_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
397 struct object_id
*peeled
)
399 struct packed_ref_iterator
*iter
=
400 (struct packed_ref_iterator
*)ref_iterator
;
402 return ref_iterator_peel(iter
->iter0
, peeled
);
405 static int packed_ref_iterator_abort(struct ref_iterator
*ref_iterator
)
407 struct packed_ref_iterator
*iter
=
408 (struct packed_ref_iterator
*)ref_iterator
;
412 ok
= ref_iterator_abort(iter
->iter0
);
414 release_packed_ref_cache(iter
->cache
);
415 base_ref_iterator_free(ref_iterator
);
419 static struct ref_iterator_vtable packed_ref_iterator_vtable
= {
420 packed_ref_iterator_advance
,
421 packed_ref_iterator_peel
,
422 packed_ref_iterator_abort
425 static struct ref_iterator
*packed_ref_iterator_begin(
426 struct ref_store
*ref_store
,
427 const char *prefix
, unsigned int flags
)
429 struct packed_ref_store
*refs
;
430 struct packed_ref_iterator
*iter
;
431 struct ref_iterator
*ref_iterator
;
432 unsigned int required_flags
= REF_STORE_READ
;
434 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
435 required_flags
|= REF_STORE_ODB
;
436 refs
= packed_downcast(ref_store
, required_flags
, "ref_iterator_begin");
438 iter
= xcalloc(1, sizeof(*iter
));
439 ref_iterator
= &iter
->base
;
440 base_ref_iterator_init(ref_iterator
, &packed_ref_iterator_vtable
);
443 * Note that get_packed_ref_cache() internally checks whether
444 * the packed-ref cache is up to date with what is on disk,
445 * and re-reads it if not.
448 iter
->cache
= get_packed_ref_cache(refs
);
449 acquire_packed_ref_cache(iter
->cache
);
450 iter
->iter0
= cache_ref_iterator_begin(iter
->cache
->cache
, prefix
, 0);
/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}
474 int packed_refs_lock(struct ref_store
*ref_store
, int flags
, struct strbuf
*err
)
476 struct packed_ref_store
*refs
=
477 packed_downcast(ref_store
, REF_STORE_WRITE
| REF_STORE_MAIN
,
479 static int timeout_configured
= 0;
480 static int timeout_value
= 1000;
482 if (!timeout_configured
) {
483 git_config_get_int("core.packedrefstimeout", &timeout_value
);
484 timeout_configured
= 1;
488 * Note that we close the lockfile immediately because we
489 * don't write new content to it, but rather to a separate
492 if (hold_lock_file_for_update_timeout(
495 flags
, timeout_value
) < 0) {
496 unable_to_lock_message(refs
->path
, errno
, err
);
500 if (close_lock_file_gently(&refs
->lock
)) {
501 strbuf_addf(err
, "unable to close %s: %s", refs
->path
, strerror(errno
));
502 rollback_lock_file(&refs
->lock
);
507 * Now that we hold the `packed-refs` lock, make sure that our
508 * cache matches the current version of the file. Normally
509 * `get_packed_ref_cache()` does that for us, but that
510 * function assumes that when the file is locked, any existing
511 * cache is still valid. We've just locked the file, but it
512 * might have changed the moment *before* we locked it.
514 validate_packed_ref_cache(refs
);
517 * Now make sure that the packed-refs file as it exists in the
518 * locked state is loaded into the cache:
520 get_packed_ref_cache(refs
);
524 void packed_refs_unlock(struct ref_store
*ref_store
)
526 struct packed_ref_store
*refs
= packed_downcast(
528 REF_STORE_READ
| REF_STORE_WRITE
,
529 "packed_refs_unlock");
531 if (!is_lock_file_locked(&refs
->lock
))
532 die("BUG: packed_refs_unlock() called when not locked");
533 rollback_lock_file(&refs
->lock
);
536 int packed_refs_is_locked(struct ref_store
*ref_store
)
538 struct packed_ref_store
*refs
= packed_downcast(
540 REF_STORE_READ
| REF_STORE_WRITE
,
541 "packed_refs_is_locked");
543 return is_lock_file_locked(&refs
->lock
);
/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
560 * Write the packed-refs from the cache to the packed-refs tempfile,
561 * incorporating any changes from `updates`. `updates` must be a
562 * sorted string list whose keys are the refnames and whose util
563 * values are `struct ref_update *`. On error, rollback the tempfile,
564 * write an error message to `err`, and return a nonzero value.
566 * The packfile must be locked before calling this function and will
567 * remain locked when it is done.
569 static int write_with_updates(struct packed_ref_store
*refs
,
570 struct string_list
*updates
,
573 struct ref_iterator
*iter
= NULL
;
577 struct strbuf sb
= STRBUF_INIT
;
578 char *packed_refs_path
;
580 if (!is_lock_file_locked(&refs
->lock
))
581 die("BUG: write_with_updates() called while unlocked");
584 * If packed-refs is a symlink, we want to overwrite the
585 * symlinked-to file, not the symlink itself. Also, put the
586 * staging file next to it:
588 packed_refs_path
= get_locked_file_path(&refs
->lock
);
589 strbuf_addf(&sb
, "%s.new", packed_refs_path
);
590 free(packed_refs_path
);
591 refs
->tempfile
= create_tempfile(sb
.buf
);
592 if (!refs
->tempfile
) {
593 strbuf_addf(err
, "unable to create file %s: %s",
594 sb
.buf
, strerror(errno
));
600 out
= fdopen_tempfile(refs
->tempfile
, "w");
602 strbuf_addf(err
, "unable to fdopen packed-refs tempfile: %s",
607 if (fprintf(out
, "%s", PACKED_REFS_HEADER
) < 0)
611 * We iterate in parallel through the current list of refs and
612 * the list of updates, processing an entry from at least one
613 * of the lists each time through the loop. When the current
614 * list of refs is exhausted, set iter to NULL. When the list
615 * of updates is exhausted, leave i set to updates->nr.
617 iter
= packed_ref_iterator_begin(&refs
->base
, "",
618 DO_FOR_EACH_INCLUDE_BROKEN
);
619 if ((ok
= ref_iterator_advance(iter
)) != ITER_OK
)
624 while (iter
|| i
< updates
->nr
) {
625 struct ref_update
*update
= NULL
;
628 if (i
>= updates
->nr
) {
631 update
= updates
->items
[i
].util
;
636 cmp
= strcmp(iter
->refname
, update
->refname
);
641 * There is both an old value and an update
642 * for this reference. Check the old value if
645 if ((update
->flags
& REF_HAVE_OLD
)) {
646 if (is_null_oid(&update
->old_oid
)) {
647 strbuf_addf(err
, "cannot update ref '%s': "
648 "reference already exists",
651 } else if (oidcmp(&update
->old_oid
, iter
->oid
)) {
652 strbuf_addf(err
, "cannot update ref '%s': "
653 "is at %s but expected %s",
655 oid_to_hex(iter
->oid
),
656 oid_to_hex(&update
->old_oid
));
661 /* Now figure out what to use for the new value: */
662 if ((update
->flags
& REF_HAVE_NEW
)) {
664 * The update takes precedence. Skip
665 * the iterator over the unneeded
668 if ((ok
= ref_iterator_advance(iter
)) != ITER_OK
)
673 * The update doesn't actually want to
674 * change anything. We're done with it.
679 } else if (cmp
> 0) {
681 * There is no old value but there is an
682 * update for this reference. Make sure that
683 * the update didn't expect an existing value:
685 if ((update
->flags
& REF_HAVE_OLD
) &&
686 !is_null_oid(&update
->old_oid
)) {
687 strbuf_addf(err
, "cannot update ref '%s': "
688 "reference is missing but expected %s",
690 oid_to_hex(&update
->old_oid
));
696 /* Pass the old reference through. */
698 struct object_id peeled
;
699 int peel_error
= ref_iterator_peel(iter
, &peeled
);
701 if (write_packed_entry(out
, iter
->refname
,
703 peel_error
? NULL
: peeled
.hash
))
706 if ((ok
= ref_iterator_advance(iter
)) != ITER_OK
)
708 } else if (is_null_oid(&update
->new_oid
)) {
710 * The update wants to delete the reference,
711 * and the reference either didn't exist or we
712 * have already skipped it. So we're done with
713 * the update (and don't have to write
718 struct object_id peeled
;
719 int peel_error
= peel_object(update
->new_oid
.hash
,
722 if (write_packed_entry(out
, update
->refname
,
723 update
->new_oid
.hash
,
724 peel_error
? NULL
: peeled
.hash
))
731 if (ok
!= ITER_DONE
) {
732 strbuf_addf(err
, "unable to write packed-refs file: "
733 "error iterating over old contents");
737 if (close_tempfile_gently(refs
->tempfile
)) {
738 strbuf_addf(err
, "error closing file %s: %s",
739 get_tempfile_path(refs
->tempfile
),
742 delete_tempfile(&refs
->tempfile
);
749 strbuf_addf(err
, "error writing to %s: %s",
750 get_tempfile_path(refs
->tempfile
), strerror(errno
));
754 ref_iterator_abort(iter
);
756 delete_tempfile(&refs
->tempfile
);
760 struct packed_transaction_backend_data
{
761 /* True iff the transaction owns the packed-refs lock. */
764 struct string_list updates
;
767 static void packed_transaction_cleanup(struct packed_ref_store
*refs
,
768 struct ref_transaction
*transaction
)
770 struct packed_transaction_backend_data
*data
= transaction
->backend_data
;
773 string_list_clear(&data
->updates
, 0);
775 if (is_tempfile_active(refs
->tempfile
))
776 delete_tempfile(&refs
->tempfile
);
778 if (data
->own_lock
&& is_lock_file_locked(&refs
->lock
)) {
779 packed_refs_unlock(&refs
->base
);
784 transaction
->backend_data
= NULL
;
787 transaction
->state
= REF_TRANSACTION_CLOSED
;
790 static int packed_transaction_prepare(struct ref_store
*ref_store
,
791 struct ref_transaction
*transaction
,
794 struct packed_ref_store
*refs
= packed_downcast(
796 REF_STORE_READ
| REF_STORE_WRITE
| REF_STORE_ODB
,
797 "ref_transaction_prepare");
798 struct packed_transaction_backend_data
*data
;
800 int ret
= TRANSACTION_GENERIC_ERROR
;
803 * Note that we *don't* skip transactions with zero updates,
804 * because such a transaction might be executed for the side
805 * effect of ensuring that all of the references are peeled.
806 * If the caller wants to optimize away empty transactions, it
807 * should do so itself.
810 data
= xcalloc(1, sizeof(*data
));
811 string_list_init(&data
->updates
, 0);
813 transaction
->backend_data
= data
;
816 * Stick the updates in a string list by refname so that we
819 for (i
= 0; i
< transaction
->nr
; i
++) {
820 struct ref_update
*update
= transaction
->updates
[i
];
821 struct string_list_item
*item
=
822 string_list_append(&data
->updates
, update
->refname
);
824 /* Store a pointer to update in item->util: */
827 string_list_sort(&data
->updates
);
829 if (ref_update_reject_duplicates(&data
->updates
, err
))
832 if (!is_lock_file_locked(&refs
->lock
)) {
833 if (packed_refs_lock(ref_store
, 0, err
))
838 if (write_with_updates(refs
, &data
->updates
, err
))
841 transaction
->state
= REF_TRANSACTION_PREPARED
;
845 packed_transaction_cleanup(refs
, transaction
);
849 static int packed_transaction_abort(struct ref_store
*ref_store
,
850 struct ref_transaction
*transaction
,
853 struct packed_ref_store
*refs
= packed_downcast(
855 REF_STORE_READ
| REF_STORE_WRITE
| REF_STORE_ODB
,
856 "ref_transaction_abort");
858 packed_transaction_cleanup(refs
, transaction
);
862 static int packed_transaction_finish(struct ref_store
*ref_store
,
863 struct ref_transaction
*transaction
,
866 struct packed_ref_store
*refs
= packed_downcast(
868 REF_STORE_READ
| REF_STORE_WRITE
| REF_STORE_ODB
,
869 "ref_transaction_finish");
870 int ret
= TRANSACTION_GENERIC_ERROR
;
871 char *packed_refs_path
;
873 packed_refs_path
= get_locked_file_path(&refs
->lock
);
874 if (rename_tempfile(&refs
->tempfile
, packed_refs_path
)) {
875 strbuf_addf(err
, "error replacing %s: %s",
876 refs
->path
, strerror(errno
));
880 clear_packed_ref_cache(refs
);
884 free(packed_refs_path
);
885 packed_transaction_cleanup(refs
, transaction
);
/* An initial commit is just an ordinary commit for this backend. */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
896 static int packed_delete_refs(struct ref_store
*ref_store
, const char *msg
,
897 struct string_list
*refnames
, unsigned int flags
)
899 struct packed_ref_store
*refs
=
900 packed_downcast(ref_store
, REF_STORE_WRITE
, "delete_refs");
901 struct strbuf err
= STRBUF_INIT
;
902 struct ref_transaction
*transaction
;
903 struct string_list_item
*item
;
906 (void)refs
; /* We need the check above, but don't use the variable */
912 * Since we don't check the references' old_oids, the
913 * individual updates can't fail, so we can pack all of the
914 * updates into a single transaction.
917 transaction
= ref_store_transaction_begin(ref_store
, &err
);
921 for_each_string_list_item(item
, refnames
) {
922 if (ref_transaction_delete(transaction
, item
->string
, NULL
,
924 warning(_("could not delete reference %s: %s"),
925 item
->string
, err
.buf
);
930 ret
= ref_transaction_commit(transaction
, &err
);
933 if (refnames
->nr
== 1)
934 error(_("could not delete reference %s: %s"),
935 refnames
->items
[0].string
, err
.buf
);
937 error(_("could not delete references: %s"), err
.buf
);
940 ref_transaction_free(transaction
);
941 strbuf_release(&err
);
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
/* Packed refs have no reflogs; iterate over nothing. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
974 static int packed_for_each_reflog_ent(struct ref_store
*ref_store
,
976 each_reflog_ent_fn fn
, void *cb_data
)
981 static int packed_for_each_reflog_ent_reverse(struct ref_store
*ref_store
,
983 each_reflog_ent_fn fn
,
/* A packed store never has a reflog for any ref. */
static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}
static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
/* Deleting a nonexistent reflog is trivially successful. */
static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}
1008 static int packed_reflog_expire(struct ref_store
*ref_store
,
1009 const char *refname
, const unsigned char *sha1
,
1011 reflog_expiry_prepare_fn prepare_fn
,
1012 reflog_expiry_should_prune_fn should_prune_fn
,
1013 reflog_expiry_cleanup_fn cleanup_fn
,
1014 void *policy_cb_data
)
1019 struct ref_storage_be refs_be_packed
= {
1022 packed_ref_store_create
,
1024 packed_transaction_prepare
,
1025 packed_transaction_finish
,
1026 packed_transaction_abort
,
1027 packed_initial_transaction_commit
,
1031 packed_create_symref
,
1035 packed_ref_iterator_begin
,
1036 packed_read_raw_ref
,
1038 packed_reflog_iterator_begin
,
1039 packed_for_each_reflog_ent
,
1040 packed_for_each_reflog_ent_reverse
,
1041 packed_reflog_exists
,
1042 packed_create_reflog
,
1043 packed_delete_reflog
,
1044 packed_reflog_expire