Line | Data | |
---|---|---|
1 | #define USE_THE_REPOSITORY_VARIABLE | |
2 | #define DISABLE_SIGN_COMPARE_WARNINGS | |
3 | ||
4 | #include "../git-compat-util.h" | |
5 | #include "../abspath.h" | |
6 | #include "../config.h" | |
7 | #include "../copy.h" | |
8 | #include "../environment.h" | |
9 | #include "../gettext.h" | |
10 | #include "../hash.h" | |
11 | #include "../hex.h" | |
12 | #include "../fsck.h" | |
13 | #include "../refs.h" | |
14 | #include "../repo-settings.h" | |
15 | #include "refs-internal.h" | |
16 | #include "ref-cache.h" | |
17 | #include "packed-backend.h" | |
18 | #include "../ident.h" | |
19 | #include "../iterator.h" | |
20 | #include "../dir-iterator.h" | |
21 | #include "../lockfile.h" | |
22 | #include "../object.h" | |
23 | #include "../object-file.h" | |
24 | #include "../path.h" | |
25 | #include "../dir.h" | |
26 | #include "../chdir-notify.h" | |
27 | #include "../setup.h" | |
28 | #include "../worktree.h" | |
29 | #include "../wrapper.h" | |
30 | #include "../write-or-die.h" | |
31 | #include "../revision.h" | |
32 | #include <wildmatch.h> | |
33 | ||
34 | /* | |
35 | * This backend uses the following flags in `ref_update::flags` for | |
36 | * internal bookkeeping purposes. Their numerical values must not | |
37 | * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW, | |
38 | * or REF_HAVE_OLD, which are also stored in `ref_update::flags`. | |
39 | */ | |
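| /* | |
| * Those public flags occupy the low bits (REF_NO_DEREF and friends are | |
| * defined in refs.h and refs-internal.h), which is why the | |
| * backend-private flags below start at (1 << 4). | |
| */ | |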
40 | ||
41 | /* | |
42 | * Used as a flag in ref_update::flags when a loose ref is being | |
43 | * pruned. This flag must only be used when REF_NO_DEREF is set. | |
44 | */ | |
45 | #define REF_IS_PRUNING (1 << 4) | |
46 | ||
47 | /* | |
48 | * Flag passed to lock_ref_sha1_basic() telling it to tolerate broken | |
49 | * refs (i.e., because the reference is about to be deleted anyway). | |
50 | */ | |
51 | #define REF_DELETING (1 << 5) | |
52 | ||
53 | /* | |
54 | * Used as a flag in ref_update::flags when the lockfile needs to be | |
55 | * committed. | |
56 | */ | |
57 | #define REF_NEEDS_COMMIT (1 << 6) | |
58 | ||
59 | /* | |
60 | * Used as a flag in ref_update::flags when the ref_update was via an | |
61 | * update to HEAD. | |
62 | */ | |
63 | #define REF_UPDATE_VIA_HEAD (1 << 8) | |
64 | ||
65 | /* | |
66 | * Used as a flag in ref_update::flags when a reference has been | |
67 | * deleted and the ref's parent directories may need cleanup. | |
68 | */ | |
69 | #define REF_DELETED_RMDIR (1 << 9) | |
70 | ||
71 | /* | |
72 | * Used to indicate that the reflog-only update has been created via | |
73 | * `split_head_update()`. | |
74 | */ | |
75 | #define REF_LOG_VIA_SPLIT (1 << 14) | |
76 | ||
77 | struct ref_lock { | |
78 | char *ref_name; | |
79 | struct lock_file lk; | |
80 | struct object_id old_oid; | |
81 | unsigned int count; /* track users of the lock (ref update + reflog updates) */ | |
82 | }; | |
83 | ||
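| /* | |
| * A ref store backed by loose ref files under the repository's gitdir | |
| * (and gitcommondir for shared refs), with packed refs delegated to the | |
| * packed backend held in `packed_ref_store`. | |
| */ | |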
84 | struct files_ref_store { | |
85 | struct ref_store base; | |
86 | unsigned int store_flags; | |
87 | ||
88 | char *gitcommondir; | |
89 | enum log_refs_config log_all_ref_updates; | |
90 | int prefer_symlink_refs; | |
91 | ||
92 | struct ref_cache *loose; | |
93 | ||
94 | struct ref_store *packed_ref_store; | |
95 | }; | |
96 | ||
97 | static void clear_loose_ref_cache(struct files_ref_store *refs) | |
98 | { | |
99 | if (refs->loose) { | |
100 | free_ref_cache(refs->loose); | |
101 | refs->loose = NULL; | |
102 | } | |
103 | } | |
104 | ||
105 | /* | |
106 | * Create and initialize a files ref store for the given repository | |
107 | * and gitdir. | |
108 | */ | |
109 | static struct ref_store *files_ref_store_init(struct repository *repo, | |
110 | const char *gitdir, | |
111 | unsigned int flags) | |
112 | { | |
113 | struct files_ref_store *refs = xcalloc(1, sizeof(*refs)); | |
114 | struct ref_store *ref_store = (struct ref_store *)refs; | |
115 | struct strbuf sb = STRBUF_INIT; | |
116 | ||
117 | base_ref_store_init(ref_store, repo, gitdir, &refs_be_files); | |
118 | refs->store_flags = flags; | |
119 | get_common_dir_noenv(&sb, gitdir); | |
120 | refs->gitcommondir = strbuf_detach(&sb, NULL); | |
121 | refs->packed_ref_store = | |
122 | packed_ref_store_init(repo, refs->gitcommondir, flags); | |
123 | refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo); | |
124 | repo_config_get_bool(repo, "core.prefersymlinkrefs", &refs->prefer_symlink_refs); | |
125 | ||
126 | chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir); | |
127 | chdir_notify_reparent("files-backend $GIT_COMMONDIR", | |
128 | &refs->gitcommondir); | |
129 | ||
130 | return ref_store; | |
131 | } | |
132 | ||
133 | /* | |
134 | * Die if refs is not the main ref store. caller is used in any | |
135 | * necessary error messages. | |
136 | */ | |
137 | static void files_assert_main_repository(struct files_ref_store *refs, | |
138 | const char *caller) | |
139 | { | |
140 | if (refs->store_flags & REF_STORE_MAIN) | |
141 | return; | |
142 | ||
143 | BUG("operation %s only allowed for main ref store", caller); | |
144 | } | |
145 | ||
146 | /* | |
147 | * Downcast ref_store to files_ref_store. Die if ref_store is not a | |
148 | * files_ref_store. required_flags is compared with ref_store's | |
149 | * store_flags to ensure the ref_store has all required capabilities. | |
150 | * "caller" is used in any necessary error messages. | |
151 | */ | |
152 | static struct files_ref_store *files_downcast(struct ref_store *ref_store, | |
153 | unsigned int required_flags, | |
154 | const char *caller) | |
155 | { | |
156 | struct files_ref_store *refs; | |
157 | ||
158 | if (ref_store->be != &refs_be_files) | |
159 | BUG("ref_store is type \"%s\" not \"files\" in %s", | |
160 | ref_store->be->name, caller); | |
161 | ||
162 | refs = (struct files_ref_store *)ref_store; | |
163 | ||
164 | if ((refs->store_flags & required_flags) != required_flags) | |
165 | BUG("operation %s requires abilities 0x%x, but only have 0x%x", | |
166 | caller, required_flags, refs->store_flags); | |
167 | ||
168 | return refs; | |
169 | } | |
170 | ||
171 | static void files_ref_store_release(struct ref_store *ref_store) | |
172 | { | |
173 | struct files_ref_store *refs = files_downcast(ref_store, 0, "release"); | |
174 | free_ref_cache(refs->loose); | |
175 | free(refs->gitcommondir); | |
176 | ref_store_release(refs->packed_ref_store); | |
177 | free(refs->packed_ref_store); | |
178 | } | |
179 | ||
180 | static void files_reflog_path(struct files_ref_store *refs, | |
181 | struct strbuf *sb, | |
182 | const char *refname) | |
183 | { | |
184 | const char *bare_refname; | |
185 | const char *wtname; | |
186 | int wtname_len; | |
187 | enum ref_worktree_type wt_type = parse_worktree_ref( | |
188 | refname, &wtname, &wtname_len, &bare_refname); | |
189 | ||
190 | switch (wt_type) { | |
191 | case REF_WORKTREE_CURRENT: | |
192 | strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname); | |
193 | break; | |
194 | case REF_WORKTREE_SHARED: | |
195 | case REF_WORKTREE_MAIN: | |
196 | strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, bare_refname); | |
197 | break; | |
198 | case REF_WORKTREE_OTHER: | |
199 | strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir, | |
200 | wtname_len, wtname, bare_refname); | |
201 | break; | |
202 | default: | |
203 | BUG("unknown ref type %d of ref %s", wt_type, refname); | |
204 | } | |
205 | } | |
206 | ||
207 | static void files_ref_path(struct files_ref_store *refs, | |
208 | struct strbuf *sb, | |
209 | const char *refname) | |
210 | { | |
211 | const char *bare_refname; | |
212 | const char *wtname; | |
213 | int wtname_len; | |
214 | enum ref_worktree_type wt_type = parse_worktree_ref( | |
215 | refname, &wtname, &wtname_len, &bare_refname); | |
216 | switch (wt_type) { | |
217 | case REF_WORKTREE_CURRENT: | |
218 | strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname); | |
219 | break; | |
220 | case REF_WORKTREE_OTHER: | |
221 | strbuf_addf(sb, "%s/worktrees/%.*s/%s", refs->gitcommondir, | |
222 | wtname_len, wtname, bare_refname); | |
223 | break; | |
224 | case REF_WORKTREE_SHARED: | |
225 | case REF_WORKTREE_MAIN: | |
226 | strbuf_addf(sb, "%s/%s", refs->gitcommondir, bare_refname); | |
227 | break; | |
228 | default: | |
229 | BUG("unknown ref type %d of ref %s", wt_type, refname); | |
230 | } | |
231 | } | |
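| /* | |
| * For illustration (assuming a linked worktree named "wt1"): a shared | |
| * ref such as "refs/heads/main" resolves to | |
| * "$GIT_COMMONDIR/refs/heads/main", a per-worktree ref such as "HEAD" | |
| * resolves to "$GIT_DIR/HEAD", and "worktrees/wt1/HEAD" resolves to | |
| * "$GIT_COMMONDIR/worktrees/wt1/HEAD". | |
| */ | |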
232 | ||
233 | /* | |
234 | * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being | |
235 | * per-worktree, might not appear in the directory listing for | |
236 | * refs/ in the main repo. | |
237 | */ | |
238 | static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname) | |
239 | { | |
240 | const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" }; | |
241 | int ip; | |
242 | ||
243 | if (strcmp(dirname, "refs/")) | |
244 | return; | |
245 | ||
246 | for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) { | |
247 | const char *prefix = prefixes[ip]; | |
248 | int prefix_len = strlen(prefix); | |
249 | struct ref_entry *child_entry; | |
250 | int pos; | |
251 | ||
252 | pos = search_ref_dir(dir, prefix, prefix_len); | |
253 | if (pos >= 0) | |
254 | continue; | |
255 | child_entry = create_dir_entry(dir->cache, prefix, prefix_len); | |
256 | add_entry_to_dir(dir, child_entry); | |
257 | } | |
258 | } | |
259 | ||
260 | static void loose_fill_ref_dir_regular_file(struct files_ref_store *refs, | |
261 | const char *refname, | |
262 | struct ref_dir *dir) | |
263 | { | |
264 | struct object_id oid; | |
265 | int flag; | |
266 | const char *referent = refs_resolve_ref_unsafe(&refs->base, | |
267 | refname, | |
268 | RESOLVE_REF_READING, | |
269 | &oid, &flag); | |
270 | ||
271 | if (!referent) { | |
272 | oidclr(&oid, refs->base.repo->hash_algo); | |
273 | flag |= REF_ISBROKEN; | |
274 | } else if (is_null_oid(&oid)) { | |
275 | /* | |
276 | * It is so astronomically unlikely | |
277 | * that null_oid is the OID of an | |
278 | * actual object that we consider its | |
279 | * appearance in a loose reference | |
280 | * file to be repo corruption | |
281 | * (probably due to a software bug). | |
282 | */ | |
283 | flag |= REF_ISBROKEN; | |
284 | } | |
285 | ||
286 | if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) { | |
287 | if (!refname_is_safe(refname)) | |
288 | die("loose refname is dangerous: %s", refname); | |
289 | oidclr(&oid, refs->base.repo->hash_algo); | |
290 | flag |= REF_BAD_NAME | REF_ISBROKEN; | |
291 | } | |
292 | ||
293 | if (!(flag & REF_ISSYMREF)) | |
294 | referent = NULL; | |
295 | ||
296 | add_entry_to_dir(dir, create_ref_entry(refname, referent, &oid, flag)); | |
297 | } | |
298 | ||
299 | /* | |
300 | * Read the loose references from the namespace dirname into dir | |
301 | * (without recursing). dirname must end with '/'. dir must be the | |
302 | * directory entry corresponding to dirname. | |
303 | */ | |
304 | static void loose_fill_ref_dir(struct ref_store *ref_store, | |
305 | struct ref_dir *dir, const char *dirname) | |
306 | { | |
307 | struct files_ref_store *refs = | |
308 | files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir"); | |
309 | DIR *d; | |
310 | struct dirent *de; | |
311 | int dirnamelen = strlen(dirname); | |
312 | struct strbuf refname; | |
313 | struct strbuf path = STRBUF_INIT; | |
314 | ||
315 | files_ref_path(refs, &path, dirname); | |
316 | ||
317 | d = opendir(path.buf); | |
318 | if (!d) { | |
319 | strbuf_release(&path); | |
320 | return; | |
321 | } | |
322 | ||
323 | strbuf_init(&refname, dirnamelen + 257); | |
324 | strbuf_add(&refname, dirname, dirnamelen); | |
325 | ||
326 | while ((de = readdir(d)) != NULL) { | |
327 | unsigned char dtype; | |
328 | ||
329 | if (de->d_name[0] == '.') | |
330 | continue; | |
331 | if (ends_with(de->d_name, ".lock")) | |
332 | continue; | |
333 | strbuf_addstr(&refname, de->d_name); | |
334 | ||
335 | dtype = get_dtype(de, &path, 1); | |
336 | if (dtype == DT_DIR) { | |
337 | strbuf_addch(&refname, '/'); | |
338 | add_entry_to_dir(dir, | |
339 | create_dir_entry(dir->cache, refname.buf, | |
340 | refname.len)); | |
341 | } else if (dtype == DT_REG) { | |
342 | loose_fill_ref_dir_regular_file(refs, refname.buf, dir); | |
343 | } | |
344 | strbuf_setlen(&refname, dirnamelen); | |
345 | } | |
346 | strbuf_release(&refname); | |
347 | strbuf_release(&path); | |
348 | closedir(d); | |
349 | ||
350 | add_per_worktree_entries_to_dir(dir, dirname); | |
351 | } | |
352 | ||
353 | static int for_each_root_ref(struct files_ref_store *refs, | |
354 | int (*cb)(const char *refname, void *cb_data), | |
355 | void *cb_data) | |
356 | { | |
357 | struct strbuf path = STRBUF_INIT, refname = STRBUF_INIT; | |
358 | const char *dirname = refs->loose->root->name; | |
359 | struct dirent *de; | |
360 | size_t dirnamelen; | |
361 | int ret; | |
362 | DIR *d; | |
363 | ||
364 | files_ref_path(refs, &path, dirname); | |
365 | ||
366 | d = opendir(path.buf); | |
367 | if (!d) { | |
368 | strbuf_release(&path); | |
369 | return -1; | |
370 | } | |
371 | ||
372 | strbuf_addstr(&refname, dirname); | |
373 | dirnamelen = refname.len; | |
374 | ||
375 | while ((de = readdir(d)) != NULL) { | |
376 | unsigned char dtype; | |
377 | ||
378 | if (de->d_name[0] == '.') | |
379 | continue; | |
380 | if (ends_with(de->d_name, ".lock")) | |
381 | continue; | |
382 | strbuf_addstr(&refname, de->d_name); | |
383 | ||
384 | dtype = get_dtype(de, &path, 1); | |
385 | if (dtype == DT_REG && is_root_ref(de->d_name)) { | |
386 | ret = cb(refname.buf, cb_data); | |
387 | if (ret) | |
388 | goto done; | |
389 | } | |
390 | ||
391 | strbuf_setlen(&refname, dirnamelen); | |
392 | } | |
393 | ||
394 | ret = 0; | |
395 | ||
396 | done: | |
397 | strbuf_release(&refname); | |
398 | strbuf_release(&path); | |
399 | closedir(d); | |
400 | return ret; | |
401 | } | |
402 | ||
403 | struct fill_root_ref_data { | |
404 | struct files_ref_store *refs; | |
405 | struct ref_dir *dir; | |
406 | }; | |
407 | ||
408 | static int fill_root_ref(const char *refname, void *cb_data) | |
409 | { | |
410 | struct fill_root_ref_data *data = cb_data; | |
411 | loose_fill_ref_dir_regular_file(data->refs, refname, data->dir); | |
412 | return 0; | |
413 | } | |
414 | ||
415 | /* | |
416 | * Add root refs to the ref dir by parsing the directory for any files which | |
417 | * follow the root ref syntax. | |
418 | */ | |
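| /* | |
| * Root refs are files living directly in the root of the ref store, | |
| * e.g. "FETCH_HEAD" or "MERGE_HEAD"; is_root_ref() decides which file | |
| * names qualify. | |
| */ | |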
419 | static void add_root_refs(struct files_ref_store *refs, | |
420 | struct ref_dir *dir) | |
421 | { | |
422 | struct fill_root_ref_data data = { | |
423 | .refs = refs, | |
424 | .dir = dir, | |
425 | }; | |
426 | ||
427 | for_each_root_ref(refs, fill_root_ref, &data); | |
428 | } | |
429 | ||
430 | static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs, | |
431 | unsigned int flags) | |
432 | { | |
433 | if (!refs->loose) { | |
434 | struct ref_dir *dir; | |
435 | ||
436 | /* | |
437 | * Mark the top-level directory complete because we | |
438 | * are about to read the only subdirectory that can | |
439 | * hold references: | |
440 | */ | |
441 | refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir); | |
442 | ||
443 | /* We're going to fill the top level ourselves: */ | |
444 | refs->loose->root->flag &= ~REF_INCOMPLETE; | |
445 | ||
446 | dir = get_ref_dir(refs->loose->root); | |
447 | ||
448 | if (flags & DO_FOR_EACH_INCLUDE_ROOT_REFS) | |
449 | add_root_refs(refs, dir); | |
450 | ||
451 | /* | |
452 | * Add an incomplete entry for "refs/" (to be filled | |
453 | * lazily): | |
454 | */ | |
455 | add_entry_to_dir(dir, create_dir_entry(refs->loose, "refs/", 5)); | |
456 | } | |
457 | return refs->loose; | |
458 | } | |
459 | ||
460 | static int read_ref_internal(struct ref_store *ref_store, const char *refname, | |
461 | struct object_id *oid, struct strbuf *referent, | |
462 | unsigned int *type, int *failure_errno, int skip_packed_refs) | |
463 | { | |
464 | struct files_ref_store *refs = | |
465 | files_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); | |
466 | struct strbuf sb_contents = STRBUF_INIT; | |
467 | struct strbuf sb_path = STRBUF_INIT; | |
468 | const char *path; | |
469 | const char *buf; | |
470 | struct stat st; | |
471 | int fd; | |
472 | int ret = -1; | |
473 | int remaining_retries = 3; | |
474 | int myerr = 0; | |
475 | ||
476 | *type = 0; | |
477 | strbuf_reset(&sb_path); | |
478 | ||
479 | files_ref_path(refs, &sb_path, refname); | |
480 | ||
481 | path = sb_path.buf; | |
482 | ||
483 | stat_ref: | |
484 | /* | |
485 | * We might have to loop back here to avoid a race | |
486 | * condition: first we lstat() the file, then we try | |
487 | * to read it as a link or as a file. But if somebody | |
488 | * changes the type of the file (file <-> directory | |
489 | * <-> symlink) between the lstat() and reading, then | |
490 | * we don't want to report that as an error but rather | |
491 | * try again starting with the lstat(). | |
492 | * | |
493 | * We'll keep a count of the retries, though, just to avoid | |
494 | * any confusing situation sending us into an infinite loop. | |
495 | */ | |
496 | ||
497 | if (remaining_retries-- <= 0) | |
498 | goto out; | |
499 | ||
500 | if (lstat(path, &st) < 0) { | |
501 | int ignore_errno; | |
502 | myerr = errno; | |
503 | if (myerr != ENOENT || skip_packed_refs) | |
504 | goto out; | |
505 | if (refs_read_raw_ref(refs->packed_ref_store, refname, oid, | |
506 | referent, type, &ignore_errno)) { | |
507 | myerr = ENOENT; | |
508 | goto out; | |
509 | } | |
510 | ret = 0; | |
511 | goto out; | |
512 | } | |
513 | ||
514 | /* Follow "normalized" - i.e. "refs/.." - symlinks by hand */ | |
515 | if (S_ISLNK(st.st_mode)) { | |
516 | strbuf_reset(&sb_contents); | |
517 | if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) { | |
518 | myerr = errno; | |
519 | if (myerr == ENOENT || myerr == EINVAL) | |
520 | /* inconsistent with lstat; retry */ | |
521 | goto stat_ref; | |
522 | else | |
523 | goto out; | |
524 | } | |
525 | if (starts_with(sb_contents.buf, "refs/") && | |
526 | !check_refname_format(sb_contents.buf, 0)) { | |
527 | strbuf_swap(&sb_contents, referent); | |
528 | *type |= REF_ISSYMREF; | |
529 | ret = 0; | |
530 | goto out; | |
531 | } | |
532 | /* | |
533 | * It doesn't look like a refname; fall through to just | |
534 | * treating it like a non-symlink, and reading whatever it | |
535 | * points to. | |
536 | */ | |
537 | } | |
538 | ||
539 | /* Is it a directory? */ | |
540 | if (S_ISDIR(st.st_mode)) { | |
541 | int ignore_errno; | |
542 | /* | |
543 | * Even though there is a directory where the loose | |
544 | * ref is supposed to be, there could still be a | |
545 | * packed ref: | |
546 | */ | |
547 | if (skip_packed_refs || | |
548 | refs_read_raw_ref(refs->packed_ref_store, refname, oid, | |
549 | referent, type, &ignore_errno)) { | |
550 | myerr = EISDIR; | |
551 | goto out; | |
552 | } | |
553 | ret = 0; | |
554 | goto out; | |
555 | } | |
556 | ||
557 | /* | |
558 | * Anything else, just open it and try to use it as | |
559 | * a ref | |
560 | */ | |
561 | fd = open(path, O_RDONLY); | |
562 | if (fd < 0) { | |
563 | myerr = errno; | |
564 | if (myerr == ENOENT && !S_ISLNK(st.st_mode)) | |
565 | /* inconsistent with lstat; retry */ | |
566 | goto stat_ref; | |
567 | else | |
568 | goto out; | |
569 | } | |
570 | strbuf_reset(&sb_contents); | |
571 | if (strbuf_read(&sb_contents, fd, 256) < 0) { | |
572 | myerr = errno; | |
573 | close(fd); | |
574 | goto out; | |
575 | } | |
576 | close(fd); | |
577 | strbuf_rtrim(&sb_contents); | |
578 | buf = sb_contents.buf; | |
579 | ||
580 | ret = parse_loose_ref_contents(ref_store->repo->hash_algo, buf, | |
581 | oid, referent, type, NULL, &myerr); | |
582 | ||
583 | out: | |
584 | if (ret && !myerr) | |
585 | BUG("returning non-zero %d, should have set myerr!", ret); | |
586 | *failure_errno = myerr; | |
587 | ||
588 | strbuf_release(&sb_path); | |
589 | strbuf_release(&sb_contents); | |
590 | errno = 0; | |
591 | return ret; | |
592 | } | |
593 | ||
594 | static int files_read_raw_ref(struct ref_store *ref_store, const char *refname, | |
595 | struct object_id *oid, struct strbuf *referent, | |
596 | unsigned int *type, int *failure_errno) | |
597 | { | |
598 | return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0); | |
599 | } | |
600 | ||
601 | static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname, | |
602 | struct strbuf *referent) | |
603 | { | |
604 | struct object_id oid; | |
605 | int failure_errno, ret; | |
606 | unsigned int type; | |
607 | ||
608 | ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1); | |
609 | if (!ret && !(type & REF_ISSYMREF)) | |
610 | return NOT_A_SYMREF; | |
611 | return ret; | |
612 | } | |
613 | ||
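| /* | |
| * A loose ref file contains either "ref: <refname>" (a symbolic ref, | |
| * e.g. "ref: refs/heads/main") or the hex object ID of the object the | |
| * ref points at, optionally followed by further data (as in FETCH_HEAD). | |
| * The "trailing" pointer, if non-NULL, is set to the first character | |
| * after the object ID. | |
| */ | |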
614 | int parse_loose_ref_contents(const struct git_hash_algo *algop, | |
615 | const char *buf, struct object_id *oid, | |
616 | struct strbuf *referent, unsigned int *type, | |
617 | const char **trailing, int *failure_errno) | |
618 | { | |
619 | const char *p; | |
620 | if (skip_prefix(buf, "ref:", &buf)) { | |
621 | while (isspace(*buf)) | |
622 | buf++; | |
623 | ||
624 | strbuf_reset(referent); | |
625 | strbuf_addstr(referent, buf); | |
626 | *type |= REF_ISSYMREF; | |
627 | return 0; | |
628 | } | |
629 | ||
630 | /* | |
631 | * FETCH_HEAD has additional data after the object ID. | |
632 | */ | |
633 | if (parse_oid_hex_algop(buf, oid, &p, algop) || | |
634 | (*p != '\0' && !isspace(*p))) { | |
635 | *type |= REF_ISBROKEN; | |
636 | *failure_errno = EINVAL; | |
637 | return -1; | |
638 | } | |
639 | ||
640 | if (trailing) | |
641 | *trailing = p; | |
642 | ||
643 | return 0; | |
644 | } | |
645 | ||
646 | static void unlock_ref(struct ref_lock *lock) | |
647 | { | |
648 | lock->count--; | |
649 | if (!lock->count) { | |
650 | rollback_lock_file(&lock->lk); | |
651 | free(lock->ref_name); | |
652 | free(lock); | |
653 | } | |
654 | } | |
655 | ||
656 | /* | |
657 | * Lock refname, without following symrefs, and set *lock_p to point | |
658 | * at a newly-allocated lock object. Fill in lock->old_oid, referent, | |
659 | * and type similarly to read_raw_ref(). | |
660 | * | |
661 | * The caller must verify that refname is a "safe" reference name (in | |
662 | * the sense of refname_is_safe()) before calling this function. | |
663 | * | |
664 | * If the reference doesn't already exist, verify that refname doesn't | |
665 | * have a D/F conflict with any existing references. extras and skip | |
666 | * are passed to refs_verify_refname_available() for this check. | |
667 | * | |
668 | * If mustexist is not set and the reference is not found or is | |
669 | * broken, lock the reference anyway but clear old_oid. | |
670 | * | |
671 | * Return 0 on success. On failure, write an error message to err and | |
672 | * return REF_TRANSACTION_ERROR_NAME_CONFLICT or REF_TRANSACTION_ERROR_GENERIC. | |
673 | * | |
674 | * Implementation note: This function is basically | |
675 | * | |
676 | * lock reference | |
677 | * read_raw_ref() | |
678 | * | |
679 | * but it includes a lot more code to | |
680 | * - Deal with possible races with other processes | |
681 | * - Avoid calling refs_verify_refname_available() when it can be | |
682 | * avoided, namely if we were successfully able to read the ref | |
683 | * - Generate informative error messages in the case of failure | |
684 | */ | |
685 | static enum ref_transaction_error lock_raw_ref(struct files_ref_store *refs, | |
686 | struct ref_update *update, | |
687 | size_t update_idx, | |
688 | int mustexist, | |
689 | struct string_list *refnames_to_check, | |
690 | const struct string_list *extras, | |
691 | struct ref_lock **lock_p, | |
692 | struct strbuf *referent, | |
693 | struct strbuf *err) | |
694 | { | |
695 | enum ref_transaction_error ret = REF_TRANSACTION_ERROR_GENERIC; | |
696 | const char *refname = update->refname; | |
697 | unsigned int *type = &update->type; | |
698 | struct ref_lock *lock; | |
699 | struct strbuf ref_file = STRBUF_INIT; | |
700 | int attempts_remaining = 3; | |
701 | int failure_errno; | |
702 | ||
703 | assert(err); | |
704 | files_assert_main_repository(refs, "lock_raw_ref"); | |
705 | ||
706 | *type = 0; | |
707 | ||
708 | /* First lock the file so it can't change out from under us. */ | |
709 | ||
710 | *lock_p = CALLOC_ARRAY(lock, 1); | |
711 | ||
712 | lock->ref_name = xstrdup(refname); | |
713 | lock->count = 1; | |
714 | files_ref_path(refs, &ref_file, refname); | |
715 | ||
716 | retry: | |
717 | switch (safe_create_leading_directories(the_repository, ref_file.buf)) { | |
718 | case SCLD_OK: | |
719 | break; /* success */ | |
720 | case SCLD_EXISTS: | |
721 | /* | |
722 | * Suppose refname is "refs/foo/bar". We just failed | |
723 | * to create the containing directory, "refs/foo", | |
724 | * because there was a non-directory in the way. This | |
725 | * indicates a D/F conflict, probably because of | |
726 | * another reference such as "refs/foo". There is no | |
727 | * reason to expect this error to be transitory. | |
728 | */ | |
729 | if (refs_verify_refname_available(&refs->base, refname, | |
730 | extras, NULL, 0, err)) { | |
731 | if (mustexist) { | |
732 | /* | |
733 | * To the user the relevant error is | |
734 | * that the "mustexist" reference is | |
735 | * missing: | |
736 | */ | |
737 | strbuf_reset(err); | |
738 | strbuf_addf(err, "unable to resolve reference '%s'", | |
739 | refname); | |
740 | ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; | |
741 | } else { | |
742 | /* | |
743 | * The error message set by | |
744 | * refs_verify_refname_available() is | |
745 | * OK. | |
746 | */ | |
747 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; | |
748 | } | |
749 | } else { | |
750 | /* | |
751 | * The file that is in the way isn't a loose | |
752 | * reference. Report it as a low-level | |
753 | * failure. | |
754 | */ | |
755 | strbuf_addf(err, "unable to create lock file %s.lock; " | |
756 | "non-directory in the way", | |
757 | ref_file.buf); | |
758 | } | |
759 | goto error_return; | |
760 | case SCLD_VANISHED: | |
761 | /* Maybe another process was tidying up. Try again. */ | |
762 | if (--attempts_remaining > 0) | |
763 | goto retry; | |
764 | /* fall through */ | |
765 | default: | |
766 | strbuf_addf(err, "unable to create directory for %s", | |
767 | ref_file.buf); | |
768 | goto error_return; | |
769 | } | |
770 | ||
771 | if (hold_lock_file_for_update_timeout( | |
772 | &lock->lk, ref_file.buf, LOCK_NO_DEREF, | |
773 | get_files_ref_lock_timeout_ms()) < 0) { | |
774 | int myerr = errno; | |
775 | errno = 0; | |
776 | if (myerr == ENOENT && --attempts_remaining > 0) { | |
777 | /* | |
778 | * Maybe somebody just deleted one of the | |
779 | * directories leading to ref_file. Try | |
780 | * again: | |
781 | */ | |
782 | goto retry; | |
783 | } else { | |
784 | unable_to_lock_message(ref_file.buf, myerr, err); | |
785 | goto error_return; | |
786 | } | |
787 | } | |
788 | ||
789 | /* | |
790 | * Now we hold the lock and can read the reference without | |
791 | * fear that its value will change. | |
792 | */ | |
793 | ||
794 | if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent, | |
795 | type, &failure_errno)) { | |
796 | struct string_list_item *item; | |
797 | ||
798 | if (failure_errno == ENOENT) { | |
799 | if (mustexist) { | |
800 | /* Garden variety missing reference. */ | |
801 | strbuf_addf(err, "unable to resolve reference '%s'", | |
802 | refname); | |
803 | ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; | |
804 | goto error_return; | |
805 | } else { | |
806 | /* | |
807 | * Reference is missing, but that's OK. We | |
808 | * know that there is not a conflict with | |
809 | * another loose reference because | |
810 | * (supposing that we are trying to lock | |
811 | * reference "refs/foo/bar"): | |
812 | * | |
813 | * - We were successfully able to create | |
814 | * the lockfile refs/foo/bar.lock, so we | |
815 | * know there cannot be a loose reference | |
816 | * named "refs/foo". | |
817 | * | |
818 | * - We got ENOENT and not EISDIR, so we | |
819 | * know that there cannot be a loose | |
820 | * reference named "refs/foo/bar/baz". | |
821 | */ | |
822 | } | |
823 | } else if (failure_errno == EISDIR) { | |
824 | /* | |
825 | * There is a directory in the way. It might have | |
826 | * contained references that have been deleted. If | |
827 | * we don't require that the reference already | |
828 | * exists, try to remove the directory so that it | |
829 | * doesn't cause trouble when we want to rename the | |
830 | * lockfile into place later. | |
831 | */ | |
832 | if (mustexist) { | |
833 | /* Garden variety missing reference. */ | |
834 | strbuf_addf(err, "unable to resolve reference '%s'", | |
835 | refname); | |
836 | ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; | |
837 | goto error_return; | |
838 | } else if (remove_dir_recursively(&ref_file, | |
839 | REMOVE_DIR_EMPTY_ONLY)) { | |
840 | if (refs_verify_refname_available( | |
841 | &refs->base, refname, | |
842 | extras, NULL, 0, err)) { | |
843 | /* | |
844 | * The error message set by | |
845 | * verify_refname_available() is OK. | |
846 | */ | |
847 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; | |
848 | goto error_return; | |
849 | } else { | |
850 | /* | |
851 | * We can't delete the directory, | |
852 | * but we also don't know of any | |
853 | * references that it should | |
854 | * contain. | |
855 | */ | |
856 | strbuf_addf(err, "there is a non-empty directory '%s' " | |
857 | "blocking reference '%s'", | |
858 | ref_file.buf, refname); | |
859 | goto error_return; | |
860 | } | |
861 | } | |
862 | } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) { | |
863 | strbuf_addf(err, "unable to resolve reference '%s': " | |
864 | "reference broken", refname); | |
865 | goto error_return; | |
866 | } else { | |
867 | strbuf_addf(err, "unable to resolve reference '%s': %s", | |
868 | refname, strerror(failure_errno)); | |
869 | goto error_return; | |
870 | } | |
871 | ||
872 | /* | |
873 | * If the ref did not exist and we are creating it, we have to | |
874 | * make sure there is no existing packed ref that conflicts | |
875 | * with refname. This check is deferred so that we can batch it. | |
876 | */ | |
877 | item = string_list_append(refnames_to_check, refname); | |
878 | item->util = xmalloc(sizeof(update_idx)); | |
879 | memcpy(item->util, &update_idx, sizeof(update_idx)); | |
880 | } | |
881 | ||
882 | ret = 0; | |
883 | goto out; | |
884 | ||
885 | error_return: | |
886 | unlock_ref(lock); | |
887 | *lock_p = NULL; | |
888 | ||
889 | out: | |
890 | strbuf_release(&ref_file); | |
891 | return ret; | |
892 | } | |
893 | ||
894 | struct files_ref_iterator { | |
895 | struct ref_iterator base; | |
896 | ||
897 | struct ref_iterator *iter0; | |
898 | struct repository *repo; | |
899 | unsigned int flags; | |
900 | }; | |
901 | ||
902 | static int files_ref_iterator_advance(struct ref_iterator *ref_iterator) | |
903 | { | |
904 | struct files_ref_iterator *iter = | |
905 | (struct files_ref_iterator *)ref_iterator; | |
906 | int ok; | |
907 | ||
908 | while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) { | |
909 | if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY && | |
910 | parse_worktree_ref(iter->iter0->refname, NULL, NULL, | |
911 | NULL) != REF_WORKTREE_CURRENT) | |
912 | continue; | |
913 | ||
914 | if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) && | |
915 | (iter->iter0->flags & REF_ISSYMREF) && | |
916 | (iter->iter0->flags & REF_ISBROKEN)) | |
917 | continue; | |
918 | ||
919 | if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) && | |
920 | !ref_resolves_to_object(iter->iter0->refname, | |
921 | iter->repo, | |
922 | iter->iter0->oid, | |
923 | iter->iter0->flags)) | |
924 | continue; | |
925 | ||
926 | iter->base.refname = iter->iter0->refname; | |
927 | iter->base.oid = iter->iter0->oid; | |
928 | iter->base.flags = iter->iter0->flags; | |
929 | iter->base.referent = iter->iter0->referent; | |
930 | ||
931 | return ITER_OK; | |
932 | } | |
933 | ||
934 | return ok; | |
935 | } | |
936 | ||
937 | static int files_ref_iterator_seek(struct ref_iterator *ref_iterator, | |
938 | const char *refname, unsigned int flags) | |
939 | { | |
940 | struct files_ref_iterator *iter = | |
941 | (struct files_ref_iterator *)ref_iterator; | |
942 | return ref_iterator_seek(iter->iter0, refname, flags); | |
943 | } | |
944 | ||
945 | static int files_ref_iterator_peel(struct ref_iterator *ref_iterator, | |
946 | struct object_id *peeled) | |
947 | { | |
948 | struct files_ref_iterator *iter = | |
949 | (struct files_ref_iterator *)ref_iterator; | |
950 | ||
951 | return ref_iterator_peel(iter->iter0, peeled); | |
952 | } | |
953 | ||
954 | static void files_ref_iterator_release(struct ref_iterator *ref_iterator) | |
955 | { | |
956 | struct files_ref_iterator *iter = | |
957 | (struct files_ref_iterator *)ref_iterator; | |
958 | ref_iterator_free(iter->iter0); | |
959 | } | |
960 | ||
961 | static struct ref_iterator_vtable files_ref_iterator_vtable = { | |
962 | .advance = files_ref_iterator_advance, | |
963 | .seek = files_ref_iterator_seek, | |
964 | .peel = files_ref_iterator_peel, | |
965 | .release = files_ref_iterator_release, | |
966 | }; | |
967 | ||
968 | static struct ref_iterator *files_ref_iterator_begin( | |
969 | struct ref_store *ref_store, | |
970 | const char *prefix, const char **exclude_patterns, | |
971 | unsigned int flags) | |
972 | { | |
973 | struct files_ref_store *refs; | |
974 | struct ref_iterator *loose_iter, *packed_iter, *overlay_iter; | |
975 | struct files_ref_iterator *iter; | |
976 | struct ref_iterator *ref_iterator; | |
977 | unsigned int required_flags = REF_STORE_READ; | |
978 | ||
979 | if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) | |
980 | required_flags |= REF_STORE_ODB; | |
981 | ||
982 | refs = files_downcast(ref_store, required_flags, "ref_iterator_begin"); | |
983 | ||
984 | /* | |
985 | * We must make sure that all loose refs are read before | |
986 | * accessing the packed-refs file; this avoids a race | |
987 | * condition if loose refs are migrated to the packed-refs | |
988 | * file by a simultaneous process, but our in-memory view is | |
989 | * from before the migration. We ensure this as follows: | |
990 | * First, we start the loose refs iteration with its | |
991 | * `prime_ref` argument set to true. This causes the loose | |
992 | * references in the subtree to be pre-read into the cache. | |
993 | * (If they've already been read, that's OK; we only need to | |
994 | * guarantee that they're read before the packed refs, not | |
995 | * *how much* before.) After that, we call | |
996 | * packed_ref_iterator_begin(), which internally checks | |
997 | * whether the packed-ref cache is up to date with what is on | |
998 | * disk, and re-reads it if not. | |
999 | */ | |
1000 | ||
1001 | loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, flags), | |
1002 | prefix, ref_store->repo, 1); | |
1003 | ||
1004 | /* | |
1005 | * The packed-refs file might contain broken references, for | |
1006 | * example an old version of a reference that points at an | |
1007 | * object that has since been garbage-collected. This is OK as | |
1008 | * long as there is a corresponding loose reference that | |
1009 | * overrides it, and we don't want to emit an error message in | |
1010 | * this case. So ask the packed_ref_store for all of its | |
1011 | * references, and (if needed) do our own check for broken | |
1012 | * ones in files_ref_iterator_advance(), after we have merged | |
1013 | * the packed and loose references. | |
1014 | */ | |
1015 | packed_iter = refs_ref_iterator_begin( | |
1016 | refs->packed_ref_store, prefix, exclude_patterns, 0, | |
1017 | DO_FOR_EACH_INCLUDE_BROKEN); | |
1018 | ||
1019 | overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter); | |
1020 | ||
1021 | CALLOC_ARRAY(iter, 1); | |
1022 | ref_iterator = &iter->base; | |
1023 | base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable); | |
1024 | iter->iter0 = overlay_iter; | |
1025 | iter->repo = ref_store->repo; | |
1026 | iter->flags = flags; | |
1027 | ||
1028 | return ref_iterator; | |
1029 | } | |
1030 | ||
1031 | /* | |
1032 | * Callback function for raceproof_create_file(). This function is | |
1033 | * expected to do something that makes dirname(path) permanent despite | |
1034 | * the fact that other processes might be cleaning up empty | |
1035 | * directories at the same time. Usually it will create a file named | |
1036 | * path, but alternatively it could create another file in that | |
1037 | * directory, or even chdir() into that directory. The function should | |
1038 | * return 0 if the action was completed successfully. On error, it | |
1039 | * should return a nonzero result and set errno. | |
1040 | * raceproof_create_file() treats two errno values specially: | |
1041 | * | |
1042 | * - ENOENT -- dirname(path) does not exist. In this case, | |
1043 | * raceproof_create_file() tries creating dirname(path) | |
1044 | * (and any parent directories, if necessary) and calls | |
1045 | * the function again. | |
1046 | * | |
1047 | * - EISDIR -- the file already exists and is a directory. In this | |
1048 | * case, raceproof_create_file() removes the directory if | |
1049 | * it is empty (and recursively any empty directories that | |
1050 | * it contains) and calls the function again. | |
1051 | * | |
1052 | * Any other errno causes raceproof_create_file() to fail with the | |
1053 | * callback's return value and errno. | |
1054 | * | |
1055 | * Obviously, this function should be OK with being called again if it | |
1056 | * fails with ENOENT or EISDIR. In other scenarios it will not be | |
1057 | * called again. | |
1058 | */ | |
1059 | typedef int create_file_fn(const char *path, void *cb); | |
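| /* | |
| * In this file, create_reflock() and rename_tmp_log_callback() below are | |
| * the create_file_fn implementations handed to raceproof_create_file(). | |
| */ | |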
1060 | ||
1061 | /* | |
1062 | * Create a file in dirname(path) by calling fn, creating leading | |
1063 | * directories if necessary. Retry a few times in case we are racing | |
1064 | * with another process that is trying to clean up the directory that | |
1065 | * contains path. See the documentation for create_file_fn for more | |
1066 | * details. | |
1067 | * | |
1068 | * Return the value and set the errno that resulted from the most | |
1069 | * recent call of fn. fn is always called at least once, and will be | |
1070 | * called more than once if it returns ENOENT or EISDIR. | |
1071 | */ | |
1072 | static int raceproof_create_file(const char *path, create_file_fn fn, void *cb) | |
1073 | { | |
1074 | /* | |
1075 | * The number of times we will try to remove empty directories | |
1076 | * in the way of path. This is only 1 because if another | |
1077 | * process is racily creating directories that conflict with | |
1078 | * us, we don't want to fight against them. | |
1079 | */ | |
1080 | int remove_directories_remaining = 1; | |
1081 | ||
1082 | /* | |
1083 | * The number of times that we will try to create the | |
1084 | * directories containing path. We are willing to attempt this | |
1085 | * more than once, because another process could be trying to | |
1086 | * clean up empty directories at the same time as we are | |
1087 | * trying to create them. | |
1088 | */ | |
1089 | int create_directories_remaining = 3; | |
1090 | ||
1091 | /* A scratch copy of path, filled lazily if we need it: */ | |
1092 | struct strbuf path_copy = STRBUF_INIT; | |
1093 | ||
1094 | int ret, save_errno; | |
1095 | ||
1096 | /* Sanity check: */ | |
1097 | assert(*path); | |
1098 | ||
1099 | retry_fn: | |
1100 | ret = fn(path, cb); | |
1101 | save_errno = errno; | |
1102 | if (!ret) | |
1103 | goto out; | |
1104 | ||
1105 | if (errno == EISDIR && remove_directories_remaining-- > 0) { | |
1106 | /* | |
1107 | * A directory is in the way. Maybe it is empty; try | |
1108 | * to remove it: | |
1109 | */ | |
1110 | if (!path_copy.len) | |
1111 | strbuf_addstr(&path_copy, path); | |
1112 | ||
1113 | if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY)) | |
1114 | goto retry_fn; | |
1115 | } else if (errno == ENOENT && create_directories_remaining-- > 0) { | |
1116 | /* | |
1117 | * Maybe the containing directory didn't exist, or | |
1118 | * maybe it was just deleted by a process that is | |
1119 | * racing with us to clean up empty directories. Try | |
1120 | * to create it: | |
1121 | */ | |
1122 | enum scld_error scld_result; | |
1123 | ||
1124 | if (!path_copy.len) | |
1125 | strbuf_addstr(&path_copy, path); | |
1126 | ||
1127 | do { | |
1128 | scld_result = safe_create_leading_directories(the_repository, path_copy.buf); | |
1129 | if (scld_result == SCLD_OK) | |
1130 | goto retry_fn; | |
1131 | } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0); | |
1132 | } | |
1133 | ||
1134 | out: | |
1135 | strbuf_release(&path_copy); | |
1136 | errno = save_errno; | |
1137 | return ret; | |
1138 | } | |
1139 | ||
1140 | static int remove_empty_directories(struct strbuf *path) | |
1141 | { | |
1142 | /* | |
1143 | * we want to create a file but there is a directory there; | |
1144 | * if that is an empty directory (or a directory that contains | |
1145 | * only empty directories), remove them. | |
1146 | */ | |
1147 | return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY); | |
1148 | } | |
1149 | ||
1150 | static int create_reflock(const char *path, void *cb) | |
1151 | { | |
1152 | struct lock_file *lk = cb; | |
1153 | ||
1154 | return hold_lock_file_for_update_timeout( | |
1155 | lk, path, LOCK_NO_DEREF, | |
1156 | get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0; | |
1157 | } | |
1158 | ||
1159 | /* | |
1160 | * Lock a ref and return the lock on success, or NULL on failure. | |
1161 | */ | |
1162 | static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, | |
1163 | const char *refname, | |
1164 | struct strbuf *err) | |
1165 | { | |
1166 | struct strbuf ref_file = STRBUF_INIT; | |
1167 | struct ref_lock *lock; | |
1168 | ||
1169 | files_assert_main_repository(refs, "lock_ref_oid_basic"); | |
1170 | assert(err); | |
1171 | ||
1172 | CALLOC_ARRAY(lock, 1); | |
1173 | ||
1174 | files_ref_path(refs, &ref_file, refname); | |
1175 | ||
1176 | /* | |
1177 | * If the ref did not exist and we are creating it, make sure | |
1178 | * there is no existing packed ref whose name begins with our | |
1179 | * refname, nor a packed ref whose name is a proper prefix of | |
1180 | * our refname. | |
1181 | */ | |
1182 | if (is_null_oid(&lock->old_oid) && | |
1183 | refs_verify_refname_available(refs->packed_ref_store, refname, | |
1184 | NULL, NULL, 0, err)) | |
1185 | goto error_return; | |
1186 | ||
1187 | lock->ref_name = xstrdup(refname); | |
1188 | lock->count = 1; | |
1189 | ||
1190 | if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) { | |
1191 | unable_to_lock_message(ref_file.buf, errno, err); | |
1192 | goto error_return; | |
1193 | } | |
1194 | ||
1195 | if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0, | |
1196 | &lock->old_oid, NULL)) | |
1197 | oidclr(&lock->old_oid, refs->base.repo->hash_algo); | |
1198 | goto out; | |
1199 | ||
1200 | error_return: | |
1201 | unlock_ref(lock); | |
1202 | lock = NULL; | |
1203 | ||
1204 | out: | |
1205 | strbuf_release(&ref_file); | |
1206 | return lock; | |
1207 | } | |
1208 | ||
1209 | struct ref_to_prune { | |
1210 | struct ref_to_prune *next; | |
1211 | struct object_id oid; | |
1212 | char name[FLEX_ARRAY]; | |
1213 | }; | |
1214 | ||
1215 | enum { | |
1216 | REMOVE_EMPTY_PARENTS_REF = 0x01, | |
1217 | REMOVE_EMPTY_PARENTS_REFLOG = 0x02 | |
1218 | }; | |
1219 | ||
1220 | /* | |
1221 | * Remove empty parent directories associated with the specified | |
1222 | * reference and/or its reflog, but spare [logs/]refs/ and immediate | |
1223 | * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or | |
1224 | * REMOVE_EMPTY_PARENTS_REFLOG. | |
1225 | */ | |
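| /* | |
| * For example, after deleting "refs/heads/foo/bar", "refs/heads/foo" may | |
| * be removed if it is now empty, but "refs/heads" and "refs/" themselves | |
| * are always left in place. | |
| */ | |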
1226 | static void try_remove_empty_parents(struct files_ref_store *refs, | |
1227 | const char *refname, | |
1228 | unsigned int flags) | |
1229 | { | |
1230 | struct strbuf buf = STRBUF_INIT; | |
1231 | struct strbuf sb = STRBUF_INIT; | |
1232 | char *p, *q; | |
1233 | int i; | |
1234 | ||
1235 | strbuf_addstr(&buf, refname); | |
1236 | p = buf.buf; | |
1237 | for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */ | |
1238 | while (*p && *p != '/') | |
1239 | p++; | |
1240 | /* tolerate duplicate slashes; see check_refname_format() */ | |
1241 | while (*p == '/') | |
1242 | p++; | |
1243 | } | |
1244 | q = buf.buf + buf.len; | |
1245 | while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) { | |
1246 | while (q > p && *q != '/') | |
1247 | q--; | |
1248 | while (q > p && *(q-1) == '/') | |
1249 | q--; | |
1250 | if (q == p) | |
1251 | break; | |
1252 | strbuf_setlen(&buf, q - buf.buf); | |
1253 | ||
1254 | strbuf_reset(&sb); | |
1255 | files_ref_path(refs, &sb, buf.buf); | |
1256 | if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf)) | |
1257 | flags &= ~REMOVE_EMPTY_PARENTS_REF; | |
1258 | ||
1259 | strbuf_reset(&sb); | |
1260 | files_reflog_path(refs, &sb, buf.buf); | |
1261 | if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf)) | |
1262 | flags &= ~REMOVE_EMPTY_PARENTS_REFLOG; | |
1263 | } | |
1264 | strbuf_release(&buf); | |
1265 | strbuf_release(&sb); | |
1266 | } | |
1267 | ||
1268 | /* make sure nobody touched the ref, and unlink */ | |
1269 | static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r) | |
1270 | { | |
1271 | struct ref_transaction *transaction; | |
1272 | struct strbuf err = STRBUF_INIT; | |
1273 | int ret = -1; | |
1274 | ||
1275 | if (check_refname_format(r->name, 0)) | |
1276 | return; | |
1277 | ||
1278 | transaction = ref_store_transaction_begin(&refs->base, 0, &err); | |
1279 | if (!transaction) | |
1280 | goto cleanup; | |
1281 | ref_transaction_add_update( | |
1282 | transaction, r->name, | |
1283 | REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING, | |
1284 | null_oid(the_hash_algo), &r->oid, NULL, NULL, NULL, NULL); | |
1285 | if (ref_transaction_commit(transaction, &err)) | |
1286 | goto cleanup; | |
1287 | ||
1288 | ret = 0; | |
1289 | ||
1290 | cleanup: | |
1291 | if (ret) | |
1292 | error("%s", err.buf); | |
1293 | strbuf_release(&err); | |
1294 | ref_transaction_free(transaction); | |
1295 | return; | |
1296 | } | |
1297 | ||
1298 | /* | |
1299 | * Prune the loose versions of the references in the linked list | |
1300 | * `*refs_to_prune`, freeing the entries in the list as we go. | |
1301 | */ | |
1302 | static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune) | |
1303 | { | |
1304 | while (*refs_to_prune) { | |
1305 | struct ref_to_prune *r = *refs_to_prune; | |
1306 | *refs_to_prune = r->next; | |
1307 | prune_ref(refs, r); | |
1308 | free(r); | |
1309 | } | |
1310 | } | |
1311 | ||
1312 | /* | |
1313 | * Return true if the specified reference should be packed. | |
1314 | */ | |
1315 | static int should_pack_ref(struct files_ref_store *refs, | |
1316 | const char *refname, | |
1317 | const struct object_id *oid, unsigned int ref_flags, | |
1318 | struct pack_refs_opts *opts) | |
1319 | { | |
1320 | struct string_list_item *item; | |
1321 | ||
1322 | /* Do not pack per-worktree refs: */ | |
1323 | if (parse_worktree_ref(refname, NULL, NULL, NULL) != | |
1324 | REF_WORKTREE_SHARED) | |
1325 | return 0; | |
1326 | ||
1327 | /* Do not pack symbolic refs: */ | |
1328 | if (ref_flags & REF_ISSYMREF) | |
1329 | return 0; | |
1330 | ||
1331 | /* Do not pack broken refs: */ | |
1332 | if (!ref_resolves_to_object(refname, refs->base.repo, oid, ref_flags)) | |
1333 | return 0; | |
1334 | ||
1335 | if (ref_excluded(opts->exclusions, refname)) | |
1336 | return 0; | |
1337 | ||
1338 | for_each_string_list_item(item, opts->includes) | |
1339 | if (!wildmatch(item->string, refname, 0)) | |
1340 | return 1; | |
1341 | ||
1342 | return 0; | |
1343 | } | |
1344 | ||
1345 | static int should_pack_refs(struct files_ref_store *refs, | |
1346 | struct pack_refs_opts *opts) | |
1347 | { | |
1348 | struct ref_iterator *iter; | |
1349 | size_t packed_size; | |
1350 | size_t refcount = 0; | |
1351 | size_t limit; | |
1352 | int ret; | |
1353 | ||
1354 | if (!(opts->flags & PACK_REFS_AUTO)) | |
1355 | return 1; | |
1356 | ||
1357 | ret = packed_refs_size(refs->packed_ref_store, &packed_size); | |
1358 | if (ret < 0) | |
1359 | die("cannot determine packed-refs size"); | |
1360 | ||
1361 | /* | |
1362 | * Packing loose references into the packed-refs file scales with the | |
1363 | * number of references we're about to write. We thus decide whether we | |
1364 | * repack refs by weighing the current size of the packed-refs file | |
1365 | * against the number of loose references. This is done such that we do | |
1366 | * not repack too often on repositories with a huge number of | |
1367 | * references, where we can expect a lot of churn in the number of | |
1368 | * references. | |
1369 | * | |
1370 | * As a heuristic, we repack if the number of loose references in the | |
1371 | * repository exceeds `log2(nr_packed_refs) * 5`, where we estimate | |
1372 | * `nr_packed_refs = packed_size / 100`, which scales as following: | |
1373 | * | |
1374 | * - 1kB ~ 10 packed refs: 16 refs | |
1375 | * - 10kB ~ 100 packed refs: 33 refs | |
1376 | * - 100kB ~ 1k packed refs: 49 refs | |
1377 | * - 1MB ~ 10k packed refs: 66 refs | |
1378 | * - 10MB ~ 100k packed refs: 82 refs | |
1379 | * - 100MB ~ 1m packed refs: 99 refs | |
1380 | * | |
1381 | * We thus allow roughly 16 additional loose refs per factor of ten of | |
1382 | * packed refs. This heuristic may be tweaked in the future, but should | |
1383 | * serve as a sufficiently good first iteration. | |
1384 | */ | |
1385 | limit = log2u(packed_size / 100) * 5; | |
1386 | if (limit < 16) | |
1387 | limit = 16; | |
1388 | ||
1389 | iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL, | |
1390 | refs->base.repo, 0); | |
1391 | while ((ret = ref_iterator_advance(iter)) == ITER_OK) { | |
1392 | if (should_pack_ref(refs, iter->refname, iter->oid, | |
1393 | iter->flags, opts)) | |
1394 | refcount++; | |
1395 | if (refcount >= limit) { | |
1396 | ref_iterator_free(iter); | |
1397 | return 1; | |
1398 | } | |
1399 | } | |
1400 | ||
1401 | if (ret != ITER_DONE) | |
1402 | die("error while iterating over references"); | |
1403 | ||
1404 | ref_iterator_free(iter); | |
1405 | return 0; | |
1406 | } | |
1407 | ||
1408 | static int files_pack_refs(struct ref_store *ref_store, | |
1409 | struct pack_refs_opts *opts) | |
1410 | { | |
1411 | struct files_ref_store *refs = | |
1412 | files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, | |
1413 | "pack_refs"); | |
1414 | struct ref_iterator *iter; | |
1415 | int ok; | |
1416 | struct ref_to_prune *refs_to_prune = NULL; | |
1417 | struct strbuf err = STRBUF_INIT; | |
1418 | struct ref_transaction *transaction; | |
1419 | ||
1420 | if (!should_pack_refs(refs, opts)) | |
1421 | return 0; | |
1422 | ||
1423 | transaction = ref_store_transaction_begin(refs->packed_ref_store, | |
1424 | 0, &err); | |
1425 | if (!transaction) | |
1426 | return -1; | |
1427 | ||
1428 | packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err); | |
1429 | ||
1430 | iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL, | |
1431 | refs->base.repo, 0); | |
1432 | while ((ok = ref_iterator_advance(iter)) == ITER_OK) { | |
1433 | /* | |
1434 | * If the loose reference can be packed, add an entry | |
1435 | * in the packed ref cache. If the reference should be | |
1436 | * pruned, also add it to refs_to_prune. | |
1437 | */ | |
1438 | if (!should_pack_ref(refs, iter->refname, iter->oid, iter->flags, opts)) | |
1439 | continue; | |
1440 | ||
1441 | /* | |
1442 | * Add a reference creation for this reference to the | |
1443 | * packed-refs transaction: | |
1444 | */ | |
1445 | if (ref_transaction_update(transaction, iter->refname, | |
1446 | iter->oid, NULL, NULL, NULL, | |
1447 | REF_NO_DEREF, NULL, &err)) | |
1448 | die("failure preparing to create packed reference %s: %s", | |
1449 | iter->refname, err.buf); | |
1450 | ||
1451 | /* Schedule the loose reference for pruning if requested. */ | |
1452 | if ((opts->flags & PACK_REFS_PRUNE)) { | |
1453 | struct ref_to_prune *n; | |
1454 | FLEX_ALLOC_STR(n, name, iter->refname); | |
1455 | oidcpy(&n->oid, iter->oid); | |
1456 | n->next = refs_to_prune; | |
1457 | refs_to_prune = n; | |
1458 | } | |
1459 | } | |
1460 | if (ok != ITER_DONE) | |
1461 | die("error while iterating over references"); | |
1462 | ||
1463 | if (ref_transaction_commit(transaction, &err)) | |
1464 | die("unable to write new packed-refs: %s", err.buf); | |
1465 | ||
1466 | ref_transaction_free(transaction); | |
1467 | ||
1468 | packed_refs_unlock(refs->packed_ref_store); | |
1469 | ||
1470 | prune_refs(refs, &refs_to_prune); | |
1471 | ref_iterator_free(iter); | |
1472 | strbuf_release(&err); | |
1473 | return 0; | |
1474 | } | |
1475 | ||
1476 | /* | |
1477 | * People using contrib's git-new-workdir have .git/logs/refs -> | |
1478 | * /some/other/path/.git/logs/refs, and that may live on another device. | |
1479 | * | |
1480 | * IOW, to avoid cross-device rename errors, the temporary renamed log must | |
1481 | * live within logs/refs. | |
1482 | */ | |
1483 | #define TMP_RENAMED_LOG "refs/.tmp-renamed-log" | |
1484 | ||
1485 | struct rename_cb { | |
1486 | const char *tmp_renamed_log; | |
1487 | int true_errno; | |
1488 | }; | |
1489 | ||
1490 | static int rename_tmp_log_callback(const char *path, void *cb_data) | |
1491 | { | |
1492 | struct rename_cb *cb = cb_data; | |
1493 | ||
1494 | if (rename(cb->tmp_renamed_log, path)) { | |
1495 | /* | |
1496 | * rename(a, b) when b is an existing directory ought | |
1497 | * to result in EISDIR, but Solaris 5.8 gives ENOTDIR. | |
1498 | * Sheesh. Record the true errno for error reporting, | |
1499 | * but report EISDIR to raceproof_create_file() so | |
1500 | * that it knows to retry. | |
1501 | */ | |
1502 | cb->true_errno = errno; | |
1503 | if (errno == ENOTDIR) | |
1504 | errno = EISDIR; | |
1505 | return -1; | |
1506 | } else { | |
1507 | return 0; | |
1508 | } | |
1509 | } | |
1510 | ||
1511 | static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname) | |
1512 | { | |
1513 | struct strbuf path = STRBUF_INIT; | |
1514 | struct strbuf tmp = STRBUF_INIT; | |
1515 | struct rename_cb cb; | |
1516 | int ret; | |
1517 | ||
1518 | files_reflog_path(refs, &path, newrefname); | |
1519 | files_reflog_path(refs, &tmp, TMP_RENAMED_LOG); | |
1520 | cb.tmp_renamed_log = tmp.buf; | |
1521 | ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb); | |
1522 | if (ret) { | |
1523 | if (errno == EISDIR) | |
1524 | error("directory not empty: %s", path.buf); | |
1525 | else | |
1526 | error("unable to move logfile %s to %s: %s", | |
1527 | tmp.buf, path.buf, | |
1528 | strerror(cb.true_errno)); | |
1529 | } | |
1530 | ||
1531 | strbuf_release(&path); | |
1532 | strbuf_release(&tmp); | |
1533 | return ret; | |
1534 | } | |
1535 | ||
1536 | static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs, | |
1537 | struct ref_lock *lock, | |
1538 | const struct object_id *oid, | |
1539 | int skip_oid_verification, | |
1540 | struct strbuf *err); | |
1541 | static int commit_ref_update(struct files_ref_store *refs, | |
1542 | struct ref_lock *lock, | |
1543 | const struct object_id *oid, const char *logmsg, | |
1544 | int flags, | |
1545 | struct strbuf *err); | |
1546 | ||
1547 | /* | |
1548 | * Emit a better error message than lockfile.c's | |
1549 | * unable_to_lock_message() would in case there is a D/F conflict with | |
1550 | * another existing reference. If there would be a conflict, emit an error | |
1551 | * message and return false; otherwise, return true. | |
1552 | * | |
1553 | * Note that this function is not safe against all races with other | |
1554 | * processes, and that's not its job. We'll emit a more verbose error on D/F | |
1555 | * conflicts if we get past it into lock_ref_oid_basic(). | |
1556 | */ | |
1557 | static int refs_rename_ref_available(struct ref_store *refs, | |
1558 | const char *old_refname, | |
1559 | const char *new_refname) | |
1560 | { | |
1561 | struct string_list skip = STRING_LIST_INIT_NODUP; | |
1562 | struct strbuf err = STRBUF_INIT; | |
1563 | int ok; | |
1564 | ||
1565 | string_list_insert(&skip, old_refname); | |
1566 | ok = !refs_verify_refname_available(refs, new_refname, | |
1567 | NULL, &skip, 0, &err); | |
1568 | if (!ok) | |
1569 | error("%s", err.buf); | |
1570 | ||
1571 | string_list_clear(&skip, 0); | |
1572 | strbuf_release(&err); | |
1573 | return ok; | |
1574 | } | |
1575 | ||
1576 | static int files_copy_or_rename_ref(struct ref_store *ref_store, | |
1577 | const char *oldrefname, const char *newrefname, | |
1578 | const char *logmsg, int copy) | |
1579 | { | |
1580 | struct files_ref_store *refs = | |
1581 | files_downcast(ref_store, REF_STORE_WRITE, "rename_ref"); | |
1582 | struct object_id orig_oid; | |
1583 | int flag = 0, logmoved = 0; | |
1584 | struct ref_lock *lock; | |
1585 | struct stat loginfo; | |
1586 | struct strbuf sb_oldref = STRBUF_INIT; | |
1587 | struct strbuf sb_newref = STRBUF_INIT; | |
1588 | struct strbuf tmp_renamed_log = STRBUF_INIT; | |
1589 | int log, ret; | |
1590 | struct strbuf err = STRBUF_INIT; | |
1591 | ||
1592 | files_reflog_path(refs, &sb_oldref, oldrefname); | |
1593 | files_reflog_path(refs, &sb_newref, newrefname); | |
1594 | files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG); | |
1595 | ||
1596 | log = !lstat(sb_oldref.buf, &loginfo); | |
1597 | if (log && S_ISLNK(loginfo.st_mode)) { | |
1598 | ret = error("reflog for %s is a symlink", oldrefname); | |
1599 | goto out; | |
1600 | } | |
1601 | ||
1602 | if (!refs_resolve_ref_unsafe(&refs->base, oldrefname, | |
1603 | RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE, | |
1604 | &orig_oid, &flag)) { | |
1605 | ret = error("refname %s not found", oldrefname); | |
1606 | goto out; | |
1607 | } | |
1608 | ||
1609 | if (flag & REF_ISSYMREF) { | |
1610 | if (copy) | |
1611 | ret = error("refname %s is a symbolic ref, copying it is not supported", | |
1612 | oldrefname); | |
1613 | else | |
1614 | ret = error("refname %s is a symbolic ref, renaming it is not supported", | |
1615 | oldrefname); | |
1616 | goto out; | |
1617 | } | |
1618 | if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) { | |
1619 | ret = 1; | |
1620 | goto out; | |
1621 | } | |
1622 | ||
1623 | if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) { | |
1624 | ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s", | |
1625 | oldrefname, strerror(errno)); | |
1626 | goto out; | |
1627 | } | |
1628 | ||
1629 | if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) { | |
1630 | ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s", | |
1631 | oldrefname, strerror(errno)); | |
1632 | goto out; | |
1633 | } | |
1634 | ||
1635 | if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname, | |
1636 | &orig_oid, REF_NO_DEREF)) { | |
1637 | error("unable to delete old %s", oldrefname); | |
1638 | goto rollback; | |
1639 | } | |
1640 | ||
1641 | /* | |
1642 | * Since we are doing a shallow lookup, oid is not the | |
1643 | * correct value to pass to delete_ref as old_oid. But that | |
1644 | * doesn't matter, because an old_oid check wouldn't add to | |
1645 | * the safety anyway; we want to delete the reference whatever | |
1646 | * its current value. | |
1647 | */ | |
1648 | if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname, | |
1649 | RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE, | |
1650 | NULL, NULL) && | |
1651 | refs_delete_ref(&refs->base, NULL, newrefname, | |
1652 | NULL, REF_NO_DEREF)) { | |
1653 | if (errno == EISDIR) { | |
1654 | struct strbuf path = STRBUF_INIT; | |
1655 | int result; | |
1656 | ||
1657 | files_ref_path(refs, &path, newrefname); | |
1658 | result = remove_empty_directories(&path); | |
1659 | strbuf_release(&path); | |
1660 | ||
1661 | if (result) { | |
1662 | error("Directory not empty: %s", newrefname); | |
1663 | goto rollback; | |
1664 | } | |
1665 | } else { | |
1666 | error("unable to delete existing %s", newrefname); | |
1667 | goto rollback; | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | if (log && rename_tmp_log(refs, newrefname)) | |
1672 | goto rollback; | |
1673 | ||
1674 | logmoved = log; | |
1675 | ||
1676 | lock = lock_ref_oid_basic(refs, newrefname, &err); | |
1677 | if (!lock) { | |
1678 | if (copy) | |
1679 | error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf); | |
1680 | else | |
1681 | error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf); | |
1682 | strbuf_release(&err); | |
1683 | goto rollback; | |
1684 | } | |
1685 | oidcpy(&lock->old_oid, &orig_oid); | |
1686 | ||
1687 | if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) || | |
1688 | commit_ref_update(refs, lock, &orig_oid, logmsg, 0, &err)) { | |
1689 | error("unable to write current sha1 into %s: %s", newrefname, err.buf); | |
1690 | strbuf_release(&err); | |
1691 | goto rollback; | |
1692 | } | |
1693 | ||
1694 | ret = 0; | |
1695 | goto out; | |
1696 | ||
1697 | rollback: | |
1698 | lock = lock_ref_oid_basic(refs, oldrefname, &err); | |
1699 | if (!lock) { | |
1700 | error("unable to lock %s for rollback: %s", oldrefname, err.buf); | |
1701 | strbuf_release(&err); | |
1702 | goto rollbacklog; | |
1703 | } | |
1704 | ||
1705 | if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) || | |
1706 | commit_ref_update(refs, lock, &orig_oid, NULL, REF_SKIP_CREATE_REFLOG, &err)) { | |
1707 | error("unable to write current sha1 into %s: %s", oldrefname, err.buf); | |
1708 | strbuf_release(&err); | |
1709 | } | |
1710 | ||
1711 | rollbacklog: | |
1712 | if (logmoved && rename(sb_newref.buf, sb_oldref.buf)) | |
1713 | error("unable to restore logfile %s from %s: %s", | |
1714 | oldrefname, newrefname, strerror(errno)); | |
1715 | if (!logmoved && log && | |
1716 | rename(tmp_renamed_log.buf, sb_oldref.buf)) | |
1717 | error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s", | |
1718 | oldrefname, strerror(errno)); | |
1719 | ret = 1; | |
1720 | out: | |
1721 | strbuf_release(&sb_newref); | |
1722 | strbuf_release(&sb_oldref); | |
1723 | strbuf_release(&tmp_renamed_log); | |
1724 | ||
1725 | return ret; | |
1726 | } | |
1727 | ||
1728 | static int files_rename_ref(struct ref_store *ref_store, | |
1729 | const char *oldrefname, const char *newrefname, | |
1730 | const char *logmsg) | |
1731 | { | |
1732 | return files_copy_or_rename_ref(ref_store, oldrefname, | |
1733 | newrefname, logmsg, 0); | |
1734 | } | |
1735 | ||
1736 | static int files_copy_ref(struct ref_store *ref_store, | |
1737 | const char *oldrefname, const char *newrefname, | |
1738 | const char *logmsg) | |
1739 | { | |
1740 | return files_copy_or_rename_ref(ref_store, oldrefname, | |
1741 | newrefname, logmsg, 1); | |
1742 | } | |
1743 | ||
1744 | static int close_ref_gently(struct ref_lock *lock) | |
1745 | { | |
1746 | if (close_lock_file_gently(&lock->lk)) | |
1747 | return -1; | |
1748 | return 0; | |
1749 | } | |
1750 | ||
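| /* | |
|  * Rename the ref's lockfile into place, making the new value visible. | |
|  * If an empty directory is squatting where the loose ref file should | |
|  * go (e.g. a leftover "refs/heads/foo/" directory blocking creation of | |
|  * a "refs/heads/foo" ref), try to remove it first; if that fails, | |
|  * commit_lock_file() will fail and report the problem. | |
|  */ | |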
1751 | static int commit_ref(struct ref_lock *lock) | |
1752 | { | |
1753 | char *path = get_locked_file_path(&lock->lk); | |
1754 | struct stat st; | |
1755 | ||
1756 | if (!lstat(path, &st) && S_ISDIR(st.st_mode)) { | |
1757 | /* | |
1758 | * There is a directory at the path we want to rename | |
1759 | * the lockfile to. Hopefully it is empty; try to | |
1760 | * delete it. | |
1761 | */ | |
1762 | size_t len = strlen(path); | |
1763 | struct strbuf sb_path = STRBUF_INIT; | |
1764 | ||
1765 | strbuf_attach(&sb_path, path, len, len); | |
1766 | ||
1767 | /* | |
1768 | * If this fails, commit_lock_file() will also fail | |
1769 | * and will report the problem. | |
1770 | */ | |
1771 | remove_empty_directories(&sb_path); | |
1772 | strbuf_release(&sb_path); | |
1773 | } else { | |
1774 | free(path); | |
1775 | } | |
1776 | ||
1777 | if (commit_lock_file(&lock->lk)) | |
1778 | return -1; | |
1779 | return 0; | |
1780 | } | |
1781 | ||
1782 | static int open_or_create_logfile(const char *path, void *cb) | |
1783 | { | |
1784 | int *fd = cb; | |
1785 | ||
1786 | *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666); | |
1787 | return (*fd < 0) ? -1 : 0; | |
1788 | } | |
1789 | ||
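| /* | |
|  * Whether a reflog is created automatically is controlled by the | |
|  * core.logAllRefUpdates setting cached in refs->log_all_ref_updates: | |
|  * when it is unset, the default is to create reflogs in non-bare | |
|  * repositories only, and should_autocreate_reflog() then decides per | |
|  * refname (roughly: branches, remote-tracking refs, notes and HEAD). | |
|  */ | |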
1790 | /* | |
1791 | * Create a reflog for a ref. If force_create = 0, only create the | |
1792 | * reflog for certain refs (those for which should_autocreate_reflog | |
1793 | * returns non-zero). Otherwise, create it regardless of the reference | |
1794 | * name. If the logfile already existed or was created, return 0 and | |
1795 | * set *logfd to the file descriptor opened for appending to the file. | |
1796 | * If no logfile exists and we decided not to create one, return 0 and | |
1797 | * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and | |
1798 | * return -1. | |
1799 | */ | |
1800 | static int log_ref_setup(struct files_ref_store *refs, | |
1801 | const char *refname, int force_create, | |
1802 | int *logfd, struct strbuf *err) | |
1803 | { | |
1804 | enum log_refs_config log_refs_cfg = refs->log_all_ref_updates; | |
1805 | struct strbuf logfile_sb = STRBUF_INIT; | |
1806 | char *logfile; | |
1807 | ||
1808 | if (log_refs_cfg == LOG_REFS_UNSET) | |
1809 | log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL; | |
1810 | ||
1811 | files_reflog_path(refs, &logfile_sb, refname); | |
1812 | logfile = strbuf_detach(&logfile_sb, NULL); | |
1813 | ||
1814 | if (force_create || should_autocreate_reflog(log_refs_cfg, refname)) { | |
1815 | if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) { | |
1816 | if (errno == ENOENT) | |
1817 | strbuf_addf(err, "unable to create directory for '%s': " | |
1818 | "%s", logfile, strerror(errno)); | |
1819 | else if (errno == EISDIR) | |
1820 | strbuf_addf(err, "there are still logs under '%s'", | |
1821 | logfile); | |
1822 | else | |
1823 | strbuf_addf(err, "unable to append to '%s': %s", | |
1824 | logfile, strerror(errno)); | |
1825 | ||
1826 | goto error; | |
1827 | } | |
1828 | } else { | |
1829 | *logfd = open(logfile, O_APPEND | O_WRONLY); | |
1830 | if (*logfd < 0) { | |
1831 | if (errno == ENOENT || errno == EISDIR) { | |
1832 | /* | |
1833 | * The logfile doesn't already exist, | |
1834 | * but that is not an error; it only | |
1835 | * means that we won't write log | |
1836 | * entries to it. | |
1837 | */ | |
1838 | ; | |
1839 | } else { | |
1840 | strbuf_addf(err, "unable to append to '%s': %s", | |
1841 | logfile, strerror(errno)); | |
1842 | goto error; | |
1843 | } | |
1844 | } | |
1845 | } | |
1846 | ||
1847 | if (*logfd >= 0) | |
1848 | adjust_shared_perm(the_repository, logfile); | |
1849 | ||
1850 | free(logfile); | |
1851 | return 0; | |
1852 | ||
1853 | error: | |
1854 | free(logfile); | |
1855 | return -1; | |
1856 | } | |
1857 | ||
1858 | static int files_create_reflog(struct ref_store *ref_store, const char *refname, | |
1859 | struct strbuf *err) | |
1860 | { | |
1861 | struct files_ref_store *refs = | |
1862 | files_downcast(ref_store, REF_STORE_WRITE, "create_reflog"); | |
1863 | int fd; | |
1864 | ||
1865 | if (log_ref_setup(refs, refname, 1, &fd, err)) | |
1866 | return -1; | |
1867 | ||
1868 | if (fd >= 0) | |
1869 | close(fd); | |
1870 | ||
1871 | return 0; | |
1872 | } | |
1873 | ||
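| /* | |
|  * Append one entry to an already-open reflog file. Each entry is a | |
|  * single line of the form | |
|  * | |
|  *   <old-oid> SP <new-oid> SP <committer ident> [ TAB <message> ] LF | |
|  * | |
|  * where the committer ident is "Name <email> timestamp tz" as produced | |
|  * by git_committer_info(). show_one_reflog_ent() below parses the same | |
|  * format back out of the file. | |
|  */ | |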
1874 | static int log_ref_write_fd(int fd, const struct object_id *old_oid, | |
1875 | const struct object_id *new_oid, | |
1876 | const char *committer, const char *msg) | |
1877 | { | |
1878 | struct strbuf sb = STRBUF_INIT; | |
1879 | int ret = 0; | |
1880 | ||
1881 | if (!committer) | |
1882 | committer = git_committer_info(0); | |
1883 | ||
1884 | strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer); | |
1885 | if (msg && *msg) { | |
1886 | strbuf_addch(&sb, '\t'); | |
1887 | strbuf_addstr(&sb, msg); | |
1888 | } | |
1889 | strbuf_addch(&sb, '\n'); | |
1890 | if (write_in_full(fd, sb.buf, sb.len) < 0) | |
1891 | ret = -1; | |
1892 | strbuf_release(&sb); | |
1893 | return ret; | |
1894 | } | |
1895 | ||
1896 | static int files_log_ref_write(struct files_ref_store *refs, | |
1897 | const char *refname, | |
1898 | const struct object_id *old_oid, | |
1899 | const struct object_id *new_oid, | |
1900 | const char *committer_info, const char *msg, | |
1901 | int flags, struct strbuf *err) | |
1902 | { | |
1903 | int logfd, result; | |
1904 | ||
1905 | if (flags & REF_SKIP_CREATE_REFLOG) | |
1906 | return 0; | |
1907 | ||
1908 | result = log_ref_setup(refs, refname, | |
1909 | flags & REF_FORCE_CREATE_REFLOG, | |
1910 | &logfd, err); | |
1911 | ||
1912 | if (result) | |
1913 | return result; | |
1914 | ||
1915 | if (logfd < 0) | |
1916 | return 0; | |
1917 | result = log_ref_write_fd(logfd, old_oid, new_oid, committer_info, msg); | |
1918 | if (result) { | |
1919 | struct strbuf sb = STRBUF_INIT; | |
1920 | int save_errno = errno; | |
1921 | ||
1922 | files_reflog_path(refs, &sb, refname); | |
1923 | strbuf_addf(err, "unable to append to '%s': %s", | |
1924 | sb.buf, strerror(save_errno)); | |
1925 | strbuf_release(&sb); | |
1926 | close(logfd); | |
1927 | return -1; | |
1928 | } | |
1929 | if (close(logfd)) { | |
1930 | struct strbuf sb = STRBUF_INIT; | |
1931 | int save_errno = errno; | |
1932 | ||
1933 | files_reflog_path(refs, &sb, refname); | |
1934 | strbuf_addf(err, "unable to append to '%s': %s", | |
1935 | sb.buf, strerror(save_errno)); | |
1936 | strbuf_release(&sb); | |
1937 | return -1; | |
1938 | } | |
1939 | return 0; | |
1940 | } | |
1941 | ||
1942 | /* | |
1943 | * Write oid into the open lockfile, then close the lockfile. On | |
1944 | * errors, roll back the lockfile, fill in *err and return an error code. | |
1945 | */ | |
1946 | static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs, | |
1947 | struct ref_lock *lock, | |
1948 | const struct object_id *oid, | |
1949 | int skip_oid_verification, | |
1950 | struct strbuf *err) | |
1951 | { | |
1952 | static char term = '\n'; | |
1953 | struct object *o; | |
1954 | int fd; | |
1955 | ||
1956 | if (!skip_oid_verification) { | |
1957 | o = parse_object(refs->base.repo, oid); | |
1958 | if (!o) { | |
1959 | strbuf_addf( | |
1960 | err, | |
1961 | "trying to write ref '%s' with nonexistent object %s", | |
1962 | lock->ref_name, oid_to_hex(oid)); | |
1963 | unlock_ref(lock); | |
1964 | return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE; | |
1965 | } | |
1966 | if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) { | |
1967 | strbuf_addf( | |
1968 | err, | |
1969 | "trying to write non-commit object %s to branch '%s'", | |
1970 | oid_to_hex(oid), lock->ref_name); | |
1971 | unlock_ref(lock); | |
1972 | return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE; | |
1973 | } | |
1974 | } | |
1975 | fd = get_lock_file_fd(&lock->lk); | |
1976 | if (write_in_full(fd, oid_to_hex(oid), refs->base.repo->hash_algo->hexsz) < 0 || | |
1977 | write_in_full(fd, &term, 1) < 0 || | |
1978 | fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 || | |
1979 | close_ref_gently(lock) < 0) { | |
1980 | strbuf_addf(err, | |
1981 | "couldn't write '%s'", get_lock_file_path(&lock->lk)); | |
1982 | unlock_ref(lock); | |
1983 | return REF_TRANSACTION_ERROR_GENERIC; | |
1984 | } | |
1985 | return 0; | |
1986 | } | |
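| /* | |
|  * The loose ref file written above consists of nothing but the hex | |
|  * object name (40 or 64 characters, depending on the hash algorithm) | |
|  * followed by a single newline. The data is written to "<refname>.lock" | |
|  * and only becomes visible once commit_ref() renames the lockfile into | |
|  * place. | |
|  */ | |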
1987 | ||
1988 | /* | |
1989 | * Commit a change to a loose reference that has already been written | |
1990 | * to the loose reference lockfile. Also update the reflogs if | |
1991 | * necessary, using the specified lockmsg (which can be NULL). | |
1992 | */ | |
1993 | static int commit_ref_update(struct files_ref_store *refs, | |
1994 | struct ref_lock *lock, | |
1995 | const struct object_id *oid, const char *logmsg, | |
1996 | int flags, | |
1997 | struct strbuf *err) | |
1998 | { | |
1999 | files_assert_main_repository(refs, "commit_ref_update"); | |
2000 | ||
2001 | clear_loose_ref_cache(refs); | |
2002 | if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid, oid, NULL, | |
2003 | logmsg, flags, err)) { | |
2004 | char *old_msg = strbuf_detach(err, NULL); | |
2005 | strbuf_addf(err, "cannot update the ref '%s': %s", | |
2006 | lock->ref_name, old_msg); | |
2007 | free(old_msg); | |
2008 | unlock_ref(lock); | |
2009 | return -1; | |
2010 | } | |
2011 | ||
2012 | if (strcmp(lock->ref_name, "HEAD") != 0) { | |
2013 | /* | |
2014 | * Special hack: If a branch is updated directly and HEAD | |
2015 | * points to it (may happen on the remote side of a push | |
2016 | * for example) then logically the HEAD reflog should be | |
2017 | * updated too. | |
2018 | * A generic solution would require reverse symref information, | |
2019 | * but finding all symrefs pointing to the given branch | |
2020 | * would be too costly for this rare event (the direct | |
2021 | * update of a branch) to be worth it. So let's cheat and | |
2022 | * check HEAD only, which should cover 99% of all usage | |
2023 | * scenarios (even 100% of the default ones). | |
2024 | */ | |
2025 | int head_flag; | |
2026 | const char *head_ref; | |
2027 | ||
2028 | head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD", | |
2029 | RESOLVE_REF_READING, | |
2030 | NULL, &head_flag); | |
2031 | if (head_ref && (head_flag & REF_ISSYMREF) && | |
2032 | !strcmp(head_ref, lock->ref_name)) { | |
2033 | struct strbuf log_err = STRBUF_INIT; | |
2034 | if (files_log_ref_write(refs, "HEAD", &lock->old_oid, | |
2035 | oid, NULL, logmsg, flags, | |
2036 | &log_err)) { | |
2037 | error("%s", log_err.buf); | |
2038 | strbuf_release(&log_err); | |
2039 | } | |
2040 | } | |
2041 | } | |
2042 | ||
2043 | if (commit_ref(lock)) { | |
2044 | strbuf_addf(err, "couldn't set '%s'", lock->ref_name); | |
2045 | unlock_ref(lock); | |
2046 | return -1; | |
2047 | } | |
2048 | ||
2049 | unlock_ref(lock); | |
2050 | return 0; | |
2051 | } | |
2052 | ||
2053 | #ifdef NO_SYMLINK_HEAD | |
2054 | #define create_ref_symlink(a, b) (-1) | |
2055 | #else | |
2056 | static int create_ref_symlink(struct ref_lock *lock, const char *target) | |
2057 | { | |
2058 | int ret = -1; | |
2059 | ||
2060 | char *ref_path = get_locked_file_path(&lock->lk); | |
2061 | unlink(ref_path); | |
2062 | ret = symlink(target, ref_path); | |
2063 | free(ref_path); | |
2064 | ||
2065 | if (ret) | |
2066 | fprintf(stderr, "no symlink - falling back to symbolic ref\n"); | |
2067 | return ret; | |
2068 | } | |
2069 | #endif | |
2070 | ||
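| /* | |
|  * Write the on-disk representation of a symref into the already-held | |
|  * lockfile: a single line of the form "ref: <target>", e.g. a HEAD | |
|  * pointing at the main branch contains "ref: refs/heads/main". The | |
|  * lockfile is committed later, in the commit phase of the transaction. | |
|  */ | |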
2071 | static int create_symref_lock(struct ref_lock *lock, const char *target, | |
2072 | struct strbuf *err) | |
2073 | { | |
2074 | if (!fdopen_lock_file(&lock->lk, "w")) { | |
2075 | strbuf_addf(err, "unable to fdopen %s: %s", | |
2076 | get_lock_file_path(&lock->lk), strerror(errno)); | |
2077 | return -1; | |
2078 | } | |
2079 | ||
2080 | if (fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target) < 0) { | |
2081 | strbuf_addf(err, "unable to write to %s: %s", | |
2082 | get_lock_file_path(&lock->lk), strerror(errno)); | |
2083 | return -1; | |
2084 | } | |
2085 | ||
2086 | return 0; | |
2087 | } | |
2088 | ||
2089 | static int files_reflog_exists(struct ref_store *ref_store, | |
2090 | const char *refname) | |
2091 | { | |
2092 | struct files_ref_store *refs = | |
2093 | files_downcast(ref_store, REF_STORE_READ, "reflog_exists"); | |
2094 | struct strbuf sb = STRBUF_INIT; | |
2095 | struct stat st; | |
2096 | int ret; | |
2097 | ||
2098 | files_reflog_path(refs, &sb, refname); | |
2099 | ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode); | |
2100 | strbuf_release(&sb); | |
2101 | return ret; | |
2102 | } | |
2103 | ||
2104 | static int files_delete_reflog(struct ref_store *ref_store, | |
2105 | const char *refname) | |
2106 | { | |
2107 | struct files_ref_store *refs = | |
2108 | files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog"); | |
2109 | struct strbuf sb = STRBUF_INIT; | |
2110 | int ret; | |
2111 | ||
2112 | files_reflog_path(refs, &sb, refname); | |
2113 | ret = remove_path(sb.buf); | |
2114 | strbuf_release(&sb); | |
2115 | return ret; | |
2116 | } | |
2117 | ||
2118 | static int show_one_reflog_ent(struct files_ref_store *refs, | |
2119 | const char *refname, | |
2120 | struct strbuf *sb, | |
2121 | each_reflog_ent_fn fn, void *cb_data) | |
2122 | { | |
2123 | struct object_id ooid, noid; | |
2124 | char *email_end, *message; | |
2125 | timestamp_t timestamp; | |
2126 | int tz; | |
2127 | const char *p = sb->buf; | |
2128 | ||
2129 | /* old SP new SP name <email> SP time TAB msg LF */ | |
2130 | if (!sb->len || sb->buf[sb->len - 1] != '\n' || | |
2131 | parse_oid_hex_algop(p, &ooid, &p, refs->base.repo->hash_algo) || *p++ != ' ' || | |
2132 | parse_oid_hex_algop(p, &noid, &p, refs->base.repo->hash_algo) || *p++ != ' ' || | |
2133 | !(email_end = strchr(p, '>')) || | |
2134 | email_end[1] != ' ' || | |
2135 | !(timestamp = parse_timestamp(email_end + 2, &message, 10)) || | |
2136 | !message || message[0] != ' ' || | |
2137 | (message[1] != '+' && message[1] != '-') || | |
2138 | !isdigit(message[2]) || !isdigit(message[3]) || | |
2139 | !isdigit(message[4]) || !isdigit(message[5])) | |
2140 | return 0; /* corrupt? */ | |
2141 | email_end[1] = '\0'; | |
2142 | tz = strtol(message + 1, NULL, 10); | |
2143 | if (message[6] != '\t') | |
2144 | message += 6; | |
2145 | else | |
2146 | message += 7; | |
2147 | return fn(refname, &ooid, &noid, p, timestamp, tz, message, cb_data); | |
2148 | } | |
2149 | ||
2150 | static char *find_beginning_of_line(char *bob, char *scan) | |
2151 | { | |
2152 | while (bob < scan && *(--scan) != '\n') | |
2153 | ; /* keep scanning backwards */ | |
2154 | /* | |
2155 | * Return either the beginning of the buffer, or the LF at the end of | |
2156 | * the previous line. | |
2157 | */ | |
2158 | return scan; | |
2159 | } | |
2160 | ||
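| /* | |
|  * Iterate over the entries of refname's reflog from newest to oldest. | |
|  * The log is read backwards in BUFSIZ-sized chunks; each chunk is | |
|  * scanned for line boundaries, and a partial line at the front of a | |
|  * chunk is stashed in "sb" so that it can be completed once the next | |
|  * (earlier) chunk has been read. | |
|  */ | |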
2161 | static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store, | |
2162 | const char *refname, | |
2163 | each_reflog_ent_fn fn, | |
2164 | void *cb_data) | |
2165 | { | |
2166 | struct files_ref_store *refs = | |
2167 | files_downcast(ref_store, REF_STORE_READ, | |
2168 | "for_each_reflog_ent_reverse"); | |
2169 | struct strbuf sb = STRBUF_INIT; | |
2170 | FILE *logfp; | |
2171 | long pos; | |
2172 | int ret = 0, at_tail = 1; | |
2173 | ||
2174 | files_reflog_path(refs, &sb, refname); | |
2175 | logfp = fopen(sb.buf, "r"); | |
2176 | strbuf_release(&sb); | |
2177 | if (!logfp) | |
2178 | return -1; | |
2179 | ||
2180 | /* Jump to the end */ | |
2181 | if (fseek(logfp, 0, SEEK_END) < 0) | |
2182 | ret = error("cannot seek back reflog for %s: %s", | |
2183 | refname, strerror(errno)); | |
2184 | pos = ftell(logfp); | |
2185 | while (!ret && 0 < pos) { | |
2186 | int cnt; | |
2187 | size_t nread; | |
2188 | char buf[BUFSIZ]; | |
2189 | char *endp, *scanp; | |
2190 | ||
2191 | /* Fill next block from the end */ | |
2192 | cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos; | |
2193 | if (fseek(logfp, pos - cnt, SEEK_SET)) { | |
2194 | ret = error("cannot seek back reflog for %s: %s", | |
2195 | refname, strerror(errno)); | |
2196 | break; | |
2197 | } | |
2198 | nread = fread(buf, cnt, 1, logfp); | |
2199 | if (nread != 1) { | |
2200 | ret = error("cannot read %d bytes from reflog for %s: %s", | |
2201 | cnt, refname, strerror(errno)); | |
2202 | break; | |
2203 | } | |
2204 | pos -= cnt; | |
2205 | ||
2206 | scanp = endp = buf + cnt; | |
2207 | if (at_tail && scanp[-1] == '\n') | |
2208 | /* Looking at the final LF at the end of the file */ | |
2209 | scanp--; | |
2210 | at_tail = 0; | |
2211 | ||
2212 | while (buf < scanp) { | |
2213 | /* | |
2214 | * bp will point either at the terminating LF of the | |
2215 | * previous line, or at the beginning of the buffer. | |
2216 | */ | |
2217 | char *bp; | |
2218 | ||
2219 | bp = find_beginning_of_line(buf, scanp); | |
2220 | ||
2221 | if (*bp == '\n') { | |
2222 | /* | |
2223 | * The newline is the end of the previous line, | |
2224 | * so we know we have a complete line starting | |
2225 | * at (bp + 1). Prefix it onto any prior data | |
2226 | * we collected for the line and process it. | |
2227 | */ | |
2228 | strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1)); | |
2229 | scanp = bp; | |
2230 | endp = bp + 1; | |
2231 | ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data); | |
2232 | strbuf_reset(&sb); | |
2233 | if (ret) | |
2234 | break; | |
2235 | } else if (!pos) { | |
2236 | /* | |
2237 | * We are at the start of the buffer, and the | |
2238 | * start of the file; there is no previous | |
2239 | * line, and we have everything for this one. | |
2240 | * Process it, and we can end the loop. | |
2241 | */ | |
2242 | strbuf_splice(&sb, 0, 0, buf, endp - buf); | |
2243 | ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data); | |
2244 | strbuf_reset(&sb); | |
2245 | break; | |
2246 | } | |
2247 | ||
2248 | if (bp == buf) { | |
2249 | /* | |
2250 | * We are at the start of the buffer, and there | |
2251 | * is more file to read backwards. Which means | |
2252 | * we are in the middle of a line. Note that we | |
2253 | * may get here even if *bp was a newline; that | |
2254 | * just means we are at the exact end of the | |
2255 | * previous line, rather than some spot in the | |
2256 | * middle. | |
2257 | * | |
2258 | * Save away what we have to be combined with | |
2259 | * the data from the next read. | |
2260 | */ | |
2261 | strbuf_splice(&sb, 0, 0, buf, endp - buf); | |
2262 | break; | |
2263 | } | |
2264 | } | |
2265 | ||
2266 | } | |
2267 | if (!ret && sb.len) | |
2268 | BUG("reverse reflog parser had leftover data"); | |
2269 | ||
2270 | fclose(logfp); | |
2271 | strbuf_release(&sb); | |
2272 | return ret; | |
2273 | } | |
2274 | ||
2275 | static int files_for_each_reflog_ent(struct ref_store *ref_store, | |
2276 | const char *refname, | |
2277 | each_reflog_ent_fn fn, void *cb_data) | |
2278 | { | |
2279 | struct files_ref_store *refs = | |
2280 | files_downcast(ref_store, REF_STORE_READ, | |
2281 | "for_each_reflog_ent"); | |
2282 | FILE *logfp; | |
2283 | struct strbuf sb = STRBUF_INIT; | |
2284 | int ret = 0; | |
2285 | ||
2286 | files_reflog_path(refs, &sb, refname); | |
2287 | logfp = fopen(sb.buf, "r"); | |
2288 | strbuf_release(&sb); | |
2289 | if (!logfp) | |
2290 | return -1; | |
2291 | ||
2292 | while (!ret && !strbuf_getwholeline(&sb, logfp, '\n')) | |
2293 | ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data); | |
2294 | fclose(logfp); | |
2295 | strbuf_release(&sb); | |
2296 | return ret; | |
2297 | } | |
2298 | ||
2299 | struct files_reflog_iterator { | |
2300 | struct ref_iterator base; | |
2301 | struct ref_store *ref_store; | |
2302 | struct dir_iterator *dir_iterator; | |
2303 | }; | |
2304 | ||
2305 | static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator) | |
2306 | { | |
2307 | struct files_reflog_iterator *iter = | |
2308 | (struct files_reflog_iterator *)ref_iterator; | |
2309 | struct dir_iterator *diter = iter->dir_iterator; | |
2310 | int ok; | |
2311 | ||
2312 | while ((ok = dir_iterator_advance(diter)) == ITER_OK) { | |
2313 | if (!S_ISREG(diter->st.st_mode)) | |
2314 | continue; | |
2315 | if (check_refname_format(diter->basename, | |
2316 | REFNAME_ALLOW_ONELEVEL)) | |
2317 | continue; | |
2318 | ||
2319 | iter->base.refname = diter->relative_path; | |
2320 | return ITER_OK; | |
2321 | } | |
2322 | ||
2323 | return ok; | |
2324 | } | |
2325 | ||
2326 | static int files_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED, | |
2327 | const char *refname UNUSED, | |
2328 | unsigned int flags UNUSED) | |
2329 | { | |
2330 | BUG("ref_iterator_seek() called for reflog_iterator"); | |
2331 | } | |
2332 | ||
2333 | static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED, | |
2334 | struct object_id *peeled UNUSED) | |
2335 | { | |
2336 | BUG("ref_iterator_peel() called for reflog_iterator"); | |
2337 | } | |
2338 | ||
2339 | static void files_reflog_iterator_release(struct ref_iterator *ref_iterator) | |
2340 | { | |
2341 | struct files_reflog_iterator *iter = | |
2342 | (struct files_reflog_iterator *)ref_iterator; | |
2343 | dir_iterator_free(iter->dir_iterator); | |
2344 | } | |
2345 | ||
2346 | static struct ref_iterator_vtable files_reflog_iterator_vtable = { | |
2347 | .advance = files_reflog_iterator_advance, | |
2348 | .seek = files_reflog_iterator_seek, | |
2349 | .peel = files_reflog_iterator_peel, | |
2350 | .release = files_reflog_iterator_release, | |
2351 | }; | |
2352 | ||
2353 | static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store, | |
2354 | const char *gitdir) | |
2355 | { | |
2356 | struct dir_iterator *diter; | |
2357 | struct files_reflog_iterator *iter; | |
2358 | struct ref_iterator *ref_iterator; | |
2359 | struct strbuf sb = STRBUF_INIT; | |
2360 | ||
2361 | strbuf_addf(&sb, "%s/logs", gitdir); | |
2362 | ||
2363 | diter = dir_iterator_begin(sb.buf, DIR_ITERATOR_SORTED); | |
2364 | if (!diter) { | |
2365 | strbuf_release(&sb); | |
2366 | return empty_ref_iterator_begin(); | |
2367 | } | |
2368 | ||
2369 | CALLOC_ARRAY(iter, 1); | |
2370 | ref_iterator = &iter->base; | |
2371 | ||
2372 | base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable); | |
2373 | iter->dir_iterator = diter; | |
2374 | iter->ref_store = ref_store; | |
2375 | strbuf_release(&sb); | |
2376 | ||
2377 | return ref_iterator; | |
2378 | } | |
2379 | ||
2380 | static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store) | |
2381 | { | |
2382 | struct files_ref_store *refs = | |
2383 | files_downcast(ref_store, REF_STORE_READ, | |
2384 | "reflog_iterator_begin"); | |
2385 | ||
2386 | if (!strcmp(refs->base.gitdir, refs->gitcommondir)) { | |
2387 | return reflog_iterator_begin(ref_store, refs->gitcommondir); | |
2388 | } else { | |
2389 | return merge_ref_iterator_begin( | |
2390 | reflog_iterator_begin(ref_store, refs->base.gitdir), | |
2391 | reflog_iterator_begin(ref_store, refs->gitcommondir), | |
2392 | ref_iterator_select, refs); | |
2393 | } | |
2394 | } | |
2395 | ||
2396 | /* | |
2397 | * If update is a direct update of head_ref (the reference pointed to | |
2398 | * by HEAD), then add an extra REF_LOG_ONLY update for HEAD. | |
2399 | */ | |
2400 | static enum ref_transaction_error split_head_update(struct ref_update *update, | |
2401 | struct ref_transaction *transaction, | |
2402 | const char *head_ref, | |
2403 | struct strbuf *err) | |
2404 | { | |
2405 | struct ref_update *new_update; | |
2406 | ||
2407 | if ((update->flags & REF_LOG_ONLY) || | |
2408 | (update->flags & REF_SKIP_CREATE_REFLOG) || | |
2409 | (update->flags & REF_IS_PRUNING) || | |
2410 | (update->flags & REF_UPDATE_VIA_HEAD)) | |
2411 | return 0; | |
2412 | ||
2413 | if (strcmp(update->refname, head_ref)) | |
2414 | return 0; | |
2415 | ||
2416 | /* | |
2417 | * First make sure that HEAD is not already in the | |
2418 | * transaction. This check is O(lg N) in the transaction | |
2419 | * size, but it happens at most once per transaction. | |
2420 | */ | |
2421 | if (string_list_has_string(&transaction->refnames, "HEAD")) { | |
2422 | /* An entry already existed */ | |
2423 | strbuf_addf(err, | |
2424 | "multiple updates for 'HEAD' (including one " | |
2425 | "via its referent '%s') are not allowed", | |
2426 | update->refname); | |
2427 | return REF_TRANSACTION_ERROR_NAME_CONFLICT; | |
2428 | } | |
2429 | ||
2430 | new_update = ref_transaction_add_update( | |
2431 | transaction, "HEAD", | |
2432 | update->flags | REF_LOG_ONLY | REF_NO_DEREF | REF_LOG_VIA_SPLIT, | |
2433 | &update->new_oid, &update->old_oid, | |
2434 | NULL, NULL, update->committer_info, update->msg); | |
2435 | new_update->parent_update = update; | |
2436 | ||
2437 | /* | |
2438 | * Add "HEAD". This insertion is O(N) in the transaction | |
2439 | * size, but it happens at most once per transaction. | |
2440 | * Add new_update->refname instead of a literal "HEAD". | |
2441 | */ | |
2442 | if (strcmp(new_update->refname, "HEAD")) | |
2443 | BUG("%s unexpectedly not 'HEAD'", new_update->refname); | |
2444 | ||
2445 | return 0; | |
2446 | } | |
2447 | ||
2448 | /* | |
2449 | * update is for a symref that points at referent and doesn't have | |
2450 | * REF_NO_DEREF set. Split it into two updates: | |
2451 | * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set | |
2452 | * - A new, separate update for the referent reference | |
2453 | * Note that the new update will itself be subject to splitting when | |
2454 | * the iteration gets to it. | |
2455 | */ | |
2456 | static enum ref_transaction_error split_symref_update(struct ref_update *update, | |
2457 | const char *referent, | |
2458 | struct ref_transaction *transaction, | |
2459 | struct strbuf *err) | |
2460 | { | |
2461 | struct ref_update *new_update; | |
2462 | unsigned int new_flags; | |
2463 | ||
2464 | /* | |
2465 | * First make sure that referent is not already in the | |
2466 | * transaction. This check is O(lg N) in the transaction | |
2467 | * size, but it happens at most once per symref in a | |
2468 | * transaction. | |
2469 | */ | |
2470 | if (string_list_has_string(&transaction->refnames, referent)) { | |
2471 | /* An entry already exists */ | |
2472 | strbuf_addf(err, | |
2473 | "multiple updates for '%s' (including one " | |
2474 | "via symref '%s') are not allowed", | |
2475 | referent, update->refname); | |
2476 | return REF_TRANSACTION_ERROR_NAME_CONFLICT; | |
2477 | } | |
2478 | ||
2479 | new_flags = update->flags; | |
2480 | if (!strcmp(update->refname, "HEAD")) { | |
2481 | /* | |
2482 | * Record that the new update came via HEAD, so that | |
2483 | * when we process it, split_head_update() doesn't try | |
2484 | * to add another reflog update for HEAD. Note that | |
2485 | * this bit will be propagated if the new_update | |
2486 | * itself needs to be split. | |
2487 | */ | |
2488 | new_flags |= REF_UPDATE_VIA_HEAD; | |
2489 | } | |
2490 | ||
2491 | new_update = ref_transaction_add_update( | |
2492 | transaction, referent, new_flags, | |
2493 | update->new_target ? NULL : &update->new_oid, | |
2494 | update->old_target ? NULL : &update->old_oid, | |
2495 | update->new_target, update->old_target, NULL, | |
2496 | update->msg); | |
2497 | ||
2498 | new_update->parent_update = update; | |
2499 | ||
2500 | /* | |
2501 | * Change the symbolic ref update to log only. Also, it | |
2502 | * doesn't need to check its old OID value, as that will be | |
2503 | * done when new_update is processed. | |
2504 | */ | |
2505 | update->flags |= REF_LOG_ONLY | REF_NO_DEREF; | |
2506 | ||
2507 | return 0; | |
2508 | } | |
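| /* | |
|  * For illustration: an update of "HEAD", a symref pointing at (say) | |
|  * "refs/heads/main", that does not use REF_NO_DEREF ends up as two | |
|  * updates. The original one is downgraded to REF_LOG_ONLY|REF_NO_DEREF | |
|  * and only writes HEAD's reflog, while a new update of | |
|  * "refs/heads/main", flagged with REF_UPDATE_VIA_HEAD, carries the | |
|  * actual old/new values. | |
|  */ | |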
2509 | ||
2510 | /* | |
2511 | * Check whether the REF_HAVE_OLD and old_oid values stored in update | |
2512 | * are consistent with oid, which is the reference's current value. If | |
2513 | * everything is OK, return 0; otherwise, write an error message to | |
2514 | * err and return the appropriate ref_transaction_error code. | |
2515 | */ | |
2516 | static enum ref_transaction_error check_old_oid(struct ref_update *update, | |
2517 | struct object_id *oid, | |
2518 | struct strbuf *referent, | |
2519 | struct strbuf *err) | |
2520 | { | |
2521 | if (update->flags & REF_LOG_ONLY || | |
2522 | !(update->flags & REF_HAVE_OLD)) | |
2523 | return 0; | |
2524 | ||
2525 | if (oideq(oid, &update->old_oid)) { | |
2526 | /* | |
2527 | * Normally matching the expected old oid is enough. Either we | |
2528 | * found the ref at the expected state, or we are creating and | |
2529 | * expect the null oid (and likewise found nothing). | |
2530 | * | |
2531 | * But there is one exception for the null oid: if we found a | |
2532 | * symref pointing to nothing we'll also get the null oid. In | |
2533 | * regular recursive mode, that's good (we'll write to what the | |
2534 | * symref points to, which doesn't exist). But in no-deref | |
2535 | * mode, it means we'll clobber the symref, even though the | |
2536 | * caller asked for this to be a creation event. So flag | |
2537 | * that case to preserve the dangling symref. | |
2538 | */ | |
2539 | if ((update->flags & REF_NO_DEREF) && referent->len && | |
2540 | is_null_oid(oid)) { | |
2541 | strbuf_addf(err, "cannot lock ref '%s': " | |
2542 | "dangling symref already exists", | |
2543 | ref_update_original_update_refname(update)); | |
2544 | return REF_TRANSACTION_ERROR_CREATE_EXISTS; | |
2545 | } | |
2546 | return 0; | |
2547 | } | |
2548 | ||
2549 | if (is_null_oid(&update->old_oid)) { | |
2550 | strbuf_addf(err, "cannot lock ref '%s': " | |
2551 | "reference already exists", | |
2552 | ref_update_original_update_refname(update)); | |
2553 | return REF_TRANSACTION_ERROR_CREATE_EXISTS; | |
2554 | } else if (is_null_oid(oid)) { | |
2555 | strbuf_addf(err, "cannot lock ref '%s': " | |
2556 | "reference is missing but expected %s", | |
2557 | ref_update_original_update_refname(update), | |
2558 | oid_to_hex(&update->old_oid)); | |
2559 | return REF_TRANSACTION_ERROR_NONEXISTENT_REF; | |
2560 | } | |
2561 | ||
2562 | strbuf_addf(err, "cannot lock ref '%s': is at %s but expected %s", | |
2563 | ref_update_original_update_refname(update), oid_to_hex(oid), | |
2564 | oid_to_hex(&update->old_oid)); | |
2565 | ||
2566 | return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE; | |
2567 | } | |
2568 | ||
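| /* | |
|  * Per-transaction state of the files backend. ref_locks maps refnames | |
|  * to the "struct ref_lock *" held for them; when a later update in the | |
|  * same transaction needs a name that is already locked, | |
|  * lock_ref_for_update() bumps lock->count instead of locking again. | |
|  * packed_transaction and packed_refs_locked track the nested | |
|  * packed-refs transaction used to drop deleted refs from the | |
|  * packed-refs file. | |
|  */ | |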
2569 | struct files_transaction_backend_data { | |
2570 | struct ref_transaction *packed_transaction; | |
2571 | int packed_refs_locked; | |
2572 | struct strmap ref_locks; | |
2573 | }; | |
2574 | ||
2575 | /* | |
2576 | * Prepare for carrying out update: | |
2577 | * - Lock the reference referred to by update. | |
2578 | * - Read the reference under lock. | |
2579 | * - Check that its old OID value (if specified) is correct, and in | |
2580 | * any case record it in update->lock->old_oid for later use when | |
2581 | * writing the reflog. | |
2582 | * - If it is a symref update without REF_NO_DEREF, split it up into a | |
2583 | * REF_LOG_ONLY update of the symref and add a separate update for | |
2584 | * the referent to transaction. | |
2585 | * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY | |
2586 | * update of HEAD. | |
2587 | */ | |
2588 | static enum ref_transaction_error lock_ref_for_update(struct files_ref_store *refs, | |
2589 | struct ref_update *update, | |
2590 | size_t update_idx, | |
2591 | struct ref_transaction *transaction, | |
2592 | const char *head_ref, | |
2593 | struct string_list *refnames_to_check, | |
2594 | struct strbuf *err) | |
2595 | { | |
2596 | struct strbuf referent = STRBUF_INIT; | |
2597 | int mustexist = ref_update_expects_existing_old_ref(update); | |
2598 | struct files_transaction_backend_data *backend_data; | |
2599 | enum ref_transaction_error ret = 0; | |
2600 | struct ref_lock *lock; | |
2601 | ||
2602 | files_assert_main_repository(refs, "lock_ref_for_update"); | |
2603 | ||
2604 | backend_data = transaction->backend_data; | |
2605 | ||
2606 | if ((update->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(update)) | |
2607 | update->flags |= REF_DELETING; | |
2608 | ||
2609 | if (head_ref) { | |
2610 | ret = split_head_update(update, transaction, head_ref, err); | |
2611 | if (ret) | |
2612 | goto out; | |
2613 | } | |
2614 | ||
2615 | lock = strmap_get(&backend_data->ref_locks, update->refname); | |
2616 | if (lock) { | |
2617 | lock->count++; | |
2618 | } else { | |
2619 | ret = lock_raw_ref(refs, update, update_idx, mustexist, | |
2620 | refnames_to_check, &transaction->refnames, | |
2621 | &lock, &referent, err); | |
2622 | if (ret) { | |
2623 | char *reason; | |
2624 | ||
2625 | reason = strbuf_detach(err, NULL); | |
2626 | strbuf_addf(err, "cannot lock ref '%s': %s", | |
2627 | ref_update_original_update_refname(update), reason); | |
2628 | free(reason); | |
2629 | goto out; | |
2630 | } | |
2631 | ||
2632 | strmap_put(&backend_data->ref_locks, update->refname, lock); | |
2633 | } | |
2634 | ||
2635 | update->backend_data = lock; | |
2636 | ||
2637 | if (update->flags & REF_LOG_VIA_SPLIT) { | |
2638 | struct ref_lock *parent_lock; | |
2639 | ||
2640 | if (!update->parent_update) | |
2641 | BUG("split update without a parent"); | |
2642 | ||
2643 | parent_lock = update->parent_update->backend_data; | |
2644 | ||
2645 | /* | |
2646 | * Check that "HEAD" didn't racily change since we have looked | |
2647 | * it up. If it did we must refuse to write the reflog entry. | |
2648 | * | |
2649 | * Note that this does not catch all races: if "HEAD" was | |
2650 | * racily changed to point to one of the refs that are part of | |
2651 | * the transaction, then we would miss writing the split reflog | |
2652 | * entry for "HEAD". | |
2653 | */ | |
2654 | if (!(update->type & REF_ISSYMREF) || | |
2655 | strcmp(update->parent_update->refname, referent.buf)) { | |
2656 | strbuf_addstr(err, "HEAD has been racily updated"); | |
2657 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2658 | goto out; | |
2659 | } | |
2660 | ||
2661 | if (update->flags & REF_HAVE_OLD) { | |
2662 | oidcpy(&lock->old_oid, &update->old_oid); | |
2663 | } else { | |
2664 | oidcpy(&lock->old_oid, &parent_lock->old_oid); | |
2665 | } | |
2666 | } else if (update->type & REF_ISSYMREF) { | |
2667 | if (update->flags & REF_NO_DEREF) { | |
2668 | /* | |
2669 | * We won't be reading the referent as part of | |
2670 | * the transaction, so we have to read it here | |
2671 | * to record and possibly check old_oid: | |
2672 | */ | |
2673 | if (!refs_resolve_ref_unsafe(&refs->base, | |
2674 | referent.buf, 0, | |
2675 | &lock->old_oid, NULL)) { | |
2676 | if (update->flags & REF_HAVE_OLD) { | |
2677 | strbuf_addf(err, "cannot lock ref '%s': " | |
2678 | "error reading reference", | |
2679 | ref_update_original_update_refname(update)); | |
2680 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2681 | goto out; | |
2682 | } | |
2683 | } | |
2684 | ||
2685 | if (update->old_target) | |
2686 | ret = ref_update_check_old_target(referent.buf, update, err); | |
2687 | else | |
2688 | ret = check_old_oid(update, &lock->old_oid, | |
2689 | &referent, err); | |
2690 | if (ret) | |
2691 | goto out; | |
2692 | } else { | |
2693 | /* | |
2694 | * Create a new update for the reference this | |
2695 | * symref is pointing at. Also, we will record | |
2696 | * and verify old_oid for this update as part | |
2697 | * of processing the split-off update, so we | |
2698 | * don't have to do it here. | |
2699 | */ | |
2700 | ret = split_symref_update(update, referent.buf, | |
2701 | transaction, err); | |
2702 | if (ret) | |
2703 | goto out; | |
2704 | } | |
2705 | } else { | |
2706 | struct ref_update *parent_update; | |
2707 | ||
2708 | /* | |
2709 | * If the ref is a regular ref but `old_target` is set, we | |
2710 | * fail with an error: the caller expected a symref. | |
2711 | */ | |
2712 | if (update->old_target) { | |
2713 | strbuf_addf(err, _("cannot lock ref '%s': " | |
2714 | "expected symref with target '%s': " | |
2715 | "but is a regular ref"), | |
2716 | ref_update_original_update_refname(update), | |
2717 | update->old_target); | |
2718 | ret = REF_TRANSACTION_ERROR_EXPECTED_SYMREF; | |
2719 | goto out; | |
2720 | } else { | |
2721 | ret = check_old_oid(update, &lock->old_oid, | |
2722 | &referent, err); | |
2723 | if (ret) { | |
2724 | goto out; | |
2725 | } | |
2726 | } | |
2727 | ||
2728 | /* | |
2729 | * If this update is happening indirectly because of a | |
2730 | * symref update, record the old OID in the parent | |
2731 | * update: | |
2732 | */ | |
2733 | for (parent_update = update->parent_update; | |
2734 | parent_update; | |
2735 | parent_update = parent_update->parent_update) { | |
2736 | struct ref_lock *parent_lock = parent_update->backend_data; | |
2737 | oidcpy(&parent_lock->old_oid, &lock->old_oid); | |
2738 | } | |
2739 | } | |
2740 | ||
2741 | if (update->new_target && !(update->flags & REF_LOG_ONLY)) { | |
2742 | if (create_symref_lock(lock, update->new_target, err)) { | |
2743 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2744 | goto out; | |
2745 | } | |
2746 | ||
2747 | if (close_ref_gently(lock)) { | |
2748 | strbuf_addf(err, "couldn't close '%s.lock'", | |
2749 | update->refname); | |
2750 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2751 | goto out; | |
2752 | } | |
2753 | ||
2754 | /* | |
2755 | * Once we have created the symref lock, the commit | |
2756 | * phase of the transaction only needs to commit the lock. | |
2757 | */ | |
2758 | update->flags |= REF_NEEDS_COMMIT; | |
2759 | } else if ((update->flags & REF_HAVE_NEW) && | |
2760 | !(update->flags & REF_DELETING) && | |
2761 | !(update->flags & REF_LOG_ONLY)) { | |
2762 | if (!(update->type & REF_ISSYMREF) && | |
2763 | oideq(&lock->old_oid, &update->new_oid)) { | |
2764 | /* | |
2765 | * The reference already has the desired | |
2766 | * value, so we don't need to write it. | |
2767 | */ | |
2768 | } else { | |
2769 | ret = write_ref_to_lockfile( | |
2770 | refs, lock, &update->new_oid, | |
2771 | update->flags & REF_SKIP_OID_VERIFICATION, | |
2772 | err); | |
2773 | if (ret) { | |
2774 | char *write_err = strbuf_detach(err, NULL); | |
2775 | ||
2776 | /* | |
2777 | * The lock was freed upon failure of | |
2778 | * write_ref_to_lockfile(): | |
2779 | */ | |
2780 | update->backend_data = NULL; | |
2781 | strbuf_addf(err, | |
2782 | "cannot update ref '%s': %s", | |
2783 | update->refname, write_err); | |
2784 | free(write_err); | |
2785 | goto out; | |
2786 | } else { | |
2787 | update->flags |= REF_NEEDS_COMMIT; | |
2788 | } | |
2789 | } | |
2790 | } | |
2791 | if (!(update->flags & REF_NEEDS_COMMIT)) { | |
2792 | /* | |
2793 | * We didn't call write_ref_to_lockfile(), so | |
2794 | * the lockfile is still open. Close it to | |
2795 | * free up the file descriptor: | |
2796 | */ | |
2797 | if (close_ref_gently(lock)) { | |
2798 | strbuf_addf(err, "couldn't close '%s.lock'", | |
2799 | update->refname); | |
2800 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2801 | goto out; | |
2802 | } | |
2803 | } | |
2804 | ||
2805 | out: | |
2806 | strbuf_release(&referent); | |
2807 | return ret; | |
2808 | } | |
2809 | ||
2810 | /* | |
2811 | * Unlock any references in `transaction` that are still locked, and | |
2812 | * mark the transaction closed. | |
2813 | */ | |
2814 | static void files_transaction_cleanup(struct files_ref_store *refs, | |
2815 | struct ref_transaction *transaction) | |
2816 | { | |
2817 | size_t i; | |
2818 | struct files_transaction_backend_data *backend_data = | |
2819 | transaction->backend_data; | |
2820 | struct strbuf err = STRBUF_INIT; | |
2821 | ||
2822 | for (i = 0; i < transaction->nr; i++) { | |
2823 | struct ref_update *update = transaction->updates[i]; | |
2824 | struct ref_lock *lock = update->backend_data; | |
2825 | ||
2826 | if (lock) { | |
2827 | unlock_ref(lock); | |
2828 | try_remove_empty_parents(refs, update->refname, | |
2829 | REMOVE_EMPTY_PARENTS_REF); | |
2830 | update->backend_data = NULL; | |
2831 | } | |
2832 | } | |
2833 | ||
2834 | if (backend_data) { | |
2835 | if (backend_data->packed_transaction && | |
2836 | ref_transaction_abort(backend_data->packed_transaction, &err)) { | |
2837 | error("error aborting transaction: %s", err.buf); | |
2838 | strbuf_release(&err); | |
2839 | } | |
2840 | ||
2841 | if (backend_data->packed_refs_locked) | |
2842 | packed_refs_unlock(refs->packed_ref_store); | |
2843 | ||
2844 | strmap_clear(&backend_data->ref_locks, 0); | |
2845 | ||
2846 | free(backend_data); | |
2847 | } | |
2848 | ||
2849 | transaction->state = REF_TRANSACTION_CLOSED; | |
2850 | } | |
2851 | ||
2852 | static int files_transaction_prepare(struct ref_store *ref_store, | |
2853 | struct ref_transaction *transaction, | |
2854 | struct strbuf *err) | |
2855 | { | |
2856 | struct files_ref_store *refs = | |
2857 | files_downcast(ref_store, REF_STORE_WRITE, | |
2858 | "ref_transaction_prepare"); | |
2859 | size_t i; | |
2860 | int ret = 0; | |
2861 | struct string_list refnames_to_check = STRING_LIST_INIT_NODUP; | |
2862 | char *head_ref = NULL; | |
2863 | int head_type; | |
2864 | struct files_transaction_backend_data *backend_data; | |
2865 | struct ref_transaction *packed_transaction = NULL; | |
2866 | ||
2867 | assert(err); | |
2868 | ||
2869 | if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL) | |
2870 | goto cleanup; | |
2871 | if (!transaction->nr) | |
2872 | goto cleanup; | |
2873 | ||
2874 | CALLOC_ARRAY(backend_data, 1); | |
2875 | strmap_init(&backend_data->ref_locks); | |
2876 | transaction->backend_data = backend_data; | |
2877 | ||
2878 | /* | |
2879 | * Fail if any of the updates use REF_IS_PRUNING without REF_NO_DEREF. | |
2880 | */ | |
2881 | for (i = 0; i < transaction->nr; i++) { | |
2882 | struct ref_update *update = transaction->updates[i]; | |
2883 | ||
2884 | if ((update->flags & REF_IS_PRUNING) && | |
2885 | !(update->flags & REF_NO_DEREF)) | |
2886 | BUG("REF_IS_PRUNING set without REF_NO_DEREF"); | |
2887 | } | |
2888 | ||
2889 | /* | |
2890 | * Special hack: If a branch is updated directly and HEAD | |
2891 | * points to it (may happen on the remote side of a push | |
2892 | * for example) then logically the HEAD reflog should be | |
2893 | * updated too. | |
2894 | * | |
2895 | * A generic solution would require reverse symref lookups, | |
2896 | * but finding all symrefs pointing to a given branch would be | |
2897 | * rather costly for this rare event (the direct update of a | |
2898 | * branch) to be worth it. So let's cheat and check with HEAD | |
2899 | * only, which should cover 99% of all usage scenarios (even | |
2900 | * 100% of the default ones). | |
2901 | * | |
2902 | * So if HEAD is a symbolic reference, then record the name of | |
2903 | * the reference that it points to. If we see an update of | |
2904 | * head_ref within the transaction, then split_head_update() | |
2905 | * arranges for the reflog of HEAD to be updated, too. | |
2906 | */ | |
2907 | head_ref = refs_resolve_refdup(ref_store, "HEAD", | |
2908 | RESOLVE_REF_NO_RECURSE, | |
2909 | NULL, &head_type); | |
2910 | ||
2911 | if (head_ref && !(head_type & REF_ISSYMREF)) { | |
2912 | FREE_AND_NULL(head_ref); | |
2913 | } | |
2914 | ||
2915 | /* | |
2916 | * Acquire all locks, verify old values if provided, check | |
2917 | * that new values are valid, and write new values to the | |
2918 | * lockfiles, ready to be activated. Only keep one lockfile | |
2919 | * open at a time to avoid running out of file descriptors. | |
2920 | * Note that lock_ref_for_update() might append more updates | |
2921 | * to the transaction. | |
2922 | */ | |
2923 | for (i = 0; i < transaction->nr; i++) { | |
2924 | struct ref_update *update = transaction->updates[i]; | |
2925 | ||
2926 | ret = lock_ref_for_update(refs, update, i, transaction, | |
2927 | head_ref, &refnames_to_check, | |
2928 | err); | |
2929 | if (ret) { | |
2930 | if (ref_transaction_maybe_set_rejected(transaction, i, ret)) { | |
2931 | strbuf_reset(err); | |
2932 | ret = 0; | |
2933 | ||
2934 | continue; | |
2935 | } | |
2936 | goto cleanup; | |
2937 | } | |
2938 | ||
2939 | if (update->flags & REF_DELETING && | |
2940 | !(update->flags & REF_LOG_ONLY) && | |
2941 | !(update->flags & REF_IS_PRUNING)) { | |
2942 | /* | |
2943 | * This reference has to be deleted from | |
2944 | * packed-refs if it exists there. | |
2945 | */ | |
2946 | if (!packed_transaction) { | |
2947 | packed_transaction = ref_store_transaction_begin( | |
2948 | refs->packed_ref_store, | |
2949 | transaction->flags, err); | |
2950 | if (!packed_transaction) { | |
2951 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2952 | goto cleanup; | |
2953 | } | |
2954 | ||
2955 | backend_data->packed_transaction = | |
2956 | packed_transaction; | |
2957 | } | |
2958 | ||
2959 | ref_transaction_add_update( | |
2960 | packed_transaction, update->refname, | |
2961 | REF_HAVE_NEW | REF_NO_DEREF, | |
2962 | &update->new_oid, NULL, | |
2963 | NULL, NULL, NULL, NULL); | |
2964 | } | |
2965 | } | |
2966 | ||
2967 | /* | |
2968 | * Verify that none of the loose references that we're about to write | |
2969 | * conflict with any existing packed references. Ideally, we'd do this | |
2970 | * check after the packed-refs are locked so that the file cannot | |
2971 | * change underneath our feet. But introducing such a lock now would | |
2972 | * probably do more harm than good as users rely on there not being a | |
2973 | * global lock with the "files" backend. | |
2974 | * | |
2975 | * Another alternative would be to do the check after the (optional) | |
2976 | * lock, but that would extend the time we spend in the globally-locked | |
2977 | * state. | |
2978 | * | |
2979 | * So instead, we accept the race for now. | |
2980 | */ | |
2981 | if (refs_verify_refnames_available(refs->packed_ref_store, &refnames_to_check, | |
2982 | &transaction->refnames, NULL, transaction, | |
2983 | 0, err)) { | |
2984 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; | |
2985 | goto cleanup; | |
2986 | } | |
2987 | ||
2988 | if (packed_transaction) { | |
2989 | if (packed_refs_lock(refs->packed_ref_store, 0, err)) { | |
2990 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
2991 | goto cleanup; | |
2992 | } | |
2993 | backend_data->packed_refs_locked = 1; | |
2994 | ||
2995 | if (is_packed_transaction_needed(refs->packed_ref_store, | |
2996 | packed_transaction)) { | |
2997 | ret = ref_transaction_prepare(packed_transaction, err); | |
2998 | /* | |
2999 | * A failure during the prepare step aborts the packed | |
3000 | * transaction itself, but does not free it. Do that now, | |
3001 | * and disconnect it from the files transaction so that the | |
3002 | * cleanup code below does not try to abort it again. | |
3003 | */ | |
3004 | if (ret) { | |
3005 | ref_transaction_free(packed_transaction); | |
3006 | backend_data->packed_transaction = NULL; | |
3007 | } | |
3008 | } else { | |
3009 | /* | |
3010 | * We can skip rewriting the `packed-refs` | |
3011 | * file. But we do need to leave it locked, so | |
3012 | * that somebody else doesn't pack a reference | |
3013 | * that we are trying to delete. | |
3014 | * | |
3015 | * We need to disconnect our transaction from | |
3016 | * backend_data, since the abort (whether successful or | |
3017 | * not) will free it. | |
3018 | */ | |
3019 | backend_data->packed_transaction = NULL; | |
3020 | if (ref_transaction_abort(packed_transaction, err)) { | |
3021 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3022 | goto cleanup; | |
3023 | } | |
3024 | } | |
3025 | } | |
3026 | ||
3027 | cleanup: | |
3028 | free(head_ref); | |
3029 | string_list_clear(&refnames_to_check, 1); | |
3030 | ||
3031 | if (ret) | |
3032 | files_transaction_cleanup(refs, transaction); | |
3033 | else | |
3034 | transaction->state = REF_TRANSACTION_PREPARED; | |
3035 | ||
3036 | return ret; | |
3037 | } | |
3038 | ||
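| /* | |
|  * Write the reflog entry for one queued update. The "old" value | |
|  * normally comes from the lock (lock->old_oid); with | |
|  * REF_LOG_USE_PROVIDED_OIDS the caller-supplied old/new pair is used | |
|  * instead, which is only valid for REF_LOG_ONLY updates that carry | |
|  * both values. For symref updates the new target is resolved first, so | |
|  * that the reflog records an object name rather than a refname. | |
|  */ | |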
3039 | static int parse_and_write_reflog(struct files_ref_store *refs, | |
3040 | struct ref_update *update, | |
3041 | struct ref_lock *lock, | |
3042 | struct strbuf *err) | |
3043 | { | |
3044 | struct object_id *old_oid = &lock->old_oid; | |
3045 | ||
3046 | if (update->flags & REF_LOG_USE_PROVIDED_OIDS) { | |
3047 | if (!(update->flags & REF_HAVE_OLD) || | |
3048 | !(update->flags & REF_HAVE_NEW) || | |
3049 | !(update->flags & REF_LOG_ONLY)) { | |
3050 | strbuf_addf(err, _("trying to write reflog for '%s'" | |
3051 | "with incomplete values"), update->refname); | |
3052 | return REF_TRANSACTION_ERROR_GENERIC; | |
3053 | } | |
3054 | ||
3055 | old_oid = &update->old_oid; | |
3056 | } | |
3057 | ||
3058 | if (update->new_target) { | |
3059 | /* | |
3060 | * We want to get the resolved OID for the target, to ensure | |
3061 | * that the correct value is added to the reflog. | |
3062 | */ | |
3063 | if (!refs_resolve_ref_unsafe(&refs->base, update->new_target, | |
3064 | RESOLVE_REF_READING, | |
3065 | &update->new_oid, NULL)) { | |
3066 | /* | |
3067 | * TODO: currently we skip creating reflogs for dangling | |
3068 | * symref updates. It would be nice to capture these as | |
3069 | * zero-OID updates, though. | |
3070 | */ | |
3071 | return 0; | |
3072 | } | |
3073 | } | |
3074 | ||
3075 | if (files_log_ref_write(refs, lock->ref_name, old_oid, | |
3076 | &update->new_oid, update->committer_info, | |
3077 | update->msg, update->flags, err)) { | |
3078 | char *old_msg = strbuf_detach(err, NULL); | |
3079 | ||
3080 | strbuf_addf(err, "cannot update the ref '%s': %s", | |
3081 | lock->ref_name, old_msg); | |
3082 | free(old_msg); | |
3083 | unlock_ref(lock); | |
3084 | update->backend_data = NULL; | |
3085 | return -1; | |
3086 | } | |
3087 | ||
3088 | return 0; | |
3089 | } | |
3090 | ||
3091 | static int ref_present(const char *refname, const char *referent UNUSED, | |
3092 | const struct object_id *oid UNUSED, | |
3093 | int flags UNUSED, | |
3094 | void *cb_data) | |
3095 | { | |
3096 | struct string_list *affected_refnames = cb_data; | |
3097 | ||
3098 | return string_list_has_string(affected_refnames, refname); | |
3099 | } | |
3100 | ||
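| /* | |
|  * Finish a transaction that has REF_TRANSACTION_FLAG_INITIAL set, as | |
|  * used e.g. while cloning: instead of creating one loose file per ref, | |
|  * the refs are written to packed-refs in a single batch. Updates that | |
|  * packed-refs cannot represent (symrefs, root refs and reflog-only | |
|  * entries) are diverted into a nested loose transaction. | |
|  */ | |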
3101 | static int files_transaction_finish_initial(struct files_ref_store *refs, | |
3102 | struct ref_transaction *transaction, | |
3103 | struct strbuf *err) | |
3104 | { | |
3105 | size_t i; | |
3106 | int ret = 0; | |
3107 | struct string_list affected_refnames = STRING_LIST_INIT_NODUP; | |
3108 | struct string_list refnames_to_check = STRING_LIST_INIT_NODUP; | |
3109 | struct ref_transaction *packed_transaction = NULL; | |
3110 | struct ref_transaction *loose_transaction = NULL; | |
3111 | ||
3112 | assert(err); | |
3113 | ||
3114 | if (transaction->state != REF_TRANSACTION_PREPARED) | |
3115 | BUG("commit called for transaction that is not prepared"); | |
3116 | ||
3117 | /* | |
3118 | * It's really undefined to call this function in an active | |
3119 | * repository or when there are existing references: we are | |
3120 | * only locking and changing packed-refs, so (1) any | |
3121 | * simultaneous processes might try to change a reference at | |
3122 | * the same time we do, and (2) any existing loose versions of | |
3123 | * the references that we are setting would have precedence | |
3124 | * over our values. But some remote helpers create the remote | |
3125 | * "HEAD" and "master" branches before calling this function, | |
3126 | * so here we really only check that none of the references | |
3127 | * that we are creating already exists. | |
3128 | */ | |
3129 | if (refs_for_each_rawref(&refs->base, ref_present, | |
3130 | &transaction->refnames)) | |
3131 | BUG("initial ref transaction called with existing refs"); | |
3132 | ||
3133 | packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, | |
3134 | transaction->flags, err); | |
3135 | if (!packed_transaction) { | |
3136 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3137 | goto cleanup; | |
3138 | } | |
3139 | ||
3140 | for (i = 0; i < transaction->nr; i++) { | |
3141 | struct ref_update *update = transaction->updates[i]; | |
3142 | ||
3143 | if (!(update->flags & REF_LOG_ONLY) && | |
3144 | (update->flags & REF_HAVE_OLD) && | |
3145 | !is_null_oid(&update->old_oid)) | |
3146 | BUG("initial ref transaction with old_sha1 set"); | |
3147 | ||
3148 | string_list_append(&refnames_to_check, update->refname); | |
3149 | ||
3150 | /* | |
3151 | * packed-refs cannot store symbolic refs, root refs, or reflogs, | |
3152 | * so we have to queue these references via the loose transaction. | |
3153 | */ | |
3154 | if (update->new_target || | |
3155 | is_root_ref(update->refname) || | |
3156 | (update->flags & REF_LOG_ONLY)) { | |
3157 | if (!loose_transaction) { | |
3158 | loose_transaction = ref_store_transaction_begin(&refs->base, 0, err); | |
3159 | if (!loose_transaction) { | |
3160 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3161 | goto cleanup; | |
3162 | } | |
3163 | } | |
3164 | ||
3165 | if (update->flags & REF_LOG_ONLY) | |
3166 | ref_transaction_add_update(loose_transaction, update->refname, | |
3167 | update->flags, &update->new_oid, | |
3168 | &update->old_oid, NULL, NULL, | |
3169 | update->committer_info, update->msg); | |
3170 | else | |
3171 | ref_transaction_add_update(loose_transaction, update->refname, | |
3172 | update->flags & ~REF_HAVE_OLD, | |
3173 | update->new_target ? NULL : &update->new_oid, NULL, | |
3174 | update->new_target, NULL, update->committer_info, | |
3175 | NULL); | |
3176 | } else { | |
3177 | ref_transaction_add_update(packed_transaction, update->refname, | |
3178 | update->flags & ~REF_HAVE_OLD, | |
3179 | &update->new_oid, &update->old_oid, | |
3180 | NULL, NULL, update->committer_info, NULL); | |
3181 | } | |
3182 | } | |
3183 | ||
3184 | if (packed_refs_lock(refs->packed_ref_store, 0, err)) { | |
3185 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3186 | goto cleanup; | |
3187 | } | |
3188 | ||
3189 | if (refs_verify_refnames_available(&refs->base, &refnames_to_check, | |
3190 | &affected_refnames, NULL, transaction, | |
3191 | 1, err)) { | |
3192 | packed_refs_unlock(refs->packed_ref_store); | |
3193 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; | |
3194 | goto cleanup; | |
3195 | } | |
3196 | ||
3197 | if (ref_transaction_commit(packed_transaction, err)) { | |
3198 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3199 | goto cleanup; | |
3200 | } | |
3201 | packed_refs_unlock(refs->packed_ref_store); | |
3202 | ||
3203 | if (loose_transaction) { | |
3204 | if (ref_transaction_prepare(loose_transaction, err) || | |
3205 | ref_transaction_commit(loose_transaction, err)) { | |
3206 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3207 | goto cleanup; | |
3208 | } | |
3209 | } | |
3210 | ||
3211 | cleanup: | |
3212 | if (loose_transaction) | |
3213 | ref_transaction_free(loose_transaction); | |
3214 | if (packed_transaction) | |
3215 | ref_transaction_free(packed_transaction); | |
3216 | transaction->state = REF_TRANSACTION_CLOSED; | |
3217 | string_list_clear(&affected_refnames, 0); | |
3218 | string_list_clear(&refnames_to_check, 0); | |
3219 | return ret; | |
3220 | } | |
3221 | ||
3222 | static int files_transaction_finish(struct ref_store *ref_store, | |
3223 | struct ref_transaction *transaction, | |
3224 | struct strbuf *err) | |
3225 | { | |
3226 | struct files_ref_store *refs = | |
3227 | files_downcast(ref_store, 0, "ref_transaction_finish"); | |
3228 | size_t i; | |
3229 | int ret = 0; | |
3230 | struct strbuf sb = STRBUF_INIT; | |
3231 | struct files_transaction_backend_data *backend_data; | |
3232 | struct ref_transaction *packed_transaction; | |
3233 | ||
3234 | ||
3235 | assert(err); | |
3236 | ||
3237 | if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL) | |
3238 | return files_transaction_finish_initial(refs, transaction, err); | |
3239 | if (!transaction->nr) { | |
3240 | transaction->state = REF_TRANSACTION_CLOSED; | |
3241 | return 0; | |
3242 | } | |
3243 | ||
3244 | backend_data = transaction->backend_data; | |
3245 | packed_transaction = backend_data->packed_transaction; | |
3246 | ||
3247 | /* Perform updates first so live commits remain referenced */ | |
3248 | for (i = 0; i < transaction->nr; i++) { | |
3249 | struct ref_update *update = transaction->updates[i]; | |
3250 | struct ref_lock *lock = update->backend_data; | |
3251 | ||
3252 | if (update->rejection_err) | |
3253 | continue; | |
3254 | ||
3255 | if (update->flags & REF_NEEDS_COMMIT || | |
3256 | update->flags & REF_LOG_ONLY) { | |
3257 | if (parse_and_write_reflog(refs, update, lock, err)) { | |
3258 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3259 | goto cleanup; | |
3260 | } | |
3261 | } | |
3262 | ||
3263 | /* | |
3264 | * We try creating a symlink; if that succeeds, we continue to the | |
3265 | * next update. If not, we fall back to creating a regular symref. | |
3266 | */ | |
3267 | if (update->new_target && refs->prefer_symlink_refs) | |
3268 | if (!create_ref_symlink(lock, update->new_target)) | |
3269 | continue; | |
3270 | ||
3271 | if (update->flags & REF_NEEDS_COMMIT) { | |
3272 | clear_loose_ref_cache(refs); | |
3273 | if (commit_ref(lock)) { | |
3274 | strbuf_addf(err, "couldn't set '%s'", lock->ref_name); | |
3275 | unlock_ref(lock); | |
3276 | update->backend_data = NULL; | |
3277 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3278 | goto cleanup; | |
3279 | } | |
3280 | } | |
3281 | } | |
3282 | ||
3283 | /* | |
3284 | * Now that updates are safely completed, we can perform | |
3285 | * deletes. First delete the reflogs of any references that | |
3286 | * will be deleted, since (in the unexpected event of an | |
3287 | * error) leaving a reference without a reflog is less bad | |
3288 | * than leaving a reflog without a reference (the latter is a | |
3289 | * mildly invalid repository state): | |
3290 | */ | |
3291 | for (i = 0; i < transaction->nr; i++) { | |
3292 | struct ref_update *update = transaction->updates[i]; | |
3293 | ||
3294 | if (update->rejection_err) | |
3295 | continue; | |
3296 | ||
3297 | if (update->flags & REF_DELETING && | |
3298 | !(update->flags & REF_LOG_ONLY) && | |
3299 | !(update->flags & REF_IS_PRUNING)) { | |
3300 | strbuf_reset(&sb); | |
3301 | files_reflog_path(refs, &sb, update->refname); | |
3302 | if (!unlink_or_warn(sb.buf)) | |
3303 | try_remove_empty_parents(refs, update->refname, | |
3304 | REMOVE_EMPTY_PARENTS_REFLOG); | |
3305 | } | |
3306 | } | |
3307 | ||
3308 | /* | |
3309 | * Perform deletes now that updates are safely completed. | |
3310 | * | |
3311 | * First delete any packed versions of the references, while | |
3312 | * retaining the packed-refs lock: | |
3313 | */ | |
3314 | if (packed_transaction) { | |
3315 | ret = ref_transaction_commit(packed_transaction, err); | |
3316 | ref_transaction_free(packed_transaction); | |
3317 | packed_transaction = NULL; | |
3318 | backend_data->packed_transaction = NULL; | |
3319 | if (ret) | |
3320 | goto cleanup; | |
3321 | } | |
3322 | ||
3323 | /* Now delete the loose versions of the references: */ | |
3324 | for (i = 0; i < transaction->nr; i++) { | |
3325 | struct ref_update *update = transaction->updates[i]; | |
3326 | struct ref_lock *lock = update->backend_data; | |
3327 | ||
3328 | if (update->rejection_err) | |
3329 | continue; | |
3330 | ||
3331 | if (update->flags & REF_DELETING && | |
3332 | !(update->flags & REF_LOG_ONLY)) { | |
3333 | update->flags |= REF_DELETED_RMDIR; | |
3334 | if (!(update->type & REF_ISPACKED) || | |
3335 | update->type & REF_ISSYMREF) { | |
3336 | /* It is a loose reference. */ | |
3337 | strbuf_reset(&sb); | |
3338 | files_ref_path(refs, &sb, lock->ref_name); | |
3339 | if (unlink_or_msg(sb.buf, err)) { | |
3340 | ret = REF_TRANSACTION_ERROR_GENERIC; | |
3341 | goto cleanup; | |
3342 | } | |
3343 | } | |
3344 | } | |
3345 | } | |
3346 | ||
3347 | clear_loose_ref_cache(refs); | |
3348 | ||
3349 | cleanup: | |
3350 | files_transaction_cleanup(refs, transaction); | |
3351 | ||
3352 | for (i = 0; i < transaction->nr; i++) { | |
3353 | struct ref_update *update = transaction->updates[i]; | |
3354 | ||
3355 | if (update->flags & REF_DELETED_RMDIR) { | |
3356 | /* | |
3357 | * The reference was deleted. Delete any | |
3358 | * empty parent directories. (Note that this | |
3359 | * can only work because we have already | |
3360 | * removed the lockfile.) | |
3361 | */ | |
3362 | try_remove_empty_parents(refs, update->refname, | |
3363 | REMOVE_EMPTY_PARENTS_REF); | |
3364 | } | |
3365 | } | |
3366 | ||
3367 | strbuf_release(&sb); | |
3368 | return ret; | |
3369 | } | |
3370 | ||
3371 | static int files_transaction_abort(struct ref_store *ref_store, | |
3372 | struct ref_transaction *transaction, | |
3373 | struct strbuf *err UNUSED) | |
3374 | { | |
3375 | struct files_ref_store *refs = | |
3376 | files_downcast(ref_store, 0, "ref_transaction_abort"); | |
3377 | ||
3378 | files_transaction_cleanup(refs, transaction); | |
3379 | return 0; | |
3380 | } | |
3381 | ||
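| /* | |
| * State shared between files_reflog_expire() and its | |
| * expire_reflog_ent() callback below. | |
| */ | |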
3382 | struct expire_reflog_cb { | |
3383 | reflog_expiry_should_prune_fn *should_prune_fn; | |
3384 | void *policy_cb; | |
3385 | FILE *newlog; | |
3386 | struct object_id last_kept_oid; | |
3387 | unsigned int rewrite:1, | |
3388 | dry_run:1; | |
3389 | }; | |
3390 | ||
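| /* | |
| * Callback for refs_for_each_reflog_ent(): ask the expiry policy | |
| * whether to prune this entry and, unless this is a dry run, copy the | |
| * surviving entry into the new log in the usual | |
| * "<old-oid> <new-oid> <ident> <timestamp> <tz>\t<message>" format. | |
| */ | |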
3391 | static int expire_reflog_ent(const char *refname UNUSED, | |
3392 | struct object_id *ooid, struct object_id *noid, | |
3393 | const char *email, timestamp_t timestamp, int tz, | |
3394 | const char *message, void *cb_data) | |
3395 | { | |
3396 | struct expire_reflog_cb *cb = cb_data; | |
3397 | reflog_expiry_should_prune_fn *fn = cb->should_prune_fn; | |
3398 | ||
3399 | if (cb->rewrite) | |
3400 | ooid = &cb->last_kept_oid; | |
3401 | ||
3402 | if (fn(ooid, noid, email, timestamp, tz, message, cb->policy_cb)) | |
3403 | return 0; | |
3404 | ||
3405 | if (cb->dry_run) | |
3406 | return 0; /* --dry-run */ | |
3407 | ||
3408 | fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s", oid_to_hex(ooid), | |
3409 | oid_to_hex(noid), email, timestamp, tz, message); | |
3410 | oidcpy(&cb->last_kept_oid, noid); | |
3411 | ||
3412 | return 0; | |
3413 | } | |
3414 | ||
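| /* | |
| * Expire entries from the reflog of `refname`. The ref itself is kept | |
| * locked for the duration; the rewritten log is built under a lockfile | |
| * next to the reflog and committed at the end, optionally updating the | |
| * ref to the last kept entry when EXPIRE_REFLOGS_UPDATE_REF is set. | |
| */ | |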
3415 | static int files_reflog_expire(struct ref_store *ref_store, | |
3416 | const char *refname, | |
3417 | unsigned int expire_flags, | |
3418 | reflog_expiry_prepare_fn prepare_fn, | |
3419 | reflog_expiry_should_prune_fn should_prune_fn, | |
3420 | reflog_expiry_cleanup_fn cleanup_fn, | |
3421 | void *policy_cb_data) | |
3422 | { | |
3423 | struct files_ref_store *refs = | |
3424 | files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire"); | |
3425 | struct lock_file reflog_lock = LOCK_INIT; | |
3426 | struct expire_reflog_cb cb; | |
3427 | struct ref_lock *lock; | |
3428 | struct strbuf log_file_sb = STRBUF_INIT; | |
3429 | char *log_file; | |
3430 | int status = 0; | |
3431 | struct strbuf err = STRBUF_INIT; | |
3432 | const struct object_id *oid; | |
3433 | ||
3434 | memset(&cb, 0, sizeof(cb)); | |
3435 | cb.rewrite = !!(expire_flags & EXPIRE_REFLOGS_REWRITE); | |
3436 | cb.dry_run = !!(expire_flags & EXPIRE_REFLOGS_DRY_RUN); | |
3437 | cb.policy_cb = policy_cb_data; | |
3438 | cb.should_prune_fn = should_prune_fn; | |
3439 | ||
3440 | /* | |
3441 | * The reflog file is locked by holding the lock on the | |
3442 | * reference itself, plus we might need to update the | |
3443 | * reference if --updateref was specified: | |
3444 | */ | |
3445 | lock = lock_ref_oid_basic(refs, refname, &err); | |
3446 | if (!lock) { | |
3447 | error("cannot lock ref '%s': %s", refname, err.buf); | |
3448 | strbuf_release(&err); | |
3449 | return -1; | |
3450 | } | |
3451 | oid = &lock->old_oid; | |
3452 | ||
3453 | /* | |
3454 | * When refs are deleted, their reflog is deleted before the | |
3455 | * ref itself is deleted. This is because there is no separate | |
3456 | * lock for reflog; instead we take a lock on the ref with | |
3457 | * lock_ref_oid_basic(). | |
3458 | * | |
3459 | * If a race happens and the reflog doesn't exist after we've | |
3460 | * acquired the lock, that's OK: we have nothing more to do. We | |
3461 | * were asked to delete the reflog, but someone else deleted it | |
3462 | * already! The caller doesn't care who deleted it, just that it | |
3463 | * is gone. So we can return successfully. | |
3464 | */ | |
3465 | if (!refs_reflog_exists(ref_store, refname)) { | |
3466 | unlock_ref(lock); | |
3467 | return 0; | |
3468 | } | |
3469 | ||
3470 | files_reflog_path(refs, &log_file_sb, refname); | |
3471 | log_file = strbuf_detach(&log_file_sb, NULL); | |
3472 | if (!cb.dry_run) { | |
3473 | /* | |
3474 | * Even though holding $GIT_DIR/logs/$reflog.lock has | |
3475 | * no locking implications, we use the lock_file | |
3476 | * machinery here anyway because it does a lot of the | |
3477 | * work we need, including cleaning up if the program | |
3478 | * exits unexpectedly. | |
3479 | */ | |
3480 | if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) { | |
3481 | struct strbuf err = STRBUF_INIT; | |
3482 | unable_to_lock_message(log_file, errno, &err); | |
3483 | error("%s", err.buf); | |
3484 | strbuf_release(&err); | |
3485 | goto failure; | |
3486 | } | |
3487 | cb.newlog = fdopen_lock_file(&reflog_lock, "w"); | |
3488 | if (!cb.newlog) { | |
3489 | error("cannot fdopen %s (%s)", | |
3490 | get_lock_file_path(&reflog_lock), strerror(errno)); | |
3491 | goto failure; | |
3492 | } | |
3493 | } | |
3494 | ||
3495 | (*prepare_fn)(refname, oid, cb.policy_cb); | |
3496 | refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb); | |
3497 | (*cleanup_fn)(cb.policy_cb); | |
3498 | ||
3499 | if (!cb.dry_run) { | |
3500 | /* | |
3501 | * It doesn't make sense to adjust a reference pointed | |
3502 | * to by a symbolic ref based on expiring entries in | |
3503 | * the symbolic reference's reflog. Nor can we update | |
3504 | * a reference if there are no remaining reflog | |
3505 | * entries. | |
3506 | */ | |
3507 | int update = 0; | |
3508 | ||
3509 | if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) && | |
3510 | !is_null_oid(&cb.last_kept_oid)) { | |
3511 | int type; | |
3512 | const char *ref; | |
3513 | ||
3514 | ref = refs_resolve_ref_unsafe(&refs->base, refname, | |
3515 | RESOLVE_REF_NO_RECURSE, | |
3516 | NULL, &type); | |
3517 | update = !!(ref && !(type & REF_ISSYMREF)); | |
3518 | } | |
3519 | ||
3520 | if (close_lock_file_gently(&reflog_lock)) { | |
3521 | status |= error("couldn't write %s: %s", log_file, | |
3522 | strerror(errno)); | |
3523 | rollback_lock_file(&reflog_lock); | |
3524 | } else if (update && | |
3525 | (write_in_full(get_lock_file_fd(&lock->lk), | |
3526 | oid_to_hex(&cb.last_kept_oid), refs->base.repo->hash_algo->hexsz) < 0 || | |
3527 | write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 || | |
3528 | close_ref_gently(lock) < 0)) { | |
3529 | status |= error("couldn't write %s", | |
3530 | get_lock_file_path(&lock->lk)); | |
3531 | rollback_lock_file(&reflog_lock); | |
3532 | } else if (commit_lock_file(&reflog_lock)) { | |
3533 | status |= error("unable to write reflog '%s' (%s)", | |
3534 | log_file, strerror(errno)); | |
3535 | } else if (update && commit_ref(lock)) { | |
3536 | status |= error("couldn't set %s", lock->ref_name); | |
3537 | } | |
3538 | } | |
3539 | free(log_file); | |
3540 | unlock_ref(lock); | |
3541 | return status; | |
3542 | ||
3543 | failure: | |
3544 | rollback_lock_file(&reflog_lock); | |
3545 | free(log_file); | |
3546 | unlock_ref(lock); | |
3547 | return -1; | |
3548 | } | |
3549 | ||
3550 | static int files_ref_store_create_on_disk(struct ref_store *ref_store, | |
3551 | int flags, | |
3552 | struct strbuf *err UNUSED) | |
3553 | { | |
3554 | struct files_ref_store *refs = | |
3555 | files_downcast(ref_store, REF_STORE_WRITE, "create"); | |
3556 | struct strbuf sb = STRBUF_INIT; | |
3557 | ||
3558 | /* | |
3559 | * We need to create a "refs" dir in any case so that older versions of | |
3560 | * Git can tell that this is a repository. This serves two main purposes: | |
3561 | * | |
3562 | * - Clients will know to stop walking the parent-directory chain when | |
3563 | * detecting the Git repository. Otherwise they may end up detecting | |
3564 | * a Git repository in a parent directory instead. | |
3565 | * | |
3566 | * - Instead of failing to detect a repository with unknown reference | |
3567 | * format altogether, old clients will print an error saying that | |
3568 | * they do not understand the reference format extension. | |
3569 | */ | |
3570 | strbuf_addf(&sb, "%s/refs", ref_store->gitdir); | |
3571 | safe_create_dir(the_repository, sb.buf, 1); | |
3572 | adjust_shared_perm(the_repository, sb.buf); | |
3573 | ||
3574 | /* | |
3575 | * There is no need to create directories for common refs when creating | |
3576 | * a worktree ref store. | |
3577 | */ | |
3578 | if (!(flags & REF_STORE_CREATE_ON_DISK_IS_WORKTREE)) { | |
3579 | /* | |
3580 | * Create .git/refs/{heads,tags} | |
3581 | */ | |
3582 | strbuf_reset(&sb); | |
3583 | files_ref_path(refs, &sb, "refs/heads"); | |
3584 | safe_create_dir(the_repository, sb.buf, 1); | |
3585 | ||
3586 | strbuf_reset(&sb); | |
3587 | files_ref_path(refs, &sb, "refs/tags"); | |
3588 | safe_create_dir(the_repository, sb.buf, 1); | |
3589 | } | |
3590 | ||
3591 | strbuf_release(&sb); | |
3592 | return 0; | |
3593 | } | |
3594 | ||
3595 | struct remove_one_root_ref_data { | |
3596 | const char *gitdir; | |
3597 | struct strbuf *err; | |
3598 | }; | |
3599 | ||
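| /* | |
| * Callback for for_each_root_ref(): unlink a single root ref stored | |
| * directly in the gitdir, reporting failures via data->err. | |
| */ | |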
3600 | static int remove_one_root_ref(const char *refname, | |
3601 | void *cb_data) | |
3602 | { | |
3603 | struct remove_one_root_ref_data *data = cb_data; | |
3604 | struct strbuf buf = STRBUF_INIT; | |
3605 | int ret = 0; | |
3606 | ||
3607 | strbuf_addf(&buf, "%s/%s", data->gitdir, refname); | |
3608 | ||
3609 | ret = unlink(buf.buf); | |
3610 | if (ret < 0) | |
3611 | strbuf_addf(data->err, "could not delete %s: %s\n", | |
3612 | refname, strerror(errno)); | |
3613 | ||
3614 | strbuf_release(&buf); | |
3615 | return ret; | |
3616 | } | |
3617 | ||
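| /* | |
| * Remove the ref store from disk: the "refs/" and "logs/" hierarchies, | |
| * any root refs stored directly in the gitdir, and whatever the packed | |
| * backend keeps on disk. | |
| */ | |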
3618 | static int files_ref_store_remove_on_disk(struct ref_store *ref_store, | |
3619 | struct strbuf *err) | |
3620 | { | |
3621 | struct files_ref_store *refs = | |
3622 | files_downcast(ref_store, REF_STORE_WRITE, "remove"); | |
3623 | struct remove_one_root_ref_data data = { | |
3624 | .gitdir = refs->base.gitdir, | |
3625 | .err = err, | |
3626 | }; | |
3627 | struct strbuf sb = STRBUF_INIT; | |
3628 | int ret = 0; | |
3629 | ||
3630 | strbuf_addf(&sb, "%s/refs", refs->base.gitdir); | |
3631 | if (remove_dir_recursively(&sb, 0) < 0) { | |
3632 | strbuf_addf(err, "could not delete refs: %s", | |
3633 | strerror(errno)); | |
3634 | ret = -1; | |
3635 | } | |
3636 | strbuf_reset(&sb); | |
3637 | ||
3638 | strbuf_addf(&sb, "%s/logs", refs->base.gitdir); | |
3639 | if (remove_dir_recursively(&sb, 0) < 0) { | |
3640 | strbuf_addf(err, "could not delete logs: %s", | |
3641 | strerror(errno)); | |
3642 | ret = -1; | |
3643 | } | |
3644 | strbuf_reset(&sb); | |
3645 | ||
3646 | if (for_each_root_ref(refs, remove_one_root_ref, &data) < 0) | |
3647 | ret = -1; | |
3648 | ||
3649 | if (ref_store_remove_on_disk(refs->packed_ref_store, err) < 0) | |
3650 | ret = -1; | |
3651 | ||
3652 | strbuf_release(&sb); | |
3653 | return ret; | |
3654 | } | |
3655 | ||
3656 | /* | |
3657 | * Refs and reflogs share a unified interface for scanning a whole | |
3658 | * directory. Functions of this type are used as the callback for each | |
3659 | * regular file or symlink in the directory. | |
3660 | */ | |
3661 | typedef int (*files_fsck_refs_fn)(struct ref_store *ref_store, | |
3662 | struct fsck_options *o, | |
3663 | const char *refname, | |
3664 | struct dir_iterator *iter); | |
3665 | ||
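| /* | |
| * Check the target of a symref (or of a symbolic-link symref): it must | |
| * point at a root ref or somewhere under "refs/" or "worktrees/", be a | |
| * well-formed refname and, for regular symref files, be terminated by | |
| * exactly one trailing LF. | |
| */ | |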
3666 | static int files_fsck_symref_target(struct fsck_options *o, | |
3667 | struct fsck_ref_report *report, | |
3668 | struct strbuf *referent, | |
3669 | unsigned int symbolic_link) | |
3670 | { | |
3671 | int is_referent_root; | |
3672 | char orig_last_byte; | |
3673 | size_t orig_len; | |
3674 | int ret = 0; | |
3675 | ||
3676 | orig_len = referent->len; | |
3677 | orig_last_byte = referent->buf[orig_len - 1]; | |
3678 | if (!symbolic_link) | |
3679 | strbuf_rtrim(referent); | |
3680 | ||
3681 | is_referent_root = is_root_ref(referent->buf); | |
3682 | if (!is_referent_root && | |
3683 | !starts_with(referent->buf, "refs/") && | |
3684 | !starts_with(referent->buf, "worktrees/")) { | |
3685 | ret = fsck_report_ref(o, report, | |
3686 | FSCK_MSG_SYMREF_TARGET_IS_NOT_A_REF, | |
3687 | "points to non-ref target '%s'", referent->buf); | |
3688 | ||
3689 | } | |
3690 | ||
3691 | if (!is_referent_root && check_refname_format(referent->buf, 0)) { | |
3692 | ret = fsck_report_ref(o, report, | |
3693 | FSCK_MSG_BAD_REFERENT_NAME, | |
3694 | "points to invalid refname '%s'", referent->buf); | |
3695 | goto out; | |
3696 | } | |
3697 | ||
3698 | if (symbolic_link) | |
3699 | goto out; | |
3700 | ||
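| /* | |
| * After strbuf_rtrim() there are three cases: nothing was trimmed | |
| * (the file lacks a trailing LF), the trailing whitespace did not | |
| * end with LF (also a missing LF), or more than one character was | |
| * trimmed (trailing whitespace or newlines beyond the single LF). | |
| */ | |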
3701 | if (referent->len == orig_len || | |
3702 | (referent->len < orig_len && orig_last_byte != '\n')) { | |
3703 | ret = fsck_report_ref(o, report, | |
3704 | FSCK_MSG_REF_MISSING_NEWLINE, | |
3705 | "misses LF at the end"); | |
3706 | } | |
3707 | ||
3708 | if (referent->len != orig_len && referent->len != orig_len - 1) { | |
3709 | ret = fsck_report_ref(o, report, | |
3710 | FSCK_MSG_TRAILING_REF_CONTENT, | |
3711 | "has trailing whitespaces or newlines"); | |
3712 | } | |
3713 | ||
3714 | out: | |
3715 | return ret; | |
3716 | } | |
3717 | ||
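| /* | |
| * files_fsck_refs_fn checking the content of one loose ref: symbolic | |
| * links are flagged as deprecated and their target verified; regular | |
| * files must parse as an object ID or symref and, for the former, be | |
| * terminated by exactly one trailing LF. | |
| */ | |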
3718 | static int files_fsck_refs_content(struct ref_store *ref_store, | |
3719 | struct fsck_options *o, | |
3720 | const char *target_name, | |
3721 | struct dir_iterator *iter) | |
3722 | { | |
3723 | struct strbuf ref_content = STRBUF_INIT; | |
3724 | struct strbuf abs_gitdir = STRBUF_INIT; | |
3725 | struct strbuf referent = STRBUF_INIT; | |
3726 | struct fsck_ref_report report = { 0 }; | |
3727 | const char *trailing = NULL; | |
3728 | unsigned int type = 0; | |
3729 | int failure_errno = 0; | |
3730 | struct object_id oid; | |
3731 | int ret = 0; | |
3732 | ||
3733 | report.path = target_name; | |
3734 | ||
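| /* | |
| * A symbolic-link symref is reported as deprecated; its target is | |
| * resolved to an absolute path and, if it lies inside the gitdir, | |
| * rewritten relative to it so that the usual target checks apply. | |
| */ | |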
3735 | if (S_ISLNK(iter->st.st_mode)) { | |
3736 | const char *relative_referent_path = NULL; | |
3737 | ||
3738 | ret = fsck_report_ref(o, &report, | |
3739 | FSCK_MSG_SYMLINK_REF, | |
3740 | "use deprecated symbolic link for symref"); | |
3741 | ||
3742 | strbuf_add_absolute_path(&abs_gitdir, ref_store->repo->gitdir); | |
3743 | strbuf_normalize_path(&abs_gitdir); | |
3744 | if (!is_dir_sep(abs_gitdir.buf[abs_gitdir.len - 1])) | |
3745 | strbuf_addch(&abs_gitdir, '/'); | |
3746 | ||
3747 | strbuf_add_real_path(&ref_content, iter->path.buf); | |
3748 | skip_prefix(ref_content.buf, abs_gitdir.buf, | |
3749 | &relative_referent_path); | |
3750 | ||
3751 | if (relative_referent_path) | |
3752 | strbuf_addstr(&referent, relative_referent_path); | |
3753 | else | |
3754 | strbuf_addbuf(&referent, &ref_content); | |
3755 | ||
3756 | ret |= files_fsck_symref_target(o, &report, &referent, 1); | |
3757 | goto cleanup; | |
3758 | } | |
3759 | ||
3760 | if (strbuf_read_file(&ref_content, iter->path.buf, 0) < 0) { | |
3761 | /* | |
3762 | * The ref file could have been removed by a concurrent process. We | |
3763 | * should ignore this error and continue to the next ref. | |
3764 | */ | |
3765 | if (errno == ENOENT) | |
3766 | goto cleanup; | |
3767 | ||
3768 | ret = error_errno(_("cannot read ref file '%s'"), iter->path.buf); | |
3769 | goto cleanup; | |
3770 | } | |
3771 | ||
3772 | if (parse_loose_ref_contents(ref_store->repo->hash_algo, | |
3773 | ref_content.buf, &oid, &referent, | |
3774 | &type, &trailing, &failure_errno)) { | |
3775 | strbuf_rtrim(&ref_content); | |
3776 | ret = fsck_report_ref(o, &report, | |
3777 | FSCK_MSG_BAD_REF_CONTENT, | |
3778 | "%s", ref_content.buf); | |
3779 | goto cleanup; | |
3780 | } | |
3781 | ||
3782 | if (!(type & REF_ISSYMREF)) { | |
3783 | if (!*trailing) { | |
3784 | ret = fsck_report_ref(o, &report, | |
3785 | FSCK_MSG_REF_MISSING_NEWLINE, | |
3786 | "misses LF at the end"); | |
3787 | goto cleanup; | |
3788 | } | |
3789 | if (*trailing != '\n' || *(trailing + 1)) { | |
3790 | ret = fsck_report_ref(o, &report, | |
3791 | FSCK_MSG_TRAILING_REF_CONTENT, | |
3792 | "has trailing garbage: '%s'", trailing); | |
3793 | goto cleanup; | |
3794 | } | |
3795 | } else { | |
3796 | ret = files_fsck_symref_target(o, &report, &referent, 0); | |
3797 | goto cleanup; | |
3798 | } | |
3799 | ||
3800 | cleanup: | |
3801 | strbuf_release(&ref_content); | |
3802 | strbuf_release(&referent); | |
3803 | strbuf_release(&abs_gitdir); | |
3804 | return ret; | |
3805 | } | |
3806 | ||
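| /* | |
| * files_fsck_refs_fn checking the refname itself against | |
| * check_refname_format(), skipping "*.lock" files. | |
| */ | |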
3807 | static int files_fsck_refs_name(struct ref_store *ref_store UNUSED, | |
3808 | struct fsck_options *o, | |
3809 | const char *refname, | |
3810 | struct dir_iterator *iter) | |
3811 | { | |
3812 | struct strbuf sb = STRBUF_INIT; | |
3813 | int ret = 0; | |
3814 | ||
3815 | /* | |
3816 | * Ignore files ending with ".lock", as they may be lock files. | |
3817 | * However, do not allow a bare ".lock" file. | |
3818 | */ | |
3819 | if (iter->basename[0] != '.' && ends_with(iter->basename, ".lock")) | |
3820 | goto cleanup; | |
3821 | ||
3822 | /* | |
3823 | * This works right now because we never check root refs (e.g. "HEAD") here. | |
3824 | */ | |
3825 | if (check_refname_format(refname, 0)) { | |
3826 | struct fsck_ref_report report = { 0 }; | |
3827 | ||
3828 | report.path = refname; | |
3829 | ret = fsck_report_ref(o, &report, | |
3830 | FSCK_MSG_BAD_REF_NAME, | |
3831 | "invalid refname format"); | |
3832 | } | |
3833 | ||
3834 | cleanup: | |
3835 | strbuf_release(&sb); | |
3836 | return ret; | |
3837 | } | |
3838 | ||
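| /* | |
| * Walk `refs_check_dir` underneath the ref store's gitdir with a | |
| * dir_iterator and run each files_fsck_refs_fn on every regular file | |
| * or symlink found there. | |
| */ | |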
3839 | static int files_fsck_refs_dir(struct ref_store *ref_store, | |
3840 | struct fsck_options *o, | |
3841 | const char *refs_check_dir, | |
3842 | struct worktree *wt, | |
3843 | files_fsck_refs_fn *fsck_refs_fn) | |
3844 | { | |
3845 | struct strbuf refname = STRBUF_INIT; | |
3846 | struct strbuf sb = STRBUF_INIT; | |
3847 | struct dir_iterator *iter; | |
3848 | int iter_status; | |
3849 | int ret = 0; | |
3850 | ||
3851 | strbuf_addf(&sb, "%s/%s", ref_store->gitdir, refs_check_dir); | |
3852 | ||
3853 | iter = dir_iterator_begin(sb.buf, 0); | |
3854 | if (!iter) { | |
3855 | if (errno == ENOENT && !is_main_worktree(wt)) | |
3856 | goto out; | |
3857 | ||
3858 | ret = error_errno(_("cannot open directory %s"), sb.buf); | |
3859 | goto out; | |
3860 | } | |
3861 | ||
3862 | while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) { | |
3863 | if (S_ISDIR(iter->st.st_mode)) { | |
3864 | continue; | |
3865 | } else if (S_ISREG(iter->st.st_mode) || | |
3866 | S_ISLNK(iter->st.st_mode)) { | |
3867 | strbuf_reset(&refname); | |
3868 | ||
3869 | if (!is_main_worktree(wt)) | |
3870 | strbuf_addf(&refname, "worktrees/%s/", wt->id); | |
3871 | strbuf_addf(&refname, "%s/%s", refs_check_dir, | |
3872 | iter->relative_path); | |
3873 | ||
3874 | if (o->verbose) | |
3875 | fprintf_ln(stderr, "Checking %s", refname.buf); | |
3876 | ||
3877 | for (size_t i = 0; fsck_refs_fn[i]; i++) { | |
3878 | if (fsck_refs_fn[i](ref_store, o, refname.buf, iter)) | |
3879 | ret = -1; | |
3880 | } | |
3881 | } else { | |
3882 | struct fsck_ref_report report = { .path = iter->basename }; | |
3883 | if (fsck_report_ref(o, &report, | |
3884 | FSCK_MSG_BAD_REF_FILETYPE, | |
3885 | "unexpected file type")) | |
3886 | ret = -1; | |
3887 | } | |
3888 | } | |
3889 | ||
3890 | if (iter_status != ITER_DONE) | |
3891 | ret = error(_("failed to iterate over '%s'"), sb.buf); | |
3892 | ||
3893 | out: | |
3894 | dir_iterator_free(iter); | |
3895 | strbuf_release(&sb); | |
3896 | strbuf_release(&refname); | |
3897 | return ret; | |
3898 | } | |
3899 | ||
3900 | static int files_fsck_refs(struct ref_store *ref_store, | |
3901 | struct fsck_options *o, | |
3902 | struct worktree *wt) | |
3903 | { | |
3904 | files_fsck_refs_fn fsck_refs_fn[] = { | |
3905 | files_fsck_refs_name, | |
3906 | files_fsck_refs_content, | |
3907 | NULL, | |
3908 | }; | |
3909 | ||
3910 | if (o->verbose) | |
3911 | fprintf_ln(stderr, _("Checking references consistency")); | |
3912 | return files_fsck_refs_dir(ref_store, o, "refs", wt, fsck_refs_fn); | |
3913 | } | |
3914 | ||
3915 | static int files_fsck(struct ref_store *ref_store, | |
3916 | struct fsck_options *o, | |
3917 | struct worktree *wt) | |
3918 | { | |
3919 | struct files_ref_store *refs = | |
3920 | files_downcast(ref_store, REF_STORE_READ, "fsck"); | |
3921 | ||
3922 | return files_fsck_refs(ref_store, o, wt) | | |
3923 | refs->packed_ref_store->be->fsck(refs->packed_ref_store, o, wt); | |
3924 | } | |
3925 | ||
3926 | struct ref_storage_be refs_be_files = { | |
3927 | .name = "files", | |
3928 | .init = files_ref_store_init, | |
3929 | .release = files_ref_store_release, | |
3930 | .create_on_disk = files_ref_store_create_on_disk, | |
3931 | .remove_on_disk = files_ref_store_remove_on_disk, | |
3932 | ||
3933 | .transaction_prepare = files_transaction_prepare, | |
3934 | .transaction_finish = files_transaction_finish, | |
3935 | .transaction_abort = files_transaction_abort, | |
3936 | ||
3937 | .pack_refs = files_pack_refs, | |
3938 | .rename_ref = files_rename_ref, | |
3939 | .copy_ref = files_copy_ref, | |
3940 | ||
3941 | .iterator_begin = files_ref_iterator_begin, | |
3942 | .read_raw_ref = files_read_raw_ref, | |
3943 | .read_symbolic_ref = files_read_symbolic_ref, | |
3944 | ||
3945 | .reflog_iterator_begin = files_reflog_iterator_begin, | |
3946 | .for_each_reflog_ent = files_for_each_reflog_ent, | |
3947 | .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse, | |
3948 | .reflog_exists = files_reflog_exists, | |
3949 | .create_reflog = files_create_reflog, | |
3950 | .delete_reflog = files_delete_reflog, | |
3951 | .reflog_expire = files_reflog_expire, | |
3952 | ||
3953 | .fsck = files_fsck, | |
3954 | }; |