/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#define DBRT_DEBUG 1

#include "cache.h"

#include "object.h"
#include "tree.h"
#include "cache-tree.h"
#include <sys/time.h>
#include <signal.h>
#include "builtin.h"

static int reset = 0;
static int merge = 0;
static int update = 0;
static int index_only = 0;
static int nontrivial_merge = 0;
static int trivial_merges_only = 0;
static int aggressive = 0;
static int verbose_update = 0;
static volatile int progress_update = 0;
static const char *prefix = NULL;

static int head_idx = -1;
static int merge_size = 0;

static struct object_list *trees = NULL;

static struct cache_entry df_conflict_entry = {
};

static struct tree_entry_list df_conflict_list = {
	.name = NULL,
	.next = &df_conflict_list
};

typedef int (*merge_fn_t)(struct cache_entry **src);

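/*
 * Compare two names as they would sort among tree entries: a directory
 * entry is compared as if its name ended with '/'.  Returns <0, 0 or >0,
 * like memcmp().
 */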
static int entcmp(char *name1, int dir1, char *name2, int dir2)
{
	int len1 = strlen(name1);
	int len2 = strlen(name2);
	int len = len1 < len2 ? len1 : len2;
	int ret = memcmp(name1, name2, len);
	unsigned char c1, c2;
	if (ret)
		return ret;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && dir1)
		c1 = '/';
	if (!c2 && dir2)
		c2 = '/';
	ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
	if (c1 && c2 && !ret)
		ret = len1 - len2;
	return ret;
}

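/*
 * Walk the given tree entry lists (and, when merging, the index) in
 * parallel under "base", one path at a time, handing each path's set of
 * stages to the merge function "fn" (or adding the entries directly to
 * the index when not merging).  Recurses into subdirectories with an
 * extended base.  Returns 0 on success, -1 on error.
 */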
static int unpack_trees_rec(struct tree_entry_list **posns, int len,
			    const char *base, merge_fn_t fn, int *indpos)
{
	int baselen = strlen(base);
	int src_size = len + 1;
	do {
		int i;
		char *first;
		int firstdir = 0;
		int pathlen;
		unsigned ce_size;
		struct tree_entry_list **subposns;
		struct cache_entry **src;
		int any_files = 0;
		int any_dirs = 0;
		char *cache_name;
		int ce_stage;

		/* Find the first name in the input. */

		first = NULL;
		cache_name = NULL;

		/* Check the cache */
		if (merge && *indpos < active_nr) {
			/* This is a bit tricky: */
			/* If the index has a subdirectory (with
			 * contents) as the first name, it'll get a
			 * filename like "foo/bar". But that's after
			 * "foo", so the entry in trees will get
			 * handled first, at which point we'll go into
			 * "foo", and deal with "bar" from the index,
			 * because the base will be "foo/". The only
			 * way we can actually have "foo/bar" first of
			 * all the things is if the trees don't
			 * contain "foo" at all, in which case we'll
			 * handle "foo/bar" without going into the
			 * directory, but that's fine (and will return
			 * an error anyway, with the added unknown
			 * file case).
			 */

			cache_name = active_cache[*indpos]->name;
			if (strlen(cache_name) > baselen &&
			    !memcmp(cache_name, base, baselen)) {
				cache_name += baselen;
				first = cache_name;
			} else {
				cache_name = NULL;
			}
		}

#if DBRT_DEBUG > 1
		if (first)
			printf("index %s\n", first);
#endif
		for (i = 0; i < len; i++) {
			if (!posns[i] || posns[i] == &df_conflict_list)
				continue;
#if DBRT_DEBUG > 1
			printf("%d %s\n", i + 1, posns[i]->name);
#endif
			if (!first || entcmp(first, firstdir,
					     posns[i]->name,
					     posns[i]->directory) > 0) {
				first = posns[i]->name;
				firstdir = posns[i]->directory;
			}
		}
		/* No name means we're done */
		if (!first)
			return 0;

		pathlen = strlen(first);
		ce_size = cache_entry_size(baselen + pathlen);

		src = xcalloc(src_size, sizeof(struct cache_entry *));

		subposns = xcalloc(len, sizeof(struct tree_entry_list *));

		if (cache_name && !strcmp(cache_name, first)) {
			any_files = 1;
			src[0] = active_cache[*indpos];
			remove_cache_entry_at(*indpos);
		}

		for (i = 0; i < len; i++) {
			struct cache_entry *ce;

			if (!posns[i] ||
			    (posns[i] != &df_conflict_list &&
			     strcmp(first, posns[i]->name))) {
				continue;
			}

			if (posns[i] == &df_conflict_list) {
				src[i + merge] = &df_conflict_entry;
				continue;
			}

			if (posns[i]->directory) {
				any_dirs = 1;
				parse_tree(posns[i]->item.tree);
				subposns[i] = posns[i]->item.tree->entries;
				posns[i] = posns[i]->next;
				src[i + merge] = &df_conflict_entry;
				continue;
			}

			if (!merge)
				ce_stage = 0;
			else if (i + 1 < head_idx)
				ce_stage = 1;
			else if (i + 1 > head_idx)
				ce_stage = 3;
			else
				ce_stage = 2;

			ce = xcalloc(1, ce_size);
			ce->ce_mode = create_ce_mode(posns[i]->mode);
			ce->ce_flags = create_ce_flags(baselen + pathlen,
						       ce_stage);
			memcpy(ce->name, base, baselen);
			memcpy(ce->name + baselen, first, pathlen + 1);

			any_files = 1;

			memcpy(ce->sha1, posns[i]->item.any->sha1, 20);
			src[i + merge] = ce;
			subposns[i] = &df_conflict_list;
			posns[i] = posns[i]->next;
		}
		if (any_files) {
			if (merge) {
				int ret;

#if DBRT_DEBUG > 1
				printf("%s:\n", first);
				for (i = 0; i < src_size; i++) {
					printf(" %d ", i);
					if (src[i])
						printf("%s\n", sha1_to_hex(src[i]->sha1));
					else
						printf("\n");
				}
#endif
				ret = fn(src);

#if DBRT_DEBUG > 1
				printf("Added %d entries\n", ret);
#endif
				*indpos += ret;
			} else {
				for (i = 0; i < src_size; i++) {
					if (src[i]) {
						add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
					}
				}
			}
		}
		if (any_dirs) {
			char *newbase = xmalloc(baselen + 2 + pathlen);
			memcpy(newbase, base, baselen);
			memcpy(newbase + baselen, first, pathlen);
			newbase[baselen + pathlen] = '/';
			newbase[baselen + pathlen + 1] = '\0';
			if (unpack_trees_rec(subposns, len, newbase, fn,
					     indpos))
				return -1;
			free(newbase);
		}
		free(subposns);
		free(src);
	} while (1);
}

static void reject_merge(struct cache_entry *ce)
{
	die("Entry '%s' would be overwritten by merge. Cannot merge.",
	    ce->name);
}

/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
	char *cp, *prev;

	if (unlink(name))
		return;
	prev = NULL;
	while (1) {
		int status;
		cp = strrchr(name, '/');
		if (prev)
			*prev = '/';
		if (!cp)
			break;

		*cp = 0;
		status = rmdir(name);
		if (status) {
			*cp = '/';
			break;
		}
		prev = cp;
	}
}

static void progress_interval(int signum)
{
	progress_update = 1;
}

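/*
 * Arrange for SIGALRM to fire once per second so the checkout loop in
 * check_updates() can refresh its progress display.
 */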
static void setup_progress_signal(void)
{
	struct sigaction sa;
	struct itimerval v;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = progress_interval;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGALRM, &sa, NULL);

	v.it_interval.tv_sec = 1;
	v.it_interval.tv_usec = 0;
	v.it_value = v.it_interval;
	setitimer(ITIMER_REAL, &v, NULL);
}

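/*
 * Apply the merge result to the working tree (when "-u" is in effect):
 * entries whose ce_mode was cleared are unlinked, entries marked
 * CE_UPDATE are checked out.  With "-v", progress is shown for larger
 * updates.
 */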
static void check_updates(struct cache_entry **src, int nr)
{
	static struct checkout state = {
		.base_dir = "",
		.force = 1,
		.quiet = 1,
		.refresh_cache = 1,
	};
	unsigned short mask = htons(CE_UPDATE);
	unsigned last_percent = 200, cnt = 0, total = 0;

	if (update && verbose_update) {
		for (total = cnt = 0; cnt < nr; cnt++) {
			struct cache_entry *ce = src[cnt];
			if (!ce->ce_mode || ce->ce_flags & mask)
				total++;
		}

		/* Don't bother doing this for very small updates */
		if (total < 250)
			total = 0;

		if (total) {
			fprintf(stderr, "Checking files out...\n");
			setup_progress_signal();
			progress_update = 1;
		}
		cnt = 0;
	}

	while (nr--) {
		struct cache_entry *ce = *src++;

		if (total) {
			if (!ce->ce_mode || ce->ce_flags & mask) {
				unsigned percent;
				cnt++;
				percent = (cnt * 100) / total;
				if (percent != last_percent ||
				    progress_update) {
					fprintf(stderr, "%4u%% (%u/%u) done\r",
						percent, cnt, total);
					last_percent = percent;
				}
			}
		}
		if (!ce->ce_mode) {
			if (update)
				unlink_entry(ce->name);
			continue;
		}
		if (ce->ce_flags & mask) {
			ce->ce_flags &= ~mask;
			if (update)
				checkout_entry(ce, &state, NULL);
		}
	}
	if (total) {
		signal(SIGALRM, SIG_IGN);
		fputc('\n', stderr);
	}
}

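/*
 * Drive the merge: walk all trees on the global "trees" list (plus the
 * index when merging) through unpack_trees_rec() with the chosen merge
 * function, then update the working tree via check_updates().
 */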
static int unpack_trees(merge_fn_t fn)
{
	int indpos = 0;
	unsigned len = object_list_length(trees);
	struct tree_entry_list **posns;
	int i;
	struct object_list *posn = trees;
	merge_size = len;

	if (len) {
		posns = xmalloc(len * sizeof(struct tree_entry_list *));
		for (i = 0; i < len; i++) {
			posns[i] = ((struct tree *) posn->item)->entries;
			posn = posn->next;
		}
		if (unpack_trees_rec(posns, len, prefix ? prefix : "",
				     fn, &indpos))
			return -1;
	}

	if (trivial_merges_only && nontrivial_merge)
		die("Merge requires file-level merging");

	check_updates(active_cache, active_nr);
	return 0;
}

static int list_tree(unsigned char *sha1)
{
	struct tree *tree = parse_tree_indirect(sha1);
	if (!tree)
		return -1;
	object_list_append(&tree->object, &trees);
	return 0;
}

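/*
 * Two cache entries are "the same" if both exist and record the same
 * mode and object name; two missing entries also count as the same.
 */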
static int same(struct cache_entry *a, struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	return a->ce_mode == b->ce_mode &&
	       !memcmp(a->sha1, b->sha1, 20);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static void verify_uptodate(struct cache_entry *ce)
{
	struct stat st;

	if (index_only || reset)
		return;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ce_match_stat(ce, &st, 1);
		if (!changed)
			return;
		errno = 0;
	}
	if (reset) {
		ce->ce_flags |= htons(CE_UPDATE);
		return;
	}
	if (errno == ENOENT)
		return;
	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}

static void invalidate_ce_path(struct cache_entry *ce)
{
	if (ce)
		cache_tree_invalidate_path(active_cache_tree, ce->name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked.
 */
static void verify_absent(const char *path, const char *action)
{
	struct stat st;

	if (index_only || reset || !update)
		return;
	if (!lstat(path, &st))
		die("Untracked working tree file '%s' "
		    "would be %s by merge.", path, action);
}

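/*
 * Use "merge" as the result for this path.  If the existing index entry
 * records the same content, its stat data is carried over; otherwise the
 * entry stays marked CE_UPDATE so check_updates() will check it out.
 */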
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
	merge->ce_flags |= htons(CE_UPDATE);
	if (old) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on
		 * a match.
		 */
		if (same(old, merge)) {
			*merge = *old;
		} else {
			verify_uptodate(old);
			invalidate_ce_path(old);
		}
	}
	else {
		verify_absent(merge->name, "overwritten");
		invalidate_ce_path(merge);
	}

	merge->ce_flags &= ~htons(CE_STAGEMASK);
	add_cache_entry(merge, ADD_CACHE_OK_TO_ADD);
	return 1;
}

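/*
 * Record that this path goes away in the result: the entry is kept in
 * the index with ce_mode cleared, so check_updates() will unlink it.
 */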
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
	if (old)
		verify_uptodate(old);
	else
		verify_absent(ce->name, "removed");
	ce->ce_mode = 0;
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	invalidate_ce_path(ce);
	return 1;
}

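/* Keep the existing entry unchanged in the resulting index. */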
static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ntohl(ce->ce_mode),
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif

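/*
 * Three-way (or more) merge.  stages[0] is the current index entry for
 * the path, stages[head_idx] is HEAD, stages[head_idx + 1] is the
 * remote, and stages[1] through stages[head_idx - 1] are the common
 * ancestors.  The #NN numbers in the comments refer to the trivial
 * merge cases of the three-way merge.
 */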
static int threeway_merge(struct cache_entry **stages)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;
	const char *path = NULL;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else {
			if (!path)
				path = stages[i]->name;
			no_anc_exists = 0;
		}
	}

	index = stages[0];
	head = stages[head_idx];

	if (head == &df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == &df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	if (!path && index)
		path = index->name;
	if (!path && head)
		path = head->name;
	if (!path && remote)
		path = remote->name;

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index);
			else if (path)
				verify_absent(path, "removed");
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index);
	}
	else if (path)
		verify_absent(path, "overwritten");

	nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense. For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
static int twoway_merge(struct cache_entry **src)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1], *newtree = src[2];

	if (merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     merge_size);

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19 */
		     same(current, newtree))) {
			return keep_entry(current);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current);
		}
		else {
			/* all other failures */
			if (oldtree)
				reject_merge(oldtree);
			if (current)
				reject_merge(current);
			if (newtree)
				reject_merge(newtree);
			return -1;
		}
	}
	else if (newtree)
		return merged_entry(newtree, current);
	else
		return deleted_entry(oldtree, current);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
static int bind_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     merge_size);
	if (a && old)
		die("Entry '%s' overlaps. Cannot bind.", a->name);
	if (!a)
		return keep_entry(old);
	else
		return merged_entry(a, NULL);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
static int oneway_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     merge_size);

	if (!a) {
		invalidate_ce_path(old);
		return deleted_entry(old, old);
	}
	if (old && same(old, a)) {
		if (reset) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ce_match_stat(old, &st, 1))
				old->ce_flags |= htons(CE_UPDATE);
		}
		return keep_entry(old);
	}
	return merged_entry(a, old);
}

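/*
 * Read the index, dropping any unmerged (higher stage) entries and
 * invalidating the cache-tree for their paths.  Returns the number of
 * entries dropped, so callers can notice an unmerged index.
 */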
static int read_cache_unmerged(void)
{
	int i, deleted;
	struct cache_entry **dst;

	read_cache();
	dst = active_cache;
	deleted = 0;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			deleted++;
			invalidate_ce_path(ce);
			continue;
		}
		if (deleted)
			*dst = ce;
		dst++;
	}
	active_nr -= deleted;
	return deleted;
}

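/*
 * Fill a cache-tree node (and, recursively, its subtrees) straight from
 * a parsed tree object, counting the entries it covers.
 */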
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_entry_list *ent;
	int cnt;

	memcpy(it->sha1, tree->object.sha1, 20);
	for (cnt = 0, ent = tree->entries; ent; ent = ent->next) {
		if (!ent->directory)
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = (struct tree *)ent->item.tree;
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, ent->name);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

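/*
 * Rebuild the cache-tree from the first tree that was read; used when
 * the resulting index is known to correspond to a single tree.
 */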
static void prime_cache_tree(void)
{
	struct tree *tree = (struct tree *)trees->item;
	if (!tree)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, tree);
}

static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";

static struct cache_file cache_file;

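/*
 * Parse the command line, read the given tree(s) into the index with
 * the selected merge strategy, and write out the resulting index.
 */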
int cmd_read_tree(int argc, const char **argv, char **envp)
{
	int i, newfd, stage = 0;
	unsigned char sha1[20];
	merge_fn_t fn = NULL;

	setup_git_directory();
	git_config(git_default_config);

	newfd = hold_index_file_for_update(&cache_file, get_index_file());
	if (newfd < 0)
		die("unable to create new cachefile");

	merge = 0;
	reset = 0;
	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		/* "-u" means "update", meaning that a merge will update
		 * the working tree.
		 */
		if (!strcmp(arg, "-u")) {
			update = 1;
			continue;
		}

		if (!strcmp(arg, "-v")) {
			verbose_update = 1;
			continue;
		}

		/* "-i" means "index only", meaning that a merge will
		 * not even look at the working tree.
		 */
		if (!strcmp(arg, "-i")) {
			index_only = 1;
			continue;
		}

		/* "--prefix=<subdirectory>/" means keep the current index
		 * entries and put the entries from the tree under the
		 * given subdirectory.
		 */
		if (!strncmp(arg, "--prefix=", 9)) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			prefix = arg + 9;
			merge = 1;
			stage = 1;
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			continue;
		}

		/* This differs from "-m" in that we'll silently ignore unmerged entries */
		if (!strcmp(arg, "--reset")) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			reset = 1;
			merge = 1;
			stage = 1;
			read_cache_unmerged();
			continue;
		}

		if (!strcmp(arg, "--trivial")) {
			trivial_merges_only = 1;
			continue;
		}

		if (!strcmp(arg, "--aggressive")) {
			aggressive = 1;
			continue;
		}

		/* "-m" stands for "merge", meaning we start in stage 1 */
		if (!strcmp(arg, "-m")) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			stage = 1;
			merge = 1;
			continue;
		}

		/* using -u and -i at the same time makes no sense */
		if (1 < index_only + update)
			usage(read_tree_usage);

		if (get_sha1(arg, sha1))
			die("Not a valid object name %s", arg);
		if (list_tree(sha1) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
	if ((update||index_only) && !merge)
		usage(read_tree_usage);

	if (prefix) {
		int pfxlen = strlen(prefix);
		int pos;
		if (prefix[pfxlen-1] != '/')
			die("prefix must end with /");
		if (stage != 2)
			die("binding merge takes only one tree");
		pos = cache_name_pos(prefix, pfxlen);
		if (0 <= pos)
			die("corrupt index file");
		pos = -pos-1;
		if (pos < active_nr &&
		    !strncmp(active_cache[pos]->name, prefix, pfxlen))
			die("subdirectory '%s' already exists.", prefix);
		pos = cache_name_pos(prefix, pfxlen-1);
		if (0 <= pos)
			die("file '%.*s' already exists.", pfxlen-1, prefix);
	}

	if (merge) {
		if (stage < 2)
			die("just how do you expect me to merge %d trees?", stage-1);
		switch (stage - 1) {
		case 1:
			fn = prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			fn = twoway_merge;
			break;
		case 3:
		default:
			fn = threeway_merge;
			cache_tree_free(&active_cache_tree);
			break;
		}

		if (stage - 1 >= 3)
			head_idx = stage - 2;
		else
			head_idx = 1;
	}

	unpack_trees(fn);

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (trees && trees->item && (!merge || (stage == 2))) {
		cache_tree_free(&active_cache_tree);
		prime_cache_tree();
	}

	if (write_cache(newfd, active_cache, active_nr) ||
	    commit_index_file(&cache_file))
		die("unable to write new index file");
	return 0;
}