2 * GIT - The information manager from hell
4 * Copyright (C) Linus Torvalds, 2005
12 #include "cache-tree.h"
/*
 * Global option flags and merge state for git-read-tree.
 * NOTE(review): this extract is garbled — each declaration is split across
 * lines and the initializer bodies of df_conflict_entry / df_conflict_list
 * are truncated (original lines 34-40 partially missing). Verify against
 * upstream git read-tree.c before editing.
 */
/* -u: update working tree files after a successful merge. */
19 static int update
= 0;
/* -i: operate on the index only; never touch the working tree. */
20 static int index_only
= 0;
/* Set by threeway_merge when a non-trivial (file-level) merge was needed. */
21 static int nontrivial_merge
= 0;
/* --trivial: refuse to proceed when file-level merging would be required. */
22 static int trivial_merges_only
= 0;
/* --aggressive: resolve extra trivial delete/add cases (see threeway_merge). */
23 static int aggressive
= 0;
/* -v: show checkout progress on stderr. */
24 static int verbose_update
= 0;
/* Flag flipped from the SIGALRM handler — volatile, written asynchronously. */
25 static volatile int progress_update
= 0;
/* --prefix=<dir>/: read the tree under this subdirectory (bind merge). */
26 static const char *prefix
= NULL
;
/* Stage index of "head" among the merged trees; -1 until set in cmd_read_tree. */
28 static int head_idx
= -1;
29 static int merge_size
= 0;
/* Linked list of the trees named on the command line (see list_tree). */
31 static struct object_list
*trees
= NULL
;
/* Sentinel entry/list marking a directory/file (D/F) conflict slot. */
33 static struct cache_entry df_conflict_entry
= {
/* NOTE(review): initializer body truncated in this extract. */
36 static struct tree_entry_list df_conflict_list
= {
/* Self-referencing next pointer makes the sentinel list recognizable. */
38 .next
= &df_conflict_list
/* A merge function consumes one row of stage entries (src[0..merge_size]). */
41 typedef int (*merge_fn_t
)(struct cache_entry
**src
);
/*
 * Compare two entry names, where each name may denote a directory (dir1/dir2
 * flags), for tree-traversal ordering.
 * NOTE(review): original lines 49-57 are missing from this extract — the code
 * that picks c1/c2 (presumably the first differing characters, with '/'
 * substituted for directory entries) is not visible here; verify upstream.
 */
43 static int entcmp(char *name1
, int dir1
, char *name2
, int dir2
)
45 int len1
= strlen(name1
);
46 int len2
= strlen(name2
);
/* Compare only the common prefix length first. */
47 int len
= len1
< len2
? len1
: len2
;
48 int ret
= memcmp(name1
, name2
, len
);
/* Tie-break on the post-prefix characters c1/c2 (defined in missing lines). */
58 ret
= (c1
< c2
) ? -1 : (c1
> c2
) ? 1 : 0;
/*
 * Recursively walk `len` parallel tree-entry lists (one per merged tree),
 * merging them name-by-name with the index (via *indpos) under `base`,
 * calling `fn` on each row of stage entries and recursing into
 * subdirectories with an extended base path.
 * NOTE(review): this extract is heavily garbled — many original lines
 * (e.g. 69-74, 77-81, 103-105, 110-117, 131-136, 139-149, 152-158,
 * 161-165, 170-181, 185-200, 206-239) are missing, so control flow
 * (braces, early returns, the merge/df-conflict bookkeeping) cannot be
 * fully reconstructed from what is shown. Verify against upstream git.
 */
64 static int unpack_trees_rec(struct tree_entry_list
**posns
, int len
,
65 const char *base
, merge_fn_t fn
, int *indpos
)
67 int baselen
= strlen(base
);
/* One slot per tree plus one for the index entry at src[0] when merging. */
68 int src_size
= len
+ 1;
75 struct tree_entry_list
**subposns
;
76 struct cache_entry
**src
;
82 /* Find the first name in the input. */
88 if (merge
&& *indpos
< active_nr
) {
89 /* This is a bit tricky: */
90 /* If the index has a subdirectory (with
91 * contents) as the first name, it'll get a
92 * filename like "foo/bar". But that's after
93 * "foo", so the entry in trees will get
94 * handled first, at which point we'll go into
95 * "foo", and deal with "bar" from the index,
96 * because the base will be "foo/". The only
97 * way we can actually have "foo/bar" first of
98 * all the things is if the trees don't
99 * contain "foo" at all, in which case we'll
100 * handle "foo/bar" without going into the
101 * directory, but that's fine (and will return
102 * an error anyway, with the added unknown
106 cache_name
= active_cache
[*indpos
]->name
;
/* Only consider the index entry if it lives under the current base. */
107 if (strlen(cache_name
) > baselen
&&
108 !memcmp(cache_name
, base
, baselen
)) {
109 cache_name
+= baselen
;
118 printf("index %s\n", first
);
/* Scan all tree lists for the lexically first remaining name. */
120 for (i
= 0; i
< len
; i
++) {
121 if (!posns
[i
] || posns
[i
] == &df_conflict_list
)
124 printf("%d %s\n", i
+ 1, posns
[i
]->name
);
126 if (!first
|| entcmp(first
, firstdir
,
128 posns
[i
]->directory
) > 0) {
129 first
= posns
[i
]->name
;
130 firstdir
= posns
[i
]->directory
;
133 /* No name means we're done */
137 pathlen
= strlen(first
);
138 ce_size
= cache_entry_size(baselen
+ pathlen
);
140 src
= xcalloc(src_size
, sizeof(struct cache_entry
*));
/* NOTE(review): "struct tree_list_entry" here looks like a typo for
 * "struct tree_entry_list" — harmless only because sizeof a pointer is
 * taken; confirm against upstream. */
142 subposns
= xcalloc(len
, sizeof(struct tree_list_entry
*));
/* If the index's next entry matches the chosen name, claim it as stage 0. */
144 if (cache_name
&& !strcmp(cache_name
, first
)) {
146 src
[0] = active_cache
[*indpos
];
147 remove_cache_entry_at(*indpos
);
150 for (i
= 0; i
< len
; i
++) {
151 struct cache_entry
*ce
;
154 (posns
[i
] != &df_conflict_list
&&
155 strcmp(first
, posns
[i
]->name
))) {
/* Propagate the D/F-conflict sentinel into the stage slot. */
159 if (posns
[i
] == &df_conflict_list
) {
160 src
[i
+ merge
] = &df_conflict_entry
;
/* Directory entry: queue its children for the recursive pass. */
164 if (posns
[i
]->directory
) {
166 parse_tree(posns
[i
]->item
.tree
);
167 subposns
[i
] = posns
[i
]->item
.tree
->entries
;
168 posns
[i
] = posns
[i
]->next
;
169 src
[i
+ merge
] = &df_conflict_entry
;
/* Stage numbering relative to head_idx (missing context around these). */
175 else if (i
+ 1 < head_idx
)
177 else if (i
+ 1 > head_idx
)
/* Build a fresh cache entry for this tree's blob at the right stage. */
182 ce
= xcalloc(1, ce_size
);
183 ce
->ce_mode
= create_ce_mode(posns
[i
]->mode
);
184 ce
->ce_flags
= create_ce_flags(baselen
+ pathlen
,
186 memcpy(ce
->name
, base
, baselen
);
187 memcpy(ce
->name
+ baselen
, first
, pathlen
+ 1);
191 memcpy(ce
->sha1
, posns
[i
]->item
.any
->sha1
, 20);
/* A file here means any same-named directory below is a D/F conflict. */
193 subposns
[i
] = &df_conflict_list
;
194 posns
[i
] = posns
[i
]->next
;
201 printf("%s:\n", first
);
202 for (i
= 0; i
< src_size
; i
++) {
205 printf("%s\n", sha1_to_hex(src
[i
]->sha1
));
213 printf("Added %d entries\n", ret
);
/* Non-merge path: add every collected entry back into the index. */
217 for (i
= 0; i
< src_size
; i
++) {
219 add_cache_entry(src
[i
], ADD_CACHE_OK_TO_ADD
|ADD_CACHE_SKIP_DFCHECK
);
/* Recurse into the subdirectory with base extended by "first/". */
225 char *newbase
= xmalloc(baselen
+ 2 + pathlen
);
226 memcpy(newbase
, base
, baselen
);
227 memcpy(newbase
+ baselen
, first
, pathlen
);
228 newbase
[baselen
+ pathlen
] = '/';
229 newbase
[baselen
+ pathlen
+ 1] = '\0';
230 if (unpack_trees_rec(subposns
, len
, newbase
, fn
,
/*
 * Abort the merge because `ce`'s working-tree/index state would be
 * overwritten. NOTE(review): the die() argument list is truncated here;
 * presumably ce->name follows on a missing line.
 */
240 static void reject_merge(struct cache_entry
*ce
)
242 die("Entry '%s' would be overwritten by merge. Cannot merge.",
246 /* Unlink the last component and attempt to remove leading
247 * directories, in case this unlink is the removal of the
248 * last entry in the directory -- empty directories are removed.
/* NOTE(review): the actual unlink() call and the loop that walks `cp`
 * backwards over '/' separators (original lines 251-265, 267+) are missing
 * from this extract; only the strrchr and one rmdir survive. */
250 static void unlink_entry(char *name
)
259 cp
= strrchr(name
, '/');
/* rmdir fails (and the caller-visible loop stops) on non-empty parents. */
266 status
= rmdir(name
);
/*
 * SIGALRM handler: body missing in this extract — presumably sets the
 * volatile progress_update flag polled by check_updates. TODO confirm.
 */
275 static void progress_interval(int signum
)
/*
 * Arrange for SIGALRM to fire once per second (via setitimer) so the
 * checkout loop can refresh its progress display.
 * NOTE(review): the declarations of `sa` (struct sigaction) and `v`
 * (struct itimerval) fall in lines missing from this extract.
 */
280 static void setup_progress_signal(void)
285 memset(&sa
, 0, sizeof(sa
));
286 sa
.sa_handler
= progress_interval
;
287 sigemptyset(&sa
.sa_mask
);
/* SA_RESTART: don't let the timer interrupt in-progress syscalls. */
288 sa
.sa_flags
= SA_RESTART
;
289 sigaction(SIGALRM
, &sa
, NULL
);
/* 1-second repeating interval, first expiry also at 1 second. */
291 v
.it_interval
.tv_sec
= 1;
292 v
.it_interval
.tv_usec
= 0;
293 v
.it_value
= v
.it_interval
;
294 setitimer(ITIMER_REAL
, &v
, NULL
);
/*
 * Walk the `nr` result entries: unlink entries whose ce_mode is 0
 * (deletions) and check out entries flagged CE_UPDATE, with optional
 * percentage progress on stderr when -u -v were given.
 * NOTE(review): extract is missing several lines (e.g. the checkout state
 * initializer body, the progress gating around progress_update, loop
 * braces) — control flow below is partial; verify upstream before editing.
 */
297 static void check_updates(struct cache_entry
**src
, int nr
)
299 static struct checkout state
= {
/* CE_UPDATE is stored network-order in ce_flags, hence htons. */
305 unsigned short mask
= htons(CE_UPDATE
);
306 unsigned last_percent
= 200, cnt
= 0, total
= 0;
/* First pass: count how many entries will actually be touched. */
308 if (update
&& verbose_update
) {
309 for (total
= cnt
= 0; cnt
< nr
; cnt
++) {
310 struct cache_entry
*ce
= src
[cnt
];
311 if (!ce
->ce_mode
|| ce
->ce_flags
& mask
)
315 /* Don't bother doing this for very small updates */
320 fprintf(stderr
, "Checking files out...\n");
321 setup_progress_signal();
/* Second pass: apply deletions and checkouts. */
328 struct cache_entry
*ce
= *src
++;
331 if (!ce
->ce_mode
|| ce
->ce_flags
& mask
) {
334 percent
= (cnt
* 100) / total
;
335 if (percent
!= last_percent
||
337 fprintf(stderr
, "%4u%% (%u/%u) done\r",
338 percent
, cnt
, total
);
339 last_percent
= percent
;
/* ce_mode == 0 marks an entry to be removed from the working tree. */
345 unlink_entry(ce
->name
);
348 if (ce
->ce_flags
& mask
) {
349 ce
->ce_flags
&= ~mask
;
351 checkout_entry(ce
, &state
, NULL
);
/* Stop the progress timer's signal delivery. */
355 signal(SIGALRM
, SIG_IGN
);
/*
 * Top-level driver: build the per-tree entry-list array from the global
 * `trees` list, run unpack_trees_rec over them with merge function `fn`,
 * enforce --trivial, and push the results to the working tree.
 * NOTE(review): lines declaring `i`/`indpos`, advancing `posn`, and the
 * error handling after unpack_trees_rec are missing from this extract.
 */
360 static int unpack_trees(merge_fn_t fn
)
363 unsigned len
= object_list_length(trees
);
364 struct tree_entry_list
**posns
;
366 struct object_list
*posn
= trees
;
370 posns
= xmalloc(len
* sizeof(struct tree_entry_list
*));
/* One cursor per tree, starting at each tree's first entry. */
371 for (i
= 0; i
< len
; i
++) {
372 posns
[i
] = ((struct tree
*) posn
->item
)->entries
;
/* Start the recursion at the --prefix subdirectory, or the root. */
375 if (unpack_trees_rec(posns
, len
, prefix
? prefix
: "",
380 if (trivial_merges_only
&& nontrivial_merge
)
381 die("Merge requires file-level merging");
/* Apply deletions/checkouts for the merged index. */
383 check_updates(active_cache
, active_nr
);
/*
 * Resolve `sha1` to a tree and append it to the global `trees` list.
 * NOTE(review): the NULL-check/error-return after parse_tree_indirect and
 * the final return are in lines missing from this extract.
 */
387 static int list_tree(unsigned char *sha1
)
389 struct tree
*tree
= parse_tree_indirect(sha1
);
392 object_list_append(&tree
->object
, &trees
);
/*
 * Two cache entries are "same" when mode and object SHA-1 match.
 * NOTE(review): original lines 397-401 are missing — upstream handles
 * NULL operands there before this comparison; do not call this fragment's
 * visible logic with NULL pointers as-is.
 */
396 static int same(struct cache_entry
*a
, struct cache_entry
*b
)
402 return a
->ce_mode
== b
->ce_mode
&&
403 !memcmp(a
->sha1
, b
->sha1
, 20);
408 * When a CE gets turned into an unmerged entry, we
409 * want it to be up-to-date
/* Die unless the working-tree file backing `ce` matches the index stat
 * data; skipped entirely for -i (index_only) and --reset.
 * NOTE(review): the `struct stat st` declaration, the `reset` flag
 * declaration, and the handling of `changed` fall in missing lines. */
411 static void verify_uptodate(struct cache_entry
*ce
)
415 if (index_only
|| reset
)
418 if (!lstat(ce
->name
, &st
)) {
419 unsigned changed
= ce_match_stat(ce
, &st
, 1);
/* Mark for (re)checkout rather than failing, in the surviving branch. */
425 ce
->ce_flags
|= htons(CE_UPDATE
);
430 die("Entry '%s' not uptodate. Cannot merge.", ce
->name
);
/*
 * Invalidate the cached tree objects along `ce`'s path so the cache-tree
 * is recomputed where the index changed.
 */
433 static void invalidate_ce_path(struct cache_entry
*ce
)
436 cache_tree_invalidate_path(active_cache_tree
, ce
->name
);
440 * We do not want to remove or overwrite a working tree file that
/* Die if an untracked file exists at `path`; `action` names what the merge
 * would do to it ("removed"/"overwritten"). No-op for -i, --reset, or when
 * the working tree is not being updated. */
443 static void verify_absent(const char *path
, const char *action
)
447 if (index_only
|| reset
|| !update
)
/* lstat success == something exists in the working tree at that path. */
449 if (!lstat(path
, &st
))
450 die("Untracked working tree file '%s' "
451 "would be %s by merge.", path
, action
)
;
/*
 * Install `merge` as the resolved stage-0 entry, reusing `old`'s stat
 * info when they are the same, and verifying safety (uptodate/absent)
 * otherwise. NOTE(review): braces and the return value (upstream returns
 * 1) fall in lines missing from this extract.
 */
454 static int merged_entry(struct cache_entry
*merge
, struct cache_entry
*old
)
/* Mark for checkout by default; cleared again if old is reused below. */
456 merge
->ce_flags
|= htons(CE_UPDATE
);
459 * See if we can re-use the old CE directly?
460 * That way we get the uptodate stat info.
462 * This also removes the UPDATE flag on
465 if (same(old
, merge
)) {
/* Different content: the old entry must be clean before overwrite. */
468 verify_uptodate(old
);
469 invalidate_ce_path(old
);
/* No old entry: make sure no untracked file is in the way. */
473 verify_absent(merge
->name
, "overwritten");
474 invalidate_ce_path(merge
);
/* Collapse to stage 0 and store. */
477 merge
->ce_flags
&= ~htons(CE_STAGEMASK
);
478 add_cache_entry(merge
, ADD_CACHE_OK_TO_ADD
);
/*
 * Record `ce` as deleted: verify the old entry was clean (or that no
 * untracked file blocks the removal), then re-add with ce_mode presumably
 * zeroed so check_updates unlinks it. NOTE(review): the line that clears
 * ce->ce_mode and the return are missing from this extract — confirm.
 */
482 static int deleted_entry(struct cache_entry
*ce
, struct cache_entry
*old
)
485 verify_uptodate(old
);
487 verify_absent(ce
->name
, "removed");
489 add_cache_entry(ce
, ADD_CACHE_OK_TO_ADD
);
490 invalidate_ce_path(ce
);
/*
 * Keep `ce` in the index unchanged (re-add it as-is).
 * NOTE(review): the return statement is in a missing line.
 */
494 static int keep_entry(struct cache_entry
*ce
)
496 add_cache_entry(ce
, ADD_CACHE_OK_TO_ADD
);
/*
 * Debug/diagnostic printer for one stage entry: "<label>(missing)" when
 * ce is absent, else mode, SHA-1, stage and name. NOTE(review): the NULL
 * test and the remaining fprintf arguments (mode, stage, name) fall in
 * missing lines.
 */
501 static void show_stage_entry(FILE *o
,
502 const char *label
, const struct cache_entry
*ce
)
505 fprintf(o
, "%s (missing)\n", label
);
507 fprintf(o
, "%s%06o %s %d\t%s\n",
510 sha1_to_hex(ce
->sha1
),
/*
 * Three-way merge of one path's stage entries (ancestors at stages
 * 1..head_idx-1, head at head_idx, remote at head_idx+1), implementing the
 * trivial-merge case table from Documentation/git-read-tree.txt (case
 * numbers #2, #13, #14, #16 etc. referenced in comments below).
 * NOTE(review): this extract is missing many lines (index/head
 * initialization, head_match declaration and the #16 bookkeeping, the
 * aggressive-block braces, the final unmerged-entry fallthrough), so the
 * visible fragments do not form complete control flow — verify upstream.
 */
516 static int threeway_merge(struct cache_entry
**stages
)
518 struct cache_entry
*index
;
519 struct cache_entry
*head
;
520 struct cache_entry
*remote
= stages
[head_idx
+ 1];
523 int remote_match
= 0;
524 const char *path
= NULL
;
526 int df_conflict_head
= 0;
527 int df_conflict_remote
= 0;
529 int any_anc_missing
= 0;
530 int no_anc_exists
= 1;
/* Scan ancestor stages for presence and remember a representative path. */
533 for (i
= 1; i
< head_idx
; i
++) {
538 path
= stages
[i
]->name
;
544 head
= stages
[head_idx
];
/* Translate the D/F-conflict sentinel into flags and drop the pointer. */
546 if (head
== &df_conflict_entry
) {
547 df_conflict_head
= 1;
551 if (remote
== &df_conflict_entry
) {
552 df_conflict_remote
= 1;
563 /* First, if there's a #16 situation, note that to prevent #13
566 if (!same(remote
, head
)) {
567 for (i
= 1; i
< head_idx
; i
++) {
568 if (same(stages
[i
], head
)) {
571 if (same(stages
[i
], remote
)) {
577 /* We start with cases where the index is allowed to match
578 * something other than the head: #14(ALT) and #2ALT, where it
579 * is permitted to match the result instead.
581 /* #14, #14ALT, #2ALT */
582 if (remote
&& !df_conflict_head
&& head_match
&& !remote_match
) {
583 if (index
&& !same(index
, remote
) && !same(index
, head
))
585 return merged_entry(remote
, index
);
588 * If we have an entry in the index cache, then we want to
589 * make sure that it matches head.
591 if (index
&& !same(index
, head
)) {
/* #5ALT-style: head and remote agree, take that result. */
597 if (same(head
, remote
))
598 return merged_entry(head
, index
);
600 if (!df_conflict_remote
&& remote_match
&& !head_match
)
601 return merged_entry(head
, index
);
/* Deleted everywhere relative to a (partially) missing ancestor. */
605 if (!head
&& !remote
&& any_anc_missing
)
608 /* Under the new "aggressive" rule, we resolve mostly trivial
609 * cases that we historically had git-merge-one-file resolve.
612 int head_deleted
= !head
&& !df_conflict_head
;
613 int remote_deleted
= !remote
&& !df_conflict_remote
;
616 * Deleted in one and unchanged in the other.
618 if ((head_deleted
&& remote_deleted
) ||
619 (head_deleted
&& remote
&& remote_match
) ||
620 (remote_deleted
&& head
&& head_match
)) {
622 return deleted_entry(index
, index
);
624 verify_absent(path
, "removed");
628 * Added in both, identically.
630 if (no_anc_exists
&& head
&& remote
&& same(head
, remote
))
631 return merged_entry(head
, index
);
635 /* Below are "no merge" cases, which require that the index be
636 * up-to-date to avoid the files getting overwritten with
637 * conflict resolution files.
640 verify_uptodate(index
);
643 verify_absent(path
, "overwritten");
/* From here on file-level merging is required. */
645 nontrivial_merge
= 1;
647 /* #2, #3, #4, #6, #7, #9, #11. */
649 if (!head_match
|| !remote_match
) {
650 for (i
= 1; i
< head_idx
; i
++) {
652 keep_entry(stages
[i
]);
/* #16: same path matches different ancestors for head vs remote. */
660 fprintf(stderr
, "read-tree: warning #16 detected\n");
661 show_stage_entry(stderr
, "head ", stages
[head_match
]);
662 show_stage_entry(stderr
, "remote ", stages
[remote_match
]);
/* Leave head/remote stages in the index for the user to resolve. */
665 if (head
) { count
+= keep_entry(head
); }
666 if (remote
) { count
+= keep_entry(remote
); }
673 * The rule is to "carry forward" what is in the index without losing
674 * information across a "fast forward", favoring a successful merge
675 * over a merge failure when it makes sense. For details of the
676 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
/* NOTE(review): the merge_size check guarding the error(), the branch for
 * a missing `current` (cases 1-3, 10-12, 16-19 tail), and several closing
 * braces fall in lines missing from this extract. */
679 static int twoway_merge(struct cache_entry
**src
)
681 struct cache_entry
*current
= src
[0];
682 struct cache_entry
*oldtree
= src
[1], *newtree
= src
[2];
/* Presumably guarded by merge_size != 2 (missing line) — TODO confirm. */
685 return error("Cannot do a twoway merge of %d trees",
/* current exists: keep it whenever the case table says it carries forward. */
689 if ((!oldtree
&& !newtree
) || /* 4 and 5 */
690 (!oldtree
&& newtree
&&
691 same(current
, newtree
)) || /* 6 and 7 */
692 (oldtree
&& newtree
&&
693 same(oldtree
, newtree
)) || /* 14 and 15 */
694 (oldtree
&& newtree
&&
695 !same(oldtree
, newtree
) && /* 18 and 19*/
696 same(current
, newtree
))) {
697 return keep_entry(current
);
/* 8/9: path removed in the new tree and index matched the old tree. */
699 else if (oldtree
&& !newtree
&& same(current
, oldtree
)) {
701 return deleted_entry(oldtree
, current
);
/* 20: fast-forward the index entry to the new tree's version. */
703 else if (oldtree
&& newtree
&&
704 same(current
, oldtree
) && !same(current
, newtree
)) {
706 return merged_entry(newtree
, current
);
709 /* all other failures */
711 reject_merge(oldtree
);
713 reject_merge(current
);
715 reject_merge(newtree
);
/* No current entry: take newtree, or record the deletion. */
720 return merged_entry(newtree
, current
);
722 return deleted_entry(oldtree
, current
);
728 * Keep the index entries at stage0, collapse stage1 but make sure
729 * stage0 does not have anything there.
/* Merge function for --prefix reads: existing index entries (stage 0) win;
 * a tree entry (stage 1) at the same path is an overlap error.
 * NOTE(review): the merge_size guard and the old/a presence tests around
 * the die()/returns fall in missing lines. */
731 static int bind_merge(struct cache_entry
**src
)
733 struct cache_entry
*old
= src
[0];
734 struct cache_entry
*a
= src
[1];
737 return error("Cannot do a bind merge of %d trees\n",
740 die("Entry '%s' overlaps. Cannot bind.", a
->name
);
742 return keep_entry(old
);
/* NULL old: merged_entry installs the tree's entry fresh. */
744 return merged_entry(a
, NULL
);
751 * - take the stat information from stage0, take the data from stage1
/* One-tree read (plain "read-tree <tree>" with -m/--reset): the tree's
 * entry (stage 1) becomes the result, reusing the old entry's stat info
 * when content matches. NOTE(review): the merge_size guard and the !a
 * (deletion) test preceding the deleted_entry call fall in missing lines. */
753 static int oneway_merge(struct cache_entry
**src
)
755 struct cache_entry
*old
= src
[0];
756 struct cache_entry
*a
= src
[1];
759 return error("Cannot do a oneway merge of %d trees",
/* Tree no longer has this path: drop it from the index. */
763 invalidate_ce_path(old
);
764 return deleted_entry(old
, old
);
766 if (old
&& same(old
, a
)) {
/* Same content: keep old, but flag for checkout if stat data is stale. */
769 if (lstat(old
->name
, &st
) ||
770 ce_match_stat(old
, &st
, 1))
771 old
->ce_flags
|= htons(CE_UPDATE
);
773 return keep_entry(old
);
775 return merged_entry(a
, old
);
/*
 * Read the index, dropping any unmerged (staged) entries in place and
 * invalidating their cache-tree paths; shrinks active_nr by the number
 * dropped. NOTE(review): the read_cache() call, the ce_stage test, the
 * dst compaction writes, and the return value are in lines missing from
 * this extract.
 */
778 static int read_cache_unmerged(void)
781 struct cache_entry
**dst
;
/* Compact the array over the deleted (unmerged) entries. */
786 for (i
= 0; i
< active_nr
; i
++) {
787 struct cache_entry
*ce
= active_cache
[i
];
790 invalidate_ce_path(ce
);
797 active_nr
-= deleted
;
/*
 * Populate cache-tree node `it` directly from `tree`: copy the tree's
 * SHA-1, recurse into parsed subtrees, and accumulate entry_count.
 * NOTE(review): the `int cnt` declaration, the non-directory branch of the
 * loop (which presumably increments cnt per blob — TODO confirm), and the
 * return-on-unparsed-subtree are in lines missing from this extract.
 */
801 static void prime_cache_tree_rec(struct cache_tree
*it
, struct tree
*tree
)
803 struct tree_entry_list
*ent
;
/* The cache-tree node records the tree object's own SHA-1. */
806 memcpy(it
->sha1
, tree
->object
.sha1
, 20);
807 for (cnt
= 0, ent
= tree
->entries
; ent
; ent
= ent
->next
) {
811 struct cache_tree_sub
*sub
;
812 struct tree
*subtree
= (struct tree
*)ent
->item
.tree
;
/* Only descend into subtrees that have already been parsed. */
813 if (!subtree
->object
.parsed
)
815 sub
= cache_tree_sub(it
, ent
->name
);
816 sub
->cache_tree
= cache_tree();
817 prime_cache_tree_rec(sub
->cache_tree
, subtree
);
/* A directory contributes its subtree's total entry count. */
818 cnt
+= sub
->cache_tree
->entry_count
;
821 it
->entry_count
= cnt
;
/*
 * Build a fresh cache-tree for the whole index from the single tree that
 * was read (first item of the global `trees` list) — valid because after
 * a one-tree read the index matches that tree exactly.
 */
824 static void prime_cache_tree(void)
826 struct tree
*tree
= (struct tree
*)trees
->item
;
829 active_cache_tree
= cache_tree();
830 prime_cache_tree_rec(active_cache_tree
, tree
);
/* Usage string printed by usage() on bad option combinations. */
834 static const char read_tree_usage
[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";
/* Lockfile handle for the index, written/committed at the end of main. */
836 static struct cache_file cache_file
;
/*
 * Entry point for git-read-tree: parse options (-u/-v/-i/--prefix/--reset/
 * --trivial/--aggressive/-m), read the named trees, pick the merge function
 * by tree count (1: oneway/bind, 2: twoway, 3: threeway — the fn selection
 * beyond the visible prefix case is in missing lines), run unpack_trees,
 * and commit the new index file.
 * NOTE(review): this extract is garbled and missing many lines (flag
 * assignments inside the option branches, `pos`/`merge`/`reset`
 * declarations, the stage accounting, and the switch over `stage`);
 * verify against upstream git before editing.
 */
838 int cmd_read_tree(int argc
, const char **argv
, char **envp
)
840 int i
, newfd
, stage
= 0;
841 unsigned char sha1
[20];
842 merge_fn_t fn
= NULL
;
844 setup_git_directory();
845 git_config(git_default_config
);
/* Take the index lock before any modification. */
847 newfd
= hold_index_file_for_update(&cache_file
, get_index_file());
849 die("unable to create new cachefile");
/* NOTE(review): git_config is invoked twice (also at 845) in this
 * extract — possibly an artifact of the garbling, possibly upstream
 * redundancy; confirm before "fixing". */
851 git_config(git_default_config
);
855 for (i
= 1; i
< argc
; i
++) {
856 const char *arg
= argv
[i
];
858 /* "-u" means "update", meaning that a merge will update
861 if (!strcmp(arg
, "-u")) {
866 if (!strcmp(arg
, "-v")) {
871 /* "-i" means "index only", meaning that a merge will
872 * not even look at the working tree.
874 if (!strcmp(arg
, "-i")) {
879 /* "--prefix=<subdirectory>/" means keep the current index
880 * entries and put the entries from the tree under the
881 * given subdirectory.
883 if (!strncmp(arg
, "--prefix=", 9)) {
/* The merge-mode options are mutually exclusive. */
884 if (stage
|| merge
|| prefix
)
885 usage(read_tree_usage
);
889 if (read_cache_unmerged())
890 die("you need to resolve your current index first");
894 /* This differs from "-m" in that we'll silently ignore unmerged entries */
895 if (!strcmp(arg
, "--reset")) {
896 if (stage
|| merge
|| prefix
)
897 usage(read_tree_usage
);
901 read_cache_unmerged();
905 if (!strcmp(arg
, "--trivial")) {
906 trivial_merges_only
= 1;
910 if (!strcmp(arg
, "--aggressive")) {
915 /* "-m" stands for "merge", meaning we start in stage 1 */
916 if (!strcmp(arg
, "-m")) {
917 if (stage
|| merge
|| prefix
)
918 usage(read_tree_usage
);
919 if (read_cache_unmerged())
920 die("you need to resolve your current index first");
926 /* using -u and -i at the same time makes no sense */
927 if (1 < index_only
+ update
)
928 usage(read_tree_usage
);
/* Non-option argument: a tree-ish to read. */
930 if (get_sha1(arg
, sha1
))
931 die("Not a valid object name %s", arg
);
932 if (list_tree(sha1
) < 0)
933 die("failed to unpack tree object %s", arg
);
/* -u/-i only make sense together with a merge. */
936 if ((update
||index_only
) && !merge
)
937 usage(read_tree_usage
);
/* --prefix validation: must end in '/', single tree, no collisions. */
940 int pfxlen
= strlen(prefix
);
942 if (prefix
[pfxlen
-1] != '/')
943 die("prefix must end with /");
945 die("binding merge takes only one tree");
946 pos
= cache_name_pos(prefix
, pfxlen
);
948 die("corrupt index file");
950 if (pos
< active_nr
&&
951 !strncmp(active_cache
[pos
]->name
, prefix
, pfxlen
))
952 die("subdirectory '%s' already exists.", prefix
);
/* Also reject a plain file at the prefix path (without the slash). */
953 pos
= cache_name_pos(prefix
, pfxlen
-1);
955 die("file '%.*s' already exists.", pfxlen
-1, prefix
);
960 die("just how do you expect me to merge %d trees?", stage
-1);
/* Single tree: bind merge under --prefix, otherwise one-way. */
963 fn
= prefix
? bind_merge
: oneway_merge
;
971 cache_tree_free(&active_cache_tree
);
/* Stages 1..head_idx-1 are ancestors; head sits at stage - 2 + 1. */
976 head_idx
= stage
- 2;
984 * When reading only one tree (either the most basic form,
985 * "-m ent" or "--reset ent" form), we can obtain a fully
986 * valid cache-tree because the index must match exactly
987 * what came from the tree.
989 if (trees
&& trees
->item
&& (!merge
|| (stage
== 2))) {
990 cache_tree_free(&active_cache_tree
);
/* Write the updated index and atomically commit the lockfile. */
994 if (write_cache(newfd
, active_cache
, active_nr
) ||
995 commit_index_file(&cache_file
))
996 die("unable to write new index file");