#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree-walk.h"

const char *tree_type = "tree";

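/*
 * Turn a tree entry into a cache entry at the given stage and add it
 * to the index with the given add_cache_entry() option.  Directories
 * are not added themselves; the walker is told to recurse into them.
 */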
static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	hashcpy(ce->sha1, sha1);
	return add_cache_entry(ce, opt);
}

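/*
 * Add one entry through the regular add_cache_entry() path: new paths
 * are allowed, and the directory/file conflict check is skipped.
 */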
static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_JUST_APPEND);
}

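/*
 * Walk one level of a tree: invoke fn for every entry that matches
 * the pathspec, and recurse into subtrees (and submodule commits)
 * when fn returns READ_TREE_RECURSIVE.
 */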
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	unsigned char sha1[20];
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.sha1, base->buf, base->len,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			hashcpy(sha1, entry.sha1);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(entry.sha1);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			hashcpy(sha1, commit->tree->object.sha1);
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(sha1),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}

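/*
 * Public entry point for the recursive walk: seed the path buffer
 * with the given base and hand off to read_tree_1().
 */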
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}

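/* qsort() comparison function: order cache entries by name and stage. */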
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

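/*
 * Populate the index from a tree at the given stage.  If nothing is
 * already staged there, entries are simply appended and sorted at the
 * end; otherwise each entry goes through the regular checks.
 */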
int read_tree(struct tree *tree, int stage, struct pathspec *match)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way; otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < active_nr; i++) {
		const struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, NULL);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&active_cache_tree);
	qsort(active_cache, active_nr, sizeof(active_cache[0]),
	      cmp_cache_name_compare);
	return 0;
}

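/*
 * Find or create the in-core object for a tree with the given SHA-1;
 * the contents are not read here.
 */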
struct tree *lookup_tree(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		return create_object(sha1, OBJ_TREE, alloc_tree_node());
	if (!obj->type)
		obj->type = OBJ_TREE;
	if (obj->type != OBJ_TREE) {
		error("Object %s is a %s, not a tree",
		      sha1_to_hex(sha1), typename(obj->type));
		return NULL;
	}
	return (struct tree *) obj;
}

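/*
 * Attach an already-read buffer with the tree contents to the in-core
 * object and mark it parsed.
 */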
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;

	return 0;
}

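/*
 * Read the tree contents from the object database, unless the object
 * has been parsed already.
 */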
int parse_tree(struct tree *item)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_sha1_file(item->object.sha1, &type, &size);
	if (!buffer)
		return error("Could not read %s",
			     sha1_to_hex(item->object.sha1));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     sha1_to_hex(item->object.sha1));
	}
	return parse_tree_buffer(item, buffer, size);
}

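/*
 * Drop the tree contents and mark the object unparsed, so that it can
 * be re-read later if needed.
 */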
void free_tree_buffer(struct tree *tree)
{
	free(tree->buffer);
	tree->buffer = NULL;
	tree->size = 0;
	tree->object.parsed = 0;
}

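/*
 * Peel commits and tags until a tree is reached; returns NULL if the
 * object cannot be peeled to a tree.
 */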
struct tree *parse_tree_indirect(const unsigned char *sha1)
{
	struct object *obj = parse_object(sha1);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(((struct commit *) obj)->tree->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(obj->sha1);
	} while (1);
}