/*
 * rev-cache.c — on-disk cache of commit ancestry (historical git code).
 * Recovered from a blame-annotated rendering; table markup stripped.
 */
#include "refs.h"
#include "cache.h"
#include "rev-cache.h"

/* Sorted (by SHA1, via memcmp) array of cached commit entries. */
struct rev_cache **rev_cache;
/* Number of used entries and currently allocated capacity of rev_cache[]. */
int nr_revs, alloc_revs;

/* Free list of parent/child list elements, replenished a batch at a time. */
static struct rev_list_elem *rle_free;

/* How many rev_list_elem structures to carve out per xmalloc() call. */
#define BATCH_SIZE 512
12 | int find_rev_cache(const unsigned char *sha1) | |
13 | { | |
14 | int lo = 0, hi = nr_revs; | |
15 | while (lo < hi) { | |
16 | int mi = (lo + hi) / 2; | |
17 | struct rev_cache *ri = rev_cache[mi]; | |
18 | int cmp = memcmp(sha1, ri->sha1, 20); | |
19 | if (!cmp) | |
20 | return mi; | |
21 | if (cmp < 0) | |
22 | hi = mi; | |
23 | else | |
24 | lo = mi + 1; | |
25 | } | |
26 | return -lo - 1; | |
27 | } | |
28 | ||
29 | static struct rev_list_elem *alloc_list_elem(void) | |
30 | { | |
31 | struct rev_list_elem *rle; | |
32 | if (!rle_free) { | |
33 | int i; | |
34 | ||
35 | rle = xmalloc(sizeof(*rle) * BATCH_SIZE); | |
36 | for (i = 0; i < BATCH_SIZE - 1; i++) { | |
37 | rle[i].ri = NULL; | |
38 | rle[i].next = &rle[i + 1]; | |
39 | } | |
40 | rle[BATCH_SIZE - 1].ri = NULL; | |
41 | rle[BATCH_SIZE - 1].next = NULL; | |
42 | rle_free = rle; | |
43 | } | |
44 | rle = rle_free; | |
45 | rle_free = rle->next; | |
46 | return rle; | |
47 | } | |
48 | ||
/*
 * Return the rev_cache entry for sha1, creating and inserting a new
 * zeroed entry at its sorted position when none exists yet.
 */
static struct rev_cache *create_rev_cache(const unsigned char *sha1)
{
	struct rev_cache *ri;
	int pos = find_rev_cache(sha1);

	if (0 <= pos)
		return rev_cache[pos];
	/* Miss: find_rev_cache() encoded the insertion point as -pos - 1. */
	pos = -pos - 1;
	/* Grow the array if the pre-incremented count exceeds capacity. */
	if (alloc_revs <= ++nr_revs) {
		alloc_revs = alloc_nr(alloc_revs);
		rev_cache = xrealloc(rev_cache, sizeof(ri) * alloc_revs);
	}
	/* Shift the tail (old nr_revs - pos entries) to open slot "pos". */
	if (pos < nr_revs)
		memmove(rev_cache + pos + 1, rev_cache + pos,
			(nr_revs - pos - 1) * sizeof(ri));
	ri = xcalloc(1, sizeof(*ri));
	memcpy(ri->sha1, sha1, 20);
	rev_cache[pos] = ri;
	return ri;
}
69 | ||
/* SHA1 of the most recently written/read entry; enables the one-bit
 * "previous entry is my first parent" compression in the cache format. */
static unsigned char last_sha1[20];
/*
 * Emit one rev_cache entry into the open cache file, then recurse
 * depth-first into its children.  Entries already written, or never
 * parsed, emit nothing themselves (children are still visited).
 * Relies on the file-scope last_sha1 to decide whether the 0x80
 * "previous entry is my first parent" flag bit applies.
 */
static void write_one_rev_cache(FILE *rev_cache_file, struct rev_cache *ri)
{
	unsigned char flag;
	struct rev_list_elem *rle;

	if (ri->written)
		return;

	if (ri->parsed) {
		/* We use last_sha1 compression only for the first parent;
		 * otherwise the resulting rev-cache would lose the parent
		 * order information.
		 */
		if (ri->parents &&
		    !memcmp(ri->parents->ri->sha1, last_sha1, 20))
			flag = (ri->num_parents - 1) | 0x80;
		else
			flag = ri->num_parents;

		fwrite(ri->sha1, 20, 1, rev_cache_file);
		fwrite(&flag, 1, 1, rev_cache_file);
		for (rle = ri->parents; rle; rle = rle->next) {
			/* The first parent is implied by the 0x80 bit; skip it. */
			if (flag & 0x80 && rle == ri->parents)
				continue;
			fwrite(rle->ri->sha1, 20, 1, rev_cache_file);
		}
		memcpy(last_sha1, ri->sha1, 20);
		ri->written = 1;
	}
	/* recursively write children depth first */
	for (rle = ri->children; rle; rle = rle->next)
		write_one_rev_cache(rev_cache_file, rle->ri);
}
105 | ||
106 | void write_rev_cache(const char *newpath, const char *oldpath) | |
107 | { | |
108 | /* write the following commit ancestry information in | |
109 | * $GIT_DIR/info/rev-cache. | |
110 | * | |
111 | * The format is: | |
112 | * 20-byte SHA1 (commit ID) | |
113 | * 1-byte flag: | |
114 | * - bit 0-6 records "number of parent commit SHA1s to | |
115 | * follow" (i.e. up to 127 children can be listed). | |
116 | * - when the bit 7 is on, then "the entry immediately | |
117 | * before this entry is one of the parents of this | |
118 | * commit". | |
119 | * N x 20-byte SHA1 (parent commit IDs) | |
120 | */ | |
121 | FILE *rev_cache_file; | |
122 | int i; | |
123 | struct rev_cache *ri; | |
124 | ||
125 | if (!strcmp(newpath, oldpath)) { | |
126 | /* If we are doing it in place */ | |
127 | rev_cache_file = fopen(newpath, "a"); | |
128 | } | |
129 | else { | |
130 | char buf[8096]; | |
131 | size_t sz; | |
132 | FILE *oldfp = fopen(oldpath, "r"); | |
133 | rev_cache_file = fopen(newpath, "w"); | |
134 | if (oldfp) { | |
135 | while (1) { | |
136 | sz = fread(buf, 1, sizeof(buf), oldfp); | |
137 | if (sz == 0) | |
138 | break; | |
139 | fwrite(buf, 1, sz, rev_cache_file); | |
140 | } | |
141 | fclose(oldfp); | |
142 | } | |
143 | } | |
144 | ||
145 | memset(last_sha1, 0, 20); | |
146 | ||
147 | /* Go through available rev_cache structures, starting from | |
148 | * parentless ones first, so that we would get most out of | |
149 | * last_sha1 optimization by the depth first behaviour of | |
150 | * write_one_rev_cache(). | |
151 | */ | |
152 | for (i = 0; i < nr_revs; i++) { | |
153 | ri = rev_cache[i]; | |
154 | if (ri->num_parents) | |
155 | continue; | |
156 | write_one_rev_cache(rev_cache_file, ri); | |
157 | } | |
158 | /* Then the rest */ | |
159 | for (i = 0; i < nr_revs; i++) { | |
160 | ri = rev_cache[i]; | |
161 | write_one_rev_cache(rev_cache_file, ri); | |
162 | } | |
163 | fclose(rev_cache_file); | |
164 | } | |
165 | ||
166 | static void add_parent(struct rev_cache *child, | |
167 | const unsigned char *parent_sha1) | |
168 | { | |
169 | struct rev_cache *parent = create_rev_cache(parent_sha1); | |
170 | struct rev_list_elem *e = alloc_list_elem(); | |
171 | ||
172 | /* Keep the parent list ordered in the same way the commit | |
173 | * object records them. | |
174 | */ | |
175 | e->ri = parent; | |
176 | e->next = NULL; | |
177 | if (!child->parents_tail) | |
178 | child->parents = e; | |
179 | else | |
180 | child->parents_tail->next = e; | |
181 | child->parents_tail = e; | |
182 | child->num_parents++; | |
183 | ||
184 | /* There is no inherent order of the children so we just | |
185 | * LIFO them together. | |
186 | */ | |
187 | e = alloc_list_elem(); | |
188 | e->next = parent->children; | |
189 | parent->children = e; | |
190 | e->ri = child; | |
191 | parent->num_children++; | |
192 | } | |
193 | ||
194 | int read_rev_cache(const char *path, FILE *dumpfile, int dry_run) | |
195 | { | |
196 | unsigned char *map; | |
197 | int fd; | |
198 | struct stat st; | |
199 | unsigned long ofs, len; | |
200 | struct rev_cache *ri = NULL; | |
201 | ||
202 | fd = open(path, O_RDONLY); | |
203 | if (fd < 0) { | |
204 | if (dry_run) | |
205 | return error("cannot open %s", path); | |
206 | if (errno == ENOENT) | |
207 | return 0; | |
208 | return -1; | |
209 | } | |
210 | if (fstat(fd, &st)) { | |
211 | close(fd); | |
212 | return -1; | |
213 | } | |
214 | map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); | |
8f3f9b09 | 215 | close(fd); |
e35f9824 PR |
216 | if (map == MAP_FAILED) |
217 | return -1; | |
8f3f9b09 JH |
218 | |
219 | memset(last_sha1, 0, 20); | |
220 | ofs = 0; | |
221 | len = st.st_size; | |
222 | while (ofs < len) { | |
223 | unsigned char sha1[20]; | |
224 | int flag, cnt, i; | |
225 | if (len < ofs + 21) | |
226 | die("rev-cache too short"); | |
227 | memcpy(sha1, map + ofs, 20); | |
228 | flag = map[ofs + 20]; | |
229 | ofs += 21; | |
230 | cnt = (flag & 0x7f) + ((flag & 0x80) != 0); | |
231 | if (len < ofs + (flag & 0x7f) * 20) | |
232 | die("rev-cache too short to have %d more parents", | |
233 | (flag & 0x7f)); | |
234 | if (dumpfile) | |
235 | fprintf(dumpfile, "%s", sha1_to_hex(sha1)); | |
236 | if (!dry_run) { | |
237 | ri = create_rev_cache(sha1); | |
238 | if (!ri) | |
239 | die("cannot create rev-cache for %s", | |
240 | sha1_to_hex(sha1)); | |
241 | ri->written = ri->parsed = 1; | |
242 | } | |
243 | i = 0; | |
244 | if (flag & 0x80) { | |
245 | if (!dry_run) | |
246 | add_parent(ri, last_sha1); | |
247 | if (dumpfile) | |
248 | fprintf(dumpfile, " %s", | |
249 | sha1_to_hex(last_sha1)); | |
250 | i++; | |
251 | } | |
252 | while (i++ < cnt) { | |
253 | if (!dry_run) | |
254 | add_parent(ri, map + ofs); | |
255 | if (dumpfile) | |
256 | fprintf(dumpfile, " %s", | |
257 | sha1_to_hex(last_sha1)); | |
258 | ofs += 20; | |
259 | } | |
260 | if (dumpfile) | |
261 | fprintf(dumpfile, "\n"); | |
262 | memcpy(last_sha1, sha1, 20); | |
263 | } | |
264 | if (ofs != len) | |
265 | die("rev-cache truncated?"); | |
266 | munmap(map, len); | |
267 | return 0; | |
268 | } | |
269 | ||
270 | int record_rev_cache(const unsigned char *sha1, FILE *dumpfile) | |
271 | { | |
272 | unsigned char parent[20]; | |
273 | char type[20]; | |
274 | unsigned long size, ofs; | |
275 | unsigned int cnt, i; | |
276 | void *buf; | |
277 | struct rev_cache *ri; | |
278 | ||
279 | buf = read_sha1_file(sha1, type, &size); | |
280 | if (!buf) | |
281 | return error("%s: not found", sha1_to_hex(sha1)); | |
282 | if (strcmp(type, "commit")) { | |
283 | free(buf); | |
284 | return error("%s: not a commit but a %s", | |
285 | sha1_to_hex(sha1), type); | |
286 | } | |
287 | ri = create_rev_cache(sha1); | |
288 | if (ri->parsed) | |
289 | return 0; | |
290 | if (dumpfile) | |
291 | fprintf(dumpfile, "commit %s\n", sha1_to_hex(sha1)); | |
292 | ||
293 | cnt = 0; | |
294 | ofs = 46; /* "tree " + hex-sha1 + "\n" */ | |
295 | while (!memcmp(buf + ofs, "parent ", 7) && | |
296 | !get_sha1_hex(buf + ofs + 7, parent)) { | |
297 | ofs += 48; | |
298 | cnt++; | |
299 | } | |
300 | if (cnt * 48 + 46 != ofs) { | |
301 | free(buf); | |
302 | die("internal error in record_rev_cache"); | |
303 | } | |
304 | ||
305 | ri = create_rev_cache(sha1); | |
306 | ri->parsed = 1; | |
307 | ||
308 | for (i = 0; i < cnt; i++) { | |
309 | unsigned char parent_sha1[20]; | |
310 | ||
311 | ofs = 46 + i * 48 + 7; | |
312 | get_sha1_hex(buf + ofs, parent_sha1); | |
313 | add_parent(ri, parent_sha1); | |
314 | record_rev_cache(parent_sha1, dumpfile); | |
315 | } | |
316 | free(buf); | |
317 | return 0; | |
318 | } |