#include "cache.h"
#include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
#include "object-store.h"
#include "connected.h"
#include "fetch-negotiator.h"
#include "fsck.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
static struct strbuf fsck_msg_types = STRBUF_INIT;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define ALTERNATE	(1U << 1)

/*
 * After sending this many "have"s, if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
50 | ||
51 | static int multi_ack, use_sideband; | |
52 | /* Allow specifying sha1 if it is a ref tip. */ | |
53 | #define ALLOW_TIP_SHA1 01 | |
54 | /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */ | |
55 | #define ALLOW_REACHABLE_SHA1 02 | |
56 | static unsigned int allow_unadvertised_object_request; | |
57 | ||
58 | __attribute__((format (printf, 2, 3))) | |
59 | static inline void print_verbose(const struct fetch_pack_args *args, | |
60 | const char *fmt, ...) | |
61 | { | |
62 | va_list params; | |
63 | ||
64 | if (!args->verbose) | |
65 | return; | |
66 | ||
67 | va_start(params, fmt); | |
68 | vfprintf(stderr, fmt, params); | |
69 | va_end(params); | |
70 | fputc('\n', stderr); | |
71 | } | |
72 | ||
73 | struct alternate_object_cache { | |
74 | struct object **items; | |
75 | size_t nr, alloc; | |
76 | }; | |
77 | ||
78 | static void cache_one_alternate(const struct object_id *oid, | |
79 | void *vcache) | |
80 | { | |
81 | struct alternate_object_cache *cache = vcache; | |
82 | struct object *obj = parse_object(the_repository, oid); | |
83 | ||
84 | if (!obj || (obj->flags & ALTERNATE)) | |
85 | return; | |
86 | ||
87 | obj->flags |= ALTERNATE; | |
88 | ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc); | |
89 | cache->items[cache->nr++] = obj; | |
90 | } | |
91 | ||
92 | static void for_each_cached_alternate(struct fetch_negotiator *negotiator, | |
93 | void (*cb)(struct fetch_negotiator *, | |
94 | struct object *)) | |
95 | { | |
96 | static int initialized; | |
97 | static struct alternate_object_cache cache; | |
98 | size_t i; | |
99 | ||
100 | if (!initialized) { | |
101 | for_each_alternate_ref(cache_one_alternate, &cache); | |
102 | initialized = 1; | |
103 | } | |
104 | ||
105 | for (i = 0; i < cache.nr; i++) | |
106 | cb(negotiator, cache.items[i]); | |
107 | } | |
108 | ||
109 | static int rev_list_insert_ref(struct fetch_negotiator *negotiator, | |
110 | const char *refname, | |
111 | const struct object_id *oid) | |
112 | { | |
113 | struct object *o = deref_tag(the_repository, | |
114 | parse_object(the_repository, oid), | |
115 | refname, 0); | |
116 | ||
117 | if (o && o->type == OBJ_COMMIT) | |
118 | negotiator->add_tip(negotiator, (struct commit *)o); | |
119 | ||
120 | return 0; | |
121 | } | |
122 | ||
123 | static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid, | |
124 | int flag, void *cb_data) | |
125 | { | |
126 | return rev_list_insert_ref(cb_data, refname, oid); | |
127 | } | |
128 | ||
129 | enum ack_type { | |
130 | NAK = 0, | |
131 | ACK, | |
132 | ACK_continue, | |
133 | ACK_common, | |
134 | ACK_ready | |
135 | }; | |
136 | ||
137 | static void consume_shallow_list(struct fetch_pack_args *args, | |
138 | struct packet_reader *reader) | |
139 | { | |
140 | if (args->stateless_rpc && args->deepen) { | |
141 | /* If we sent a depth we will get back "duplicate" | |
142 | * shallow and unshallow commands every time there | |
143 | * is a block of have lines exchanged. | |
144 | */ | |
145 | while (packet_reader_read(reader) == PACKET_READ_NORMAL) { | |
146 | if (starts_with(reader->line, "shallow ")) | |
147 | continue; | |
148 | if (starts_with(reader->line, "unshallow ")) | |
149 | continue; | |
150 | die(_("git fetch-pack: expected shallow list")); | |
151 | } | |
152 | if (reader->status != PACKET_READ_FLUSH) | |
153 | die(_("git fetch-pack: expected a flush packet after shallow list")); | |
154 | } | |
155 | } | |
156 | ||
static enum ack_type get_ack(struct packet_reader *reader,
			     struct object_id *result_oid)
{
	int len;
	const char *arg;

	if (packet_reader_read(reader) != PACKET_READ_NORMAL)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	len = reader->pktlen;

	if (!strcmp(reader->line, "NAK"))
		return NAK;
	if (skip_prefix(reader->line, "ACK ", &arg)) {
		const char *p;
		if (!parse_oid_hex(arg, result_oid, &p)) {
			len -= p - reader->line;
			if (len < 1)
				return ACK;
			if (strstr(p, "continue"))
				return ACK_continue;
			if (strstr(p, "common"))
				return ACK_common;
			if (strstr(p, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), reader->line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else {
		if (write_in_full(fd, buf->buf, buf->len) < 0)
			die_errno(_("unable to write to remote"));
	}
}

static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
					struct object *obj)
{
	rev_list_insert_ref(negotiator, NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

static void mark_tips(struct fetch_negotiator *negotiator,
		      const struct oid_array *negotiation_tips)
{
	int i;

	if (!negotiation_tips) {
		for_each_ref(rev_list_insert_ref_oid, negotiator);
		return;
	}

	for (i = 0; i < negotiation_tips->nr; i++)
		rev_list_insert_ref(negotiator, NULL,
				    &negotiation_tips->oid[i]);
	return;
}

static int find_common(struct fetch_negotiator *negotiator,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;
	struct packet_reader reader;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_DIE_ON_ERR_PACKET);

	if (!args->no_dependents) {
		mark_tips(negotiator, args->negotiation_tips);
		for_each_cached_alternate(negotiator, insert_one_alternate_object);
	}

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!args->no_dependents &&
		    ((o = lookup_object(the_repository, remote)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice) {
		const char *spec =
			expand_list_objects_filter_spec(&args->filter_options);
		packet_buf_write(&req_buf, "filter %s", spec);
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while (packet_reader_read(&reader) == PACKET_READ_NORMAL) {
			if (skip_prefix(reader.line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), reader.line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(reader.line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), reader.line);
				if (!lookup_object(the_repository, &oid))
					die(_("object not found: %s"), reader.line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), reader.line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), reader.line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), reader.line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, &reader);
			do {
				ack = get_ack(&reader, result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(the_repository,
							      result_oid);
					int was_common;

					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					was_common = negotiator->ack(negotiator, commit);
					if (args->stateless_rpc
					    && ack == ACK_common
					    && !was_common) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready)
						got_ready = 1;
					break;
				}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
			if (got_ready)
				break;
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, &reader);
	while (flushes || multi_ack) {
		int ack = get_ack(&reader, result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(the_repository, oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(the_repository, &t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int is_unmatched_ref(const struct ref *ref)
{
	struct object_id oid;
	const char *p;
	return ref->match_status == REF_NOT_MATCHED &&
	       !parse_oid_hex(ref->name, &oid, &p) &&
	       *p == '\0' &&
	       oideq(&oid, &ref->old_oid);
}

static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;
	int strict = !(allow_unadvertised_object_request &
		       (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0)) {
			/*
			 * trash or a peeled value; do not even add it to
			 * unmatched list
			 */
			free_one_ref(ref);
			continue;
		} else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	if (strict) {
		for (i = 0; i < nr_sought; i++) {
			ref = sought[i];
			if (!is_unmatched_ref(ref))
				continue;

			add_refs_to_oidset(&tip_oids, unmatched);
			add_refs_to_oidset(&tip_oids, newlist);
			break;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		ref = sought[i];
		if (!is_unmatched_ref(ref))
			continue;

		if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	free_refs(unmatched);

	*refs = newlist;
}

static void mark_alternate_complete(struct fetch_negotiator *unused,
				    struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};

/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * latest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
					 struct fetch_pack_args *args,
					 struct ref **refs)
{
	struct ref *ref;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file_with_flags(&ref->old_oid,
						OBJECT_INFO_QUICK))
			continue;
		o = parse_object(the_repository, &ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(NULL, mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(the_repository,
					     lookup_object(the_repository,
							   &ref->old_oid),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		negotiator->known_common(negotiator,
					 (struct commit *)o);
	}

	save_commit_buffer = old_save_commit_buffer;
}

/*
 * Returns 1 if every object pointed to by the given remote refs is available
 * locally and reachable from a local ref, and 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs)
{
	struct ref *ref;
	int retval;

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(the_repository, remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_pushf(&cmd.args, "--strict%s",
					 fsck_msg_types.buf);
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct repository *r = the_repository;
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(r, &negotiator);

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}

	if (server_supports("shallow"))
		print_verbose(args, _("Server supports %s"), "shallow");
	else if (args->depth > 0 || is_repository_shallow(r))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports %s"), "multi_ack_detailed");
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports %s"), "no-done");
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports %s"), "multi_ack");
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports %s"), "side-band-64k");
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports %s"), "side-band");
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports %s"), "allow-tip-sha1-in-want");
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports %s"), "allow-reachable-sha1-in-want");
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (server_supports("thin-pack"))
		print_verbose(args, _("Server supports %s"), "thin-pack");
	else
		args->use_thin_pack = 0;
	if (server_supports("no-progress"))
		print_verbose(args, _("Server supports %s"), "no-progress");
	else
		args->no_progress = 0;
	if (server_supports("include-tag"))
		print_verbose(args, _("Server supports %s"), "include-tag");
	else
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports %s"), "ofs-delta");
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports %s"), "filter");
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if (server_supports("deepen-since")) {
		print_verbose(args, _("Server supports %s"), "deepen-since");
		deepen_since_ok = 1;
	} else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not")) {
		print_verbose(args, _("Server supports %s"), "deepen-not");
		deepen_not_ok = 1;
	} else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (server_supports("deepen-relative"))
		print_verbose(args, _("Server supports %s"), "deepen-relative");
	else if (args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (!args->no_dependents) {
		mark_complete_and_common_ref(&negotiator, args, &ref);
		filter_refs(args, &ref, sought, nr_sought);
		if (everything_local(args, &ref)) {
			packet_flush(fd[1]);
			goto all_done;
		}
	} else {
		filter_refs(args, &ref, sought, nr_sought);
	}
	if (find_common(&negotiator, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

all_done:
	negotiator.release(&negotiator);
	return ref;
}

static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
{
	if (is_repository_shallow(the_repository))
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
		}
	}
	if (args->deepen_relative)
		packet_buf_write(req_buf, "deepen-relative\n");
}

static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
{
	int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);

	for ( ; wants ; wants = wants->next) {
		const struct object_id *remote = &wants->old_oid;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!no_dependents &&
		    ((o = lookup_object(the_repository, remote)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		if (!use_ref_in_want || wants->exact_oid)
			packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
		else
			packet_buf_write(req_buf, "want-ref %s\n", wants->name);
	}
}

static void add_common(struct strbuf *req_buf, struct oidset *common)
{
	struct oidset_iter iter;
	const struct object_id *oid;
	oidset_iter_init(common, &iter);

	while ((oid = oidset_iter_next(&iter))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
	}
}

static int add_haves(struct fetch_negotiator *negotiator,
		     struct strbuf *req_buf,
		     int *haves_to_send, int *in_vain)
{
	int ret = 0;
	int haves_added = 0;
	const struct object_id *oid;

	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
		if (++haves_added >= *haves_to_send)
			break;
	}

	*in_vain += haves_added;
	if (!haves_added || *in_vain >= MAX_IN_VAIN) {
		/* Send Done */
		packet_buf_write(req_buf, "done\n");
		ret = 1;
	}

	/* Increase haves to send on next round */
	*haves_to_send = next_flush(1, *haves_to_send);

	return ret;
}

static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
			      struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain,
			      int sideband_all)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_buf_write(&req_buf, "server-option=%s",
					 args->server_options->items[i].string);
	}

	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");
	if (sideband_all)
		packet_buf_write(&req_buf, "sideband-all");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow(the_repository) || args->deepen)
		die(_("Server does not support shallow requests"));

	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		const char *spec =
			expand_list_objects_filter_spec(&args->filter_options);
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s", spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	/* add wants */
	add_wants(args->no_dependents, wants, &req_buf);

	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
	}

	/* Send request */
	packet_buf_flush(&req_buf);
	if (write_in_full(fd_out, req_buf.buf, req_buf.len) < 0)
		die_errno(_("unable to write request to remote"));

	strbuf_release(&req_buf);
	return ret;
}

/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
{
	int ret;

	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die(_("error reading section header '%s'"), section);

	ret = !strcmp(reader->line, section);

	if (!peek) {
		if (!ret)
			die(_("expected '%s', received '%s'"),
			    section, reader->line);
		packet_reader_read(reader);
	}

	return ret;
}

static int process_acks(struct fetch_negotiator *negotiator,
			struct packet_reader *reader,
			struct oidset *common)
{
	/* received */
	int received_ready = 0;
	int received_ack = 0;

	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;

		if (!strcmp(reader->line, "NAK"))
			continue;

		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(the_repository, &oid);
				negotiator->ack(negotiator, commit);
				received_ack = 1;
			}
			continue;
		}

		if (!strcmp(reader->line, "ready")) {
			received_ready = 1;
			continue;
		}

		die(_("unexpected acknowledgment line: '%s'"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing acks: %d"), reader->status);

	/*
	 * If an "acknowledgments" section is sent, a packfile is sent if and
	 * only if "ready" was sent in this section. The other sections
	 * ("shallow-info" and "wanted-refs") are sent only if a packfile is
	 * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
	 * otherwise.
	 */
	if (received_ready && reader->status != PACKET_READ_DELIM)
		die(_("expected packfile to be sent after 'ready'"));
	if (!received_ready && reader->status != PACKET_READ_FLUSH)
		die(_("expected no other sections to be sent after no 'ready'"));

	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
}
1263 | ||
1264 | static void receive_shallow_info(struct fetch_pack_args *args, | |
1265 | struct packet_reader *reader, | |
1266 | struct oid_array *shallows, | |
1267 | struct shallow_info *si) | |
1268 | { | |
1269 | int unshallow_received = 0; | |
1270 | ||
1271 | process_section_header(reader, "shallow-info", 0); | |
1272 | while (packet_reader_read(reader) == PACKET_READ_NORMAL) { | |
1273 | const char *arg; | |
1274 | struct object_id oid; | |
1275 | ||
1276 | if (skip_prefix(reader->line, "shallow ", &arg)) { | |
1277 | if (get_oid_hex(arg, &oid)) | |
1278 | die(_("invalid shallow line: %s"), reader->line); | |
1279 | oid_array_append(shallows, &oid); | |
1280 | continue; | |
1281 | } | |
1282 | if (skip_prefix(reader->line, "unshallow ", &arg)) { | |
1283 | if (get_oid_hex(arg, &oid)) | |
1284 | die(_("invalid unshallow line: %s"), reader->line); | |
1285 | if (!lookup_object(the_repository, &oid)) | |
1286 | die(_("object not found: %s"), reader->line); | |
1287 | /* make sure that it is parsed as shallow */ | |
1288 | if (!parse_object(the_repository, &oid)) | |
1289 | die(_("error in object: %s"), reader->line); | |
1290 | if (unregister_shallow(&oid)) | |
1291 | die(_("no shallow found: %s"), reader->line); | |
1292 | unshallow_received = 1; | |
1293 | continue; | |
1294 | } | |
1295 | die(_("expected shallow/unshallow, got %s"), reader->line); | |
1296 | } | |
1297 | ||
1298 | if (reader->status != PACKET_READ_FLUSH && | |
1299 | reader->status != PACKET_READ_DELIM) | |
1300 | die(_("error processing shallow info: %d"), reader->status); | |
1301 | ||
1302 | if (args->deepen || unshallow_received) { | |
1303 | /* | |
1304 | * Treat these as shallow lines caused by our depth settings. | |
1305 | * In v0, these lines cannot cause refs to be rejected; do the | |
1306 | * same. | |
1307 | */ | |
1308 | int i; | |
1309 | ||
1310 | for (i = 0; i < shallows->nr; i++) | |
1311 | register_shallow(the_repository, &shallows->oid[i]); | |
1312 | setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, | |
1313 | NULL); | |
1314 | args->deepen = 1; | |
1315 | } else if (shallows->nr) { | |
1316 | /* | |
1317 | * Treat these as shallow lines caused by the remote being | |
1318 | * shallow. In v0, remote refs that reach these objects are | |
1319 | * rejected (unless --update-shallow is set); do the same. | |
1320 | */ | |
1321 | prepare_shallow_info(si, shallows); | |
1322 | if (si->nr_ours || si->nr_theirs) | |
1323 | alternate_shallow_file = | |
1324 | setup_temporary_shallow(si->shallow); | |
1325 | else | |
1326 | alternate_shallow_file = NULL; | |
1327 | } else { | |
1328 | alternate_shallow_file = NULL; | |
1329 | } | |
1330 | } | |
1331 | ||
1332 | static int cmp_name_ref(const void *name, const void *ref) | |
1333 | { | |
1334 | return strcmp(name, (*(struct ref **)ref)->name); | |
1335 | } | |
1336 | ||
1337 | static void receive_wanted_refs(struct packet_reader *reader, | |
1338 | struct ref **sought, int nr_sought) | |
1339 | { | |
1340 | process_section_header(reader, "wanted-refs", 0); | |
1341 | while (packet_reader_read(reader) == PACKET_READ_NORMAL) { | |
1342 | struct object_id oid; | |
1343 | const char *end; | |
1344 | struct ref **found; | |
1345 | ||
1346 | if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ') | |
1347 | die(_("expected wanted-ref, got '%s'"), reader->line); | |
1348 | ||
1349 | found = bsearch(end, sought, nr_sought, sizeof(*sought), | |
1350 | cmp_name_ref); | |
1351 | if (!found) | |
1352 | die(_("unexpected wanted-ref: '%s'"), reader->line); | |
1353 | oidcpy(&(*found)->old_oid, &oid); | |
1354 | } | |
1355 | ||
1356 | if (reader->status != PACKET_READ_DELIM) | |
1357 | die(_("error processing wanted refs: %d"), reader->status); | |
1358 | } | |
1359 | ||
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,
	FETCH_SEND_REQUEST,
	FETCH_PROCESS_ACKS,
	FETCH_GET_PACK,
	FETCH_DONE,
};

static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    struct oid_array *shallows,
				    struct shallow_info *si,
				    char **pack_lockfile)
{
	struct repository *r = the_repository;
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;
	int haves_to_send = INITIAL_FLUSH;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(r, &negotiator);
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_DIE_ON_ERR_PACKET);
	if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 1) &&
	    server_supports_feature("fetch", "sideband-all", 0)) {
		reader.use_sideband = 1;
		reader.me = "fetch-pack";
	}

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (!args->no_dependents) {
				mark_complete_and_common_ref(&negotiator, args, &ref);
				filter_refs(args, &ref, sought, nr_sought);
				if (everything_local(args, &ref))
					state = FETCH_DONE;
				else
					state = FETCH_SEND_REQUEST;

				mark_tips(&negotiator, args->negotiation_tips);
				for_each_cached_alternate(&negotiator,
							  insert_one_alternate_object);
			} else {
				filter_refs(args, &ref, sought, nr_sought);
				state = FETCH_SEND_REQUEST;
			}
			break;
		case FETCH_SEND_REQUEST:
			if (send_fetch_request(&negotiator, fd[1], args, ref,
					       &common,
					       &haves_to_send, &in_vain,
					       reader.use_sideband))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&negotiator, &reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader, shallows, si);

			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, sought, nr_sought);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	negotiator.release(&negotiator);
	oidset_clear(&common);
	return ref;
}

static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
{
	if (strcmp(var, "fetch.fsck.skiplist") == 0) {
		const char *path;

		if (git_config_pathname(&path, var, value))
			return 1;
		strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
			    fsck_msg_types.len ? ',' : '=', path);
		free((char *)path);
		return 0;
	}

	if (skip_prefix(var, "fetch.fsck.", &var)) {
		if (is_valid_msg_type(var, value))
			strbuf_addf(&fsck_msg_types, "%c%s=%s",
				    fsck_msg_types.len ? ',' : '=', var, value);
		else
			warning("Skipping unknown msg id '%s'", var);
		return 0;
	}

	return git_default_config(var, value, cb);
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(fetch_pack_config_cb, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		alternate_shallow_file = NULL;
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
			alternate_shallow_file = NULL;
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		alternate_shallow_file = NULL;
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

static int iterate_ref_map(void *cb_data, struct object_id *oid)
{
	struct ref **rm = cb_data;
	struct ref *ref = *rm;

	if (!ref)
		return -1; /* end of the list */
	*rm = ref->next;
	oidcpy(oid, &ref->old_oid);
	return 0;
}

struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[],
		       const struct ref *ref,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;
	struct oid_array shallows_scratch = OID_ARRAY_INIT;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (args->no_dependents && !args->filter_options.choice) {
		/*
		 * The protocol does not support requesting that only the
		 * wanted objects be sent, so approximate this by setting a
		 * "blob:none" filter if no filter is already set. This works
		 * for all object types: note that wanted blobs will still be
		 * sent because they are directly specified as a "want".
		 *
		 * NEEDSWORK: Add an option in the protocol to request that
		 * only the wanted objects be sent, and implement it.
		 */
		parse_list_objects_filter(&args->filter_options, "blob:none");
	}

	if (version != protocol_v2 && !ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	if (version == protocol_v2) {
		if (shallow->nr)
			BUG("Protocol V2 does not provide shallows at this point in the fetch");
		memset(&si, 0, sizeof(si));
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   &shallows_scratch, &si,
					   pack_lockfile);
	} else {
		prepare_shallow_info(&si, shallow);
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	}
	reprepare_packed_git(the_repository);

	if (!args->cloning && args->deepen) {
		struct check_connected_options opt = CHECK_CONNECTED_INIT;
		struct ref *iterator = ref_cpy;
		opt.shallow_file = alternate_shallow_file;
		if (args->deepen)
			opt.is_deepening_fetch = 1;
		if (check_connected(iterate_ref_map, &iterator, &opt)) {
			error(_("remote did not send all necessary objects"));
			free_refs(ref_cpy);
			ref_cpy = NULL;
			rollback_lock_file(&shallow_lock);
			goto cleanup;
		}
		args->connectivity_checked = 1;
	}

	update_shallow(args, sought, nr_sought, &si);
cleanup:
	clear_shallow_info(&si);
	oid_array_clear(&shallows_scratch);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}