fetch-pack.c
1 #include "cache.h"
2 #include "repository.h"
3 #include "config.h"
4 #include "lockfile.h"
5 #include "refs.h"
6 #include "pkt-line.h"
7 #include "commit.h"
8 #include "tag.h"
9 #include "exec-cmd.h"
10 #include "pack.h"
11 #include "sideband.h"
12 #include "fetch-pack.h"
13 #include "remote.h"
14 #include "run-command.h"
15 #include "connect.h"
16 #include "transport.h"
17 #include "version.h"
18 #include "sha1-array.h"
19 #include "oidset.h"
20 #include "packfile.h"
21 #include "object-store.h"
22 #include "connected.h"
23 #include "fetch-negotiator.h"
24 #include "fsck.h"
25
26 static int transfer_unpack_limit = -1;
27 static int fetch_unpack_limit = -1;
28 static int unpack_limit = 100;
29 static int prefer_ofs_delta = 1;
30 static int no_done;
31 static int deepen_since_ok;
32 static int deepen_not_ok;
33 static int fetch_fsck_objects = -1;
34 static int transfer_fsck_objects = -1;
35 static int agent_supported;
36 static int server_supports_filtering;
37 static struct lock_file shallow_lock;
38 static const char *alternate_shallow_file;
39 static struct strbuf fsck_msg_types = STRBUF_INIT;
40
41 /* Remember to update object flag allocation in object.h */
42 #define COMPLETE (1U << 0)
43 #define ALTERNATE (1U << 1)
44
45 /*
46 * After sending this many "have"s if we do not get any new ACK, we
47 * give up traversing our history.
48 */
49 #define MAX_IN_VAIN 256
50
51 static int multi_ack, use_sideband;
52 /* Allow specifying sha1 if it is a ref tip. */
53 #define ALLOW_TIP_SHA1 01
54 /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
55 #define ALLOW_REACHABLE_SHA1 02
56 static unsigned int allow_unadvertised_object_request;
57
58 __attribute__((format (printf, 2, 3)))
59 static inline void print_verbose(const struct fetch_pack_args *args,
60 const char *fmt, ...)
61 {
62 va_list params;
63
64 if (!args->verbose)
65 return;
66
67 va_start(params, fmt);
68 vfprintf(stderr, fmt, params);
69 va_end(params);
70 fputc('\n', stderr);
71 }
72
73 struct alternate_object_cache {
74 struct object **items;
75 size_t nr, alloc;
76 };
77
78 static void cache_one_alternate(const struct object_id *oid,
79 void *vcache)
80 {
81 struct alternate_object_cache *cache = vcache;
82 struct object *obj = parse_object(the_repository, oid);
83
84 if (!obj || (obj->flags & ALTERNATE))
85 return;
86
87 obj->flags |= ALTERNATE;
88 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
89 cache->items[cache->nr++] = obj;
90 }
91
92 static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
93 void (*cb)(struct fetch_negotiator *,
94 struct object *))
95 {
96 static int initialized;
97 static struct alternate_object_cache cache;
98 size_t i;
99
100 if (!initialized) {
101 for_each_alternate_ref(cache_one_alternate, &cache);
102 initialized = 1;
103 }
104
105 for (i = 0; i < cache.nr; i++)
106 cb(negotiator, cache.items[i]);
107 }
108
109 static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
110 const char *refname,
111 const struct object_id *oid)
112 {
113 struct object *o = deref_tag(the_repository,
114 parse_object(the_repository, oid),
115 refname, 0);
116
117 if (o && o->type == OBJ_COMMIT)
118 negotiator->add_tip(negotiator, (struct commit *)o);
119
120 return 0;
121 }
122
123 static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
124 int flag, void *cb_data)
125 {
126 return rev_list_insert_ref(cb_data, refname, oid);
127 }
128
129 enum ack_type {
130 NAK = 0,
131 ACK,
132 ACK_continue,
133 ACK_common,
134 ACK_ready
135 };
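/*
 * How these are used by the v0 negotiation below, roughly: NAK means
 * the server found nothing new among the "have"s sent so far; a bare
 * ACK ends the exchange in single-ack mode; ACK_continue is the plain
 * multi_ack form, while ACK_common and ACK_ready belong to
 * multi_ack_detailed, with "ready" meaning the server could already
 * produce a pack.
 */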
136
137 static void consume_shallow_list(struct fetch_pack_args *args,
138 struct packet_reader *reader)
139 {
140 if (args->stateless_rpc && args->deepen) {
141 /* If we sent a depth we will get back "duplicate"
142 * shallow and unshallow commands every time there
143 * is a block of have lines exchanged.
144 */
145 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
146 if (starts_with(reader->line, "shallow "))
147 continue;
148 if (starts_with(reader->line, "unshallow "))
149 continue;
150 die(_("git fetch-pack: expected shallow list"));
151 }
152 if (reader->status != PACKET_READ_FLUSH)
153 die(_("git fetch-pack: expected a flush packet after shallow list"));
154 }
155 }
156
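/*
 * Parse one acknowledgment line.  For illustration (hypothetical object
 * id), the lines handled here look like:
 *
 *   NAK
 *   ACK 1234... continue
 *   ACK 1234... common
 *   ACK 1234... ready
 *   ACK 1234...
 */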
157 static enum ack_type get_ack(struct packet_reader *reader,
158 struct object_id *result_oid)
159 {
160 int len;
161 const char *arg;
162
163 if (packet_reader_read(reader) != PACKET_READ_NORMAL)
164 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
165 len = reader->pktlen;
166
167 if (!strcmp(reader->line, "NAK"))
168 return NAK;
169 if (skip_prefix(reader->line, "ACK ", &arg)) {
170 if (!get_oid_hex(arg, result_oid)) {
171 arg += 40;
172 len -= arg - reader->line;
173 if (len < 1)
174 return ACK;
175 if (strstr(arg, "continue"))
176 return ACK_continue;
177 if (strstr(arg, "common"))
178 return ACK_common;
179 if (strstr(arg, "ready"))
180 return ACK_ready;
181 return ACK;
182 }
183 }
184 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), reader->line);
185 }
186
187 static void send_request(struct fetch_pack_args *args,
188 int fd, struct strbuf *buf)
189 {
190 if (args->stateless_rpc) {
191 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
192 packet_flush(fd);
193 } else {
194 if (write_in_full(fd, buf->buf, buf->len) < 0)
195 die_errno(_("unable to write to remote"));
196 }
197 }
198
199 static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
200 struct object *obj)
201 {
202 rev_list_insert_ref(negotiator, NULL, &obj->oid);
203 }
204
205 #define INITIAL_FLUSH 16
206 #define PIPESAFE_FLUSH 32
207 #define LARGE_FLUSH 16384
208
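/*
 * A purely illustrative walk-through of the window sizes produced by
 * next_flush() below, starting from INITIAL_FLUSH: over a bidirectional
 * connection the flush points grow 16, 32, 64, 96, 128, ... (doubling
 * until PIPESAFE_FLUSH, then adding PIPESAFE_FLUSH each round); over
 * stateless RPC they grow 16, 32, 64, ..., 16384, then by roughly 10%
 * per round, to keep the number of HTTP round trips down.
 */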
209 static int next_flush(int stateless_rpc, int count)
210 {
211 if (stateless_rpc) {
212 if (count < LARGE_FLUSH)
213 count <<= 1;
214 else
215 count = count * 11 / 10;
216 } else {
217 if (count < PIPESAFE_FLUSH)
218 count <<= 1;
219 else
220 count += PIPESAFE_FLUSH;
221 }
222 return count;
223 }
224
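/*
 * Seed the negotiator: with a NULL negotiation_tips every local ref is
 * used as a starting point; otherwise only the given tips are used
 * (e.g. as collected from "git fetch --negotiation-tip=...").
 */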
225 static void mark_tips(struct fetch_negotiator *negotiator,
226 const struct oid_array *negotiation_tips)
227 {
228 int i;
229
230 if (!negotiation_tips) {
231 for_each_ref(rev_list_insert_ref_oid, negotiator);
232 return;
233 }
234
235 for (i = 0; i < negotiation_tips->nr; i++)
236 rev_list_insert_ref(negotiator, NULL,
237 &negotiation_tips->oid[i]);
238 return;
239 }
240
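/*
 * Drive the protocol v0 negotiation: send "want" lines (the first one
 * carrying the capability list), any shallow/deepen requests, then
 * batches of "have" lines until the server ACKs a common commit or
 * MAX_IN_VAIN haves go unanswered.  Returns 1 if there was nothing to
 * fetch, 0 if a common base was found (or we had no haves to offer),
 * and -1 otherwise; the caller reports that last case as "no common
 * commits".
 */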
241 static int find_common(struct fetch_negotiator *negotiator,
242 struct fetch_pack_args *args,
243 int fd[2], struct object_id *result_oid,
244 struct ref *refs)
245 {
246 int fetching;
247 int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
248 const struct object_id *oid;
249 unsigned in_vain = 0;
250 int got_continue = 0;
251 int got_ready = 0;
252 struct strbuf req_buf = STRBUF_INIT;
253 size_t state_len = 0;
254 struct packet_reader reader;
255
256 if (args->stateless_rpc && multi_ack == 1)
257 die(_("--stateless-rpc requires multi_ack_detailed"));
258
259 packet_reader_init(&reader, fd[0], NULL, 0,
260 PACKET_READ_CHOMP_NEWLINE |
261 PACKET_READ_DIE_ON_ERR_PACKET);
262
263 if (!args->no_dependents) {
264 mark_tips(negotiator, args->negotiation_tips);
265 for_each_cached_alternate(negotiator, insert_one_alternate_object);
266 }
267
268 fetching = 0;
269 for ( ; refs ; refs = refs->next) {
270 struct object_id *remote = &refs->old_oid;
271 const char *remote_hex;
272 struct object *o;
273
274 /*
275 * If that object is complete (i.e. it is an ancestor of a
276 * local ref), we tell them we have it but do not have to
277 * tell them about its ancestors, which they already know
278 * about.
279 *
280 * We use lookup_object here because we are only
281 * interested in the case we *know* the object is
282 * reachable and we have already scanned it.
283 *
284 * Do this only if args->no_dependents is false (if it is true,
285 * we cannot trust the object flags).
286 */
287 if (!args->no_dependents &&
288 ((o = lookup_object(the_repository, remote)) != NULL) &&
289 (o->flags & COMPLETE)) {
290 continue;
291 }
292
293 remote_hex = oid_to_hex(remote);
294 if (!fetching) {
295 struct strbuf c = STRBUF_INIT;
296 if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
297 if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
298 if (no_done) strbuf_addstr(&c, " no-done");
299 if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
300 if (use_sideband == 1) strbuf_addstr(&c, " side-band");
301 if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
302 if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
303 if (args->no_progress) strbuf_addstr(&c, " no-progress");
304 if (args->include_tag) strbuf_addstr(&c, " include-tag");
305 if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
306 if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
307 if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
308 if (agent_supported) strbuf_addf(&c, " agent=%s",
309 git_user_agent_sanitized());
310 if (args->filter_options.choice)
311 strbuf_addstr(&c, " filter");
312 packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
313 strbuf_release(&c);
314 } else
315 packet_buf_write(&req_buf, "want %s\n", remote_hex);
316 fetching++;
317 }
318
319 if (!fetching) {
320 strbuf_release(&req_buf);
321 packet_flush(fd[1]);
322 return 1;
323 }
324
325 if (is_repository_shallow(the_repository))
326 write_shallow_commits(&req_buf, 1, NULL);
327 if (args->depth > 0)
328 packet_buf_write(&req_buf, "deepen %d", args->depth);
329 if (args->deepen_since) {
330 timestamp_t max_age = approxidate(args->deepen_since);
331 packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
332 }
333 if (args->deepen_not) {
334 int i;
335 for (i = 0; i < args->deepen_not->nr; i++) {
336 struct string_list_item *s = args->deepen_not->items + i;
337 packet_buf_write(&req_buf, "deepen-not %s", s->string);
338 }
339 }
340 if (server_supports_filtering && args->filter_options.choice) {
341 struct strbuf expanded_filter_spec = STRBUF_INIT;
342 expand_list_objects_filter_spec(&args->filter_options,
343 &expanded_filter_spec);
344 packet_buf_write(&req_buf, "filter %s",
345 expanded_filter_spec.buf);
346 strbuf_release(&expanded_filter_spec);
347 }
348 packet_buf_flush(&req_buf);
349 state_len = req_buf.len;
350
351 if (args->deepen) {
352 const char *arg;
353 struct object_id oid;
354
355 send_request(args, fd[1], &req_buf);
356 while (packet_reader_read(&reader) == PACKET_READ_NORMAL) {
357 if (skip_prefix(reader.line, "shallow ", &arg)) {
358 if (get_oid_hex(arg, &oid))
359 die(_("invalid shallow line: %s"), reader.line);
360 register_shallow(the_repository, &oid);
361 continue;
362 }
363 if (skip_prefix(reader.line, "unshallow ", &arg)) {
364 if (get_oid_hex(arg, &oid))
365 die(_("invalid unshallow line: %s"), reader.line);
366 if (!lookup_object(the_repository, &oid))
367 die(_("object not found: %s"), reader.line);
368 /* make sure that it is parsed as shallow */
369 if (!parse_object(the_repository, &oid))
370 die(_("error in object: %s"), reader.line);
371 if (unregister_shallow(&oid))
372 die(_("no shallow found: %s"), reader.line);
373 continue;
374 }
375 die(_("expected shallow/unshallow, got %s"), reader.line);
376 }
377 } else if (!args->stateless_rpc)
378 send_request(args, fd[1], &req_buf);
379
380 if (!args->stateless_rpc) {
381 /* If we aren't using the stateless-rpc interface
382 * we don't need to retain the headers.
383 */
384 strbuf_setlen(&req_buf, 0);
385 state_len = 0;
386 }
387
388 flushes = 0;
389 retval = -1;
390 if (args->no_dependents)
391 goto done;
392 while ((oid = negotiator->next(negotiator))) {
393 packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
394 print_verbose(args, "have %s", oid_to_hex(oid));
395 in_vain++;
396 if (flush_at <= ++count) {
397 int ack;
398
399 packet_buf_flush(&req_buf);
400 send_request(args, fd[1], &req_buf);
401 strbuf_setlen(&req_buf, state_len);
402 flushes++;
403 flush_at = next_flush(args->stateless_rpc, count);
404
405 /*
406 * We keep one window "ahead" of the other side, and
407 * will wait for an ACK only on the next one
408 */
409 if (!args->stateless_rpc && count == INITIAL_FLUSH)
410 continue;
411
412 consume_shallow_list(args, &reader);
413 do {
414 ack = get_ack(&reader, result_oid);
415 if (ack)
416 print_verbose(args, _("got %s %d %s"), "ack",
417 ack, oid_to_hex(result_oid));
418 switch (ack) {
419 case ACK:
420 flushes = 0;
421 multi_ack = 0;
422 retval = 0;
423 goto done;
424 case ACK_common:
425 case ACK_ready:
426 case ACK_continue: {
427 struct commit *commit =
428 lookup_commit(the_repository,
429 result_oid);
430 int was_common;
431
432 if (!commit)
433 die(_("invalid commit %s"), oid_to_hex(result_oid));
434 was_common = negotiator->ack(negotiator, commit);
435 if (args->stateless_rpc
436 && ack == ACK_common
437 && !was_common) {
438 /* We need to replay the have for this object
439 * on the next RPC request so the peer knows
440 * it is in common with us.
441 */
442 const char *hex = oid_to_hex(result_oid);
443 packet_buf_write(&req_buf, "have %s\n", hex);
444 state_len = req_buf.len;
445 /*
446 * Reset in_vain because an ack
447 * for this commit has not been
448 * seen.
449 */
450 in_vain = 0;
451 } else if (!args->stateless_rpc
452 || ack != ACK_common)
453 in_vain = 0;
454 retval = 0;
455 got_continue = 1;
456 if (ack == ACK_ready)
457 got_ready = 1;
458 break;
459 }
460 }
461 } while (ack);
462 flushes--;
463 if (got_continue && MAX_IN_VAIN < in_vain) {
464 print_verbose(args, _("giving up"));
465 break; /* give up */
466 }
467 if (got_ready)
468 break;
469 }
470 }
471 done:
472 if (!got_ready || !no_done) {
473 packet_buf_write(&req_buf, "done\n");
474 send_request(args, fd[1], &req_buf);
475 }
476 print_verbose(args, _("done"));
477 if (retval != 0) {
478 multi_ack = 0;
479 flushes++;
480 }
481 strbuf_release(&req_buf);
482
483 if (!got_ready || !no_done)
484 consume_shallow_list(args, &reader);
485 while (flushes || multi_ack) {
486 int ack = get_ack(&reader, result_oid);
487 if (ack) {
488 print_verbose(args, _("got %s (%d) %s"), "ack",
489 ack, oid_to_hex(result_oid));
490 if (ack == ACK)
491 return 0;
492 multi_ack = 1;
493 continue;
494 }
495 flushes--;
496 }
497 /* it is no error to fetch into a completely empty repo */
498 return count ? retval : 0;
499 }
500
501 static struct commit_list *complete;
502
503 static int mark_complete(const struct object_id *oid)
504 {
505 struct object *o = parse_object(the_repository, oid);
506
507 while (o && o->type == OBJ_TAG) {
508 struct tag *t = (struct tag *) o;
509 if (!t->tagged)
510 break; /* broken repository */
511 o->flags |= COMPLETE;
512 o = parse_object(the_repository, &t->tagged->oid);
513 }
514 if (o && o->type == OBJ_COMMIT) {
515 struct commit *commit = (struct commit *)o;
516 if (!(commit->object.flags & COMPLETE)) {
517 commit->object.flags |= COMPLETE;
518 commit_list_insert(commit, &complete);
519 }
520 }
521 return 0;
522 }
523
524 static int mark_complete_oid(const char *refname, const struct object_id *oid,
525 int flag, void *cb_data)
526 {
527 return mark_complete(oid);
528 }
529
530 static void mark_recent_complete_commits(struct fetch_pack_args *args,
531 timestamp_t cutoff)
532 {
533 while (complete && cutoff <= complete->item->date) {
534 print_verbose(args, _("Marking %s as complete"),
535 oid_to_hex(&complete->item->object.oid));
536 pop_most_recent_commit(&complete, COMPLETE);
537 }
538 }
539
540 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
541 {
542 for (; refs; refs = refs->next)
543 oidset_insert(oids, &refs->old_oid);
544 }
545
546 static int is_unmatched_ref(const struct ref *ref)
547 {
548 struct object_id oid;
549 const char *p;
550 return ref->match_status == REF_NOT_MATCHED &&
551 !parse_oid_hex(ref->name, &oid, &p) &&
552 *p == '\0' &&
553 oideq(&oid, &ref->old_oid);
554 }
555
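/*
 * Trim *refs down to what was actually requested: matched refs are kept
 * and the corresponding "sought" entries marked REF_MATCHED; with
 * args->fetch_all most advertised refs are kept as well; the rest are
 * dropped.  Requests in "sought" that were not advertised are appended
 * only when they are full hex object names and either the server allows
 * unadvertised object requests or the oid equals an advertised tip;
 * otherwise they are marked REF_UNADVERTISED_NOT_ALLOWED for later
 * error reporting.
 */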
556 static void filter_refs(struct fetch_pack_args *args,
557 struct ref **refs,
558 struct ref **sought, int nr_sought)
559 {
560 struct ref *newlist = NULL;
561 struct ref **newtail = &newlist;
562 struct ref *unmatched = NULL;
563 struct ref *ref, *next;
564 struct oidset tip_oids = OIDSET_INIT;
565 int i;
566 int strict = !(allow_unadvertised_object_request &
567 (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
568
569 i = 0;
570 for (ref = *refs; ref; ref = next) {
571 int keep = 0;
572 next = ref->next;
573
574 if (starts_with(ref->name, "refs/") &&
575 check_refname_format(ref->name, 0)) {
576 /*
577 * trash or a peeled value; do not even add it to
578 * unmatched list
579 */
580 free_one_ref(ref);
581 continue;
582 } else {
583 while (i < nr_sought) {
584 int cmp = strcmp(ref->name, sought[i]->name);
585 if (cmp < 0)
586 break; /* definitely do not have it */
587 else if (cmp == 0) {
588 keep = 1; /* definitely have it */
589 sought[i]->match_status = REF_MATCHED;
590 }
591 i++;
592 }
593
594 if (!keep && args->fetch_all &&
595 (!args->deepen || !starts_with(ref->name, "refs/tags/")))
596 keep = 1;
597 }
598
599 if (keep) {
600 *newtail = ref;
601 ref->next = NULL;
602 newtail = &ref->next;
603 } else {
604 ref->next = unmatched;
605 unmatched = ref;
606 }
607 }
608
609 if (strict) {
610 for (i = 0; i < nr_sought; i++) {
611 ref = sought[i];
612 if (!is_unmatched_ref(ref))
613 continue;
614
615 add_refs_to_oidset(&tip_oids, unmatched);
616 add_refs_to_oidset(&tip_oids, newlist);
617 break;
618 }
619 }
620
621 /* Append unmatched requests to the list */
622 for (i = 0; i < nr_sought; i++) {
623 ref = sought[i];
624 if (!is_unmatched_ref(ref))
625 continue;
626
627 if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
628 ref->match_status = REF_MATCHED;
629 *newtail = copy_ref(ref);
630 newtail = &(*newtail)->next;
631 } else {
632 ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
633 }
634 }
635
636 oidset_clear(&tip_oids);
637 free_refs(unmatched);
638
639 *refs = newlist;
640 }
641
642 static void mark_alternate_complete(struct fetch_negotiator *unused,
643 struct object *obj)
644 {
645 mark_complete(&obj->oid);
646 }
647
648 struct loose_object_iter {
649 struct oidset *loose_object_set;
650 struct ref *refs;
651 };
652
653 /*
654 * Mark recent commits available locally and reachable from a local ref as
655 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
656 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
657 * thus do not need COMMON_REF marks).
658 *
659 * The cutoff time for recency is determined by this heuristic: it is the
660 * earliest commit time of the objects in refs that are commits and that we know
661 * the commit time of.
662 */
663 static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
664 struct fetch_pack_args *args,
665 struct ref **refs)
666 {
667 struct ref *ref;
668 int old_save_commit_buffer = save_commit_buffer;
669 timestamp_t cutoff = 0;
670
671 save_commit_buffer = 0;
672
673 for (ref = *refs; ref; ref = ref->next) {
674 struct object *o;
675
676 if (!has_object_file_with_flags(&ref->old_oid,
677 OBJECT_INFO_QUICK))
678 continue;
679 o = parse_object(the_repository, &ref->old_oid);
680 if (!o)
681 continue;
682
683 /* We already have it -- which may mean that we were
684 * in sync with the other side at some time after
685 * that (it is OK if we guess wrong here).
686 */
687 if (o->type == OBJ_COMMIT) {
688 struct commit *commit = (struct commit *)o;
689 if (!cutoff || cutoff < commit->date)
690 cutoff = commit->date;
691 }
692 }
693
694 if (!args->deepen) {
695 for_each_ref(mark_complete_oid, NULL);
696 for_each_cached_alternate(NULL, mark_alternate_complete);
697 commit_list_sort_by_date(&complete);
698 if (cutoff)
699 mark_recent_complete_commits(args, cutoff);
700 }
701
702 /*
703 * Mark all complete remote refs as common refs.
704 * Don't mark them common yet; the server has to be told so first.
705 */
706 for (ref = *refs; ref; ref = ref->next) {
707 struct object *o = deref_tag(the_repository,
708 lookup_object(the_repository,
709 &ref->old_oid),
710 NULL, 0);
711
712 if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
713 continue;
714
715 negotiator->known_common(negotiator,
716 (struct commit *)o);
717 }
718
719 save_commit_buffer = old_save_commit_buffer;
720 }
721
722 /*
723 * Returns 1 if every object pointed to by the given remote refs is available
724 * locally and reachable from a local ref, and 0 otherwise.
725 */
726 static int everything_local(struct fetch_pack_args *args,
727 struct ref **refs)
728 {
729 struct ref *ref;
730 int retval;
731
732 for (retval = 1, ref = *refs; ref ; ref = ref->next) {
733 const struct object_id *remote = &ref->old_oid;
734 struct object *o;
735
736 o = lookup_object(the_repository, remote);
737 if (!o || !(o->flags & COMPLETE)) {
738 retval = 0;
739 print_verbose(args, "want %s (%s)", oid_to_hex(remote),
740 ref->name);
741 continue;
742 }
743 print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
744 ref->name);
745 }
746
747 return retval;
748 }
749
750 static int sideband_demux(int in, int out, void *data)
751 {
752 int *xd = data;
753 int ret;
754
755 ret = recv_sideband("fetch-pack", xd[0], out);
756 close(out);
757 return ret;
758 }
759
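/*
 * Receive the packfile from the other end.  Roughly: if sideband is in
 * use, fork an async demuxer that strips band #2 (progress) to stderr
 * and feeds band #1 (pack data) to us; then pipe the data into either
 * "git index-pack" (keeping the pack on disk) or "git unpack-objects"
 * (exploding it into loose objects), depending on keep_pack and the
 * fetch.unpackLimit/transfer.unpackLimit setting.
 */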
760 static int get_pack(struct fetch_pack_args *args,
761 int xd[2], char **pack_lockfile)
762 {
763 struct async demux;
764 int do_keep = args->keep_pack;
765 const char *cmd_name;
766 struct pack_header header;
767 int pass_header = 0;
768 struct child_process cmd = CHILD_PROCESS_INIT;
769 int ret;
770
771 memset(&demux, 0, sizeof(demux));
772 if (use_sideband) {
773 /* xd[] is talking with upload-pack; subprocess reads from
774 * xd[0], spits out band#2 to stderr, and feeds us band#1
775 * through demux->out.
776 */
777 demux.proc = sideband_demux;
778 demux.data = xd;
779 demux.out = -1;
780 demux.isolate_sigpipe = 1;
781 if (start_async(&demux))
782 die(_("fetch-pack: unable to fork off sideband demultiplexer"));
783 }
784 else
785 demux.out = xd[0];
786
787 if (!args->keep_pack && unpack_limit) {
788
789 if (read_pack_header(demux.out, &header))
790 die(_("protocol error: bad pack header"));
791 pass_header = 1;
792 if (ntohl(header.hdr_entries) < unpack_limit)
793 do_keep = 0;
794 else
795 do_keep = 1;
796 }
797
798 if (alternate_shallow_file) {
799 argv_array_push(&cmd.args, "--shallow-file");
800 argv_array_push(&cmd.args, alternate_shallow_file);
801 }
802
803 if (do_keep || args->from_promisor) {
804 if (pack_lockfile)
805 cmd.out = -1;
806 cmd_name = "index-pack";
807 argv_array_push(&cmd.args, cmd_name);
808 argv_array_push(&cmd.args, "--stdin");
809 if (!args->quiet && !args->no_progress)
810 argv_array_push(&cmd.args, "-v");
811 if (args->use_thin_pack)
812 argv_array_push(&cmd.args, "--fix-thin");
813 if (do_keep && (args->lock_pack || unpack_limit)) {
814 char hostname[HOST_NAME_MAX + 1];
815 if (xgethostname(hostname, sizeof(hostname)))
816 xsnprintf(hostname, sizeof(hostname), "localhost");
817 argv_array_pushf(&cmd.args,
818 "--keep=fetch-pack %"PRIuMAX " on %s",
819 (uintmax_t)getpid(), hostname);
820 }
821 if (args->check_self_contained_and_connected)
822 argv_array_push(&cmd.args, "--check-self-contained-and-connected");
823 if (args->from_promisor)
824 argv_array_push(&cmd.args, "--promisor");
825 }
826 else {
827 cmd_name = "unpack-objects";
828 argv_array_push(&cmd.args, cmd_name);
829 if (args->quiet || args->no_progress)
830 argv_array_push(&cmd.args, "-q");
831 args->check_self_contained_and_connected = 0;
832 }
833
834 if (pass_header)
835 argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
836 ntohl(header.hdr_version),
837 ntohl(header.hdr_entries));
838 if (fetch_fsck_objects >= 0
839 ? fetch_fsck_objects
840 : transfer_fsck_objects >= 0
841 ? transfer_fsck_objects
842 : 0) {
843 if (args->from_promisor)
844 /*
845 * We cannot use --strict in index-pack because it
846 * checks both broken objects and links, but we only
847 * want to check for broken objects.
848 */
849 argv_array_push(&cmd.args, "--fsck-objects");
850 else
851 argv_array_pushf(&cmd.args, "--strict%s",
852 fsck_msg_types.buf);
853 }
854
855 cmd.in = demux.out;
856 cmd.git_cmd = 1;
857 if (start_command(&cmd))
858 die(_("fetch-pack: unable to fork off %s"), cmd_name);
859 if (do_keep && pack_lockfile) {
860 *pack_lockfile = index_pack_lockfile(cmd.out);
861 close(cmd.out);
862 }
863
864 if (!use_sideband)
865 /* Closed by start_command() */
866 xd[0] = -1;
867
868 ret = finish_command(&cmd);
869 if (!ret || (args->check_self_contained_and_connected && ret == 1))
870 args->self_contained_and_connected =
871 args->check_self_contained_and_connected &&
872 ret == 0;
873 else
874 die(_("%s failed"), cmd_name);
875 if (use_sideband && finish_async(&demux))
876 die(_("error in sideband demultiplexer"));
877 return 0;
878 }
879
880 static int cmp_ref_by_name(const void *a_, const void *b_)
881 {
882 const struct ref *a = *((const struct ref **)a_);
883 const struct ref *b = *((const struct ref **)b_);
884 return strcmp(a->name, b->name);
885 }
886
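/*
 * Protocol v0 entry point: detect the server's capabilities from the
 * ref advertisement, mark local completeness, negotiate a common base
 * with find_common(), then download the pack with get_pack().
 */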
887 static struct ref *do_fetch_pack(struct fetch_pack_args *args,
888 int fd[2],
889 const struct ref *orig_ref,
890 struct ref **sought, int nr_sought,
891 struct shallow_info *si,
892 char **pack_lockfile)
893 {
894 struct repository *r = the_repository;
895 struct ref *ref = copy_ref_list(orig_ref);
896 struct object_id oid;
897 const char *agent_feature;
898 int agent_len;
899 struct fetch_negotiator negotiator;
900 fetch_negotiator_init(r, &negotiator);
901
902 sort_ref_list(&ref, ref_compare_name);
903 QSORT(sought, nr_sought, cmp_ref_by_name);
904
905 if ((agent_feature = server_feature_value("agent", &agent_len))) {
906 agent_supported = 1;
907 if (agent_len)
908 print_verbose(args, _("Server version is %.*s"),
909 agent_len, agent_feature);
910 }
911
912 if (server_supports("shallow"))
913 print_verbose(args, _("Server supports %s"), "shallow");
914 else if (args->depth > 0 || is_repository_shallow(r))
915 die(_("Server does not support shallow clients"));
916 if (args->depth > 0 || args->deepen_since || args->deepen_not)
917 args->deepen = 1;
918 if (server_supports("multi_ack_detailed")) {
919 print_verbose(args, _("Server supports %s"), "multi_ack_detailed");
920 multi_ack = 2;
921 if (server_supports("no-done")) {
922 print_verbose(args, _("Server supports %s"), "no-done");
923 if (args->stateless_rpc)
924 no_done = 1;
925 }
926 }
927 else if (server_supports("multi_ack")) {
928 print_verbose(args, _("Server supports %s"), "multi_ack");
929 multi_ack = 1;
930 }
931 if (server_supports("side-band-64k")) {
932 print_verbose(args, _("Server supports %s"), "side-band-64k");
933 use_sideband = 2;
934 }
935 else if (server_supports("side-band")) {
936 print_verbose(args, _("Server supports %s"), "side-band");
937 use_sideband = 1;
938 }
939 if (server_supports("allow-tip-sha1-in-want")) {
940 print_verbose(args, _("Server supports %s"), "allow-tip-sha1-in-want");
941 allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
942 }
943 if (server_supports("allow-reachable-sha1-in-want")) {
944 print_verbose(args, _("Server supports %s"), "allow-reachable-sha1-in-want");
945 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
946 }
947 if (server_supports("thin-pack"))
948 print_verbose(args, _("Server supports %s"), "thin-pack");
949 else
950 args->use_thin_pack = 0;
951 if (server_supports("no-progress"))
952 print_verbose(args, _("Server supports %s"), "no-progress");
953 else
954 args->no_progress = 0;
955 if (server_supports("include-tag"))
956 print_verbose(args, _("Server supports %s"), "include-tag");
957 else
958 args->include_tag = 0;
959 if (server_supports("ofs-delta"))
960 print_verbose(args, _("Server supports %s"), "ofs-delta");
961 else
962 prefer_ofs_delta = 0;
963
964 if (server_supports("filter")) {
965 server_supports_filtering = 1;
966 print_verbose(args, _("Server supports %s"), "filter");
967 } else if (args->filter_options.choice) {
968 warning("filtering not recognized by server, ignoring");
969 }
970
971 if (server_supports("deepen-since")) {
972 print_verbose(args, _("Server supports %s"), "deepen-since");
973 deepen_since_ok = 1;
974 } else if (args->deepen_since)
975 die(_("Server does not support --shallow-since"));
976 if (server_supports("deepen-not")) {
977 print_verbose(args, _("Server supports %s"), "deepen-not");
978 deepen_not_ok = 1;
979 } else if (args->deepen_not)
980 die(_("Server does not support --shallow-exclude"));
981 if (server_supports("deepen-relative"))
982 print_verbose(args, _("Server supports %s"), "deepen-relative");
983 else if (args->deepen_relative)
984 die(_("Server does not support --deepen"));
985
986 if (!args->no_dependents) {
987 mark_complete_and_common_ref(&negotiator, args, &ref);
988 filter_refs(args, &ref, sought, nr_sought);
989 if (everything_local(args, &ref)) {
990 packet_flush(fd[1]);
991 goto all_done;
992 }
993 } else {
994 filter_refs(args, &ref, sought, nr_sought);
995 }
996 if (find_common(&negotiator, args, fd, &oid, ref) < 0)
997 if (!args->keep_pack)
998 /* When cloning, it is not unusual to have
999 * no common commit.
1000 */
1001 warning(_("no common commits"));
1002
1003 if (args->stateless_rpc)
1004 packet_flush(fd[1]);
1005 if (args->deepen)
1006 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
1007 NULL);
1008 else if (si->nr_ours || si->nr_theirs)
1009 alternate_shallow_file = setup_temporary_shallow(si->shallow);
1010 else
1011 alternate_shallow_file = NULL;
1012 if (get_pack(args, fd, pack_lockfile))
1013 die(_("git fetch-pack: fetch failed."));
1014
1015 all_done:
1016 negotiator.release(&negotiator);
1017 return ref;
1018 }
1019
1020 static void add_shallow_requests(struct strbuf *req_buf,
1021 const struct fetch_pack_args *args)
1022 {
1023 if (is_repository_shallow(the_repository))
1024 write_shallow_commits(req_buf, 1, NULL);
1025 if (args->depth > 0)
1026 packet_buf_write(req_buf, "deepen %d", args->depth);
1027 if (args->deepen_since) {
1028 timestamp_t max_age = approxidate(args->deepen_since);
1029 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1030 }
1031 if (args->deepen_not) {
1032 int i;
1033 for (i = 0; i < args->deepen_not->nr; i++) {
1034 struct string_list_item *s = args->deepen_not->items + i;
1035 packet_buf_write(req_buf, "deepen-not %s", s->string);
1036 }
1037 }
1038 if (args->deepen_relative)
1039 packet_buf_write(req_buf, "deepen-relative\n");
1040 }
1041
1042 static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
1043 {
1044 int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
1045
1046 for ( ; wants ; wants = wants->next) {
1047 const struct object_id *remote = &wants->old_oid;
1048 struct object *o;
1049
1050 /*
1051 * If that object is complete (i.e. it is an ancestor of a
1052 * local ref), we tell them we have it but do not have to
1053 * tell them about its ancestors, which they already know
1054 * about.
1055 *
1056 * We use lookup_object here because we are only
1057 * interested in the case we *know* the object is
1058 * reachable and we have already scanned it.
1059 *
1060 * Do this only if args->no_dependents is false (if it is true,
1061 * we cannot trust the object flags).
1062 */
1063 if (!no_dependents &&
1064 ((o = lookup_object(the_repository, remote)) != NULL) &&
1065 (o->flags & COMPLETE)) {
1066 continue;
1067 }
1068
1069 if (!use_ref_in_want || wants->exact_oid)
1070 packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
1071 else
1072 packet_buf_write(req_buf, "want-ref %s\n", wants->name);
1073 }
1074 }
1075
1076 static void add_common(struct strbuf *req_buf, struct oidset *common)
1077 {
1078 struct oidset_iter iter;
1079 const struct object_id *oid;
1080 oidset_iter_init(common, &iter);
1081
1082 while ((oid = oidset_iter_next(&iter))) {
1083 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1084 }
1085 }
1086
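/*
 * Emit the next batch of "have" lines for a protocol v2 request.
 * Returns 1 ("send done and expect a pack") when the negotiator has
 * nothing more to offer or MAX_IN_VAIN haves have gone unacknowledged;
 * the batch size grows between rounds using the stateless-rpc schedule
 * of next_flush().
 */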
1087 static int add_haves(struct fetch_negotiator *negotiator,
1088 struct strbuf *req_buf,
1089 int *haves_to_send, int *in_vain)
1090 {
1091 int ret = 0;
1092 int haves_added = 0;
1093 const struct object_id *oid;
1094
1095 while ((oid = negotiator->next(negotiator))) {
1096 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1097 if (++haves_added >= *haves_to_send)
1098 break;
1099 }
1100
1101 *in_vain += haves_added;
1102 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1103 /* Send Done */
1104 packet_buf_write(req_buf, "done\n");
1105 ret = 1;
1106 }
1107
1108 /* Increase haves to send on next round */
1109 *haves_to_send = next_flush(1, *haves_to_send);
1110
1111 return ret;
1112 }
1113
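/*
 * A rough sketch of the request this builds (pkt-line length prefixes
 * and real object ids omitted, values hypothetical):
 *
 *   command=fetch
 *   agent=git/2.x
 *   0001                      (delimiter)
 *   thin-pack
 *   ofs-delta
 *   want <oid>
 *   have <oid>
 *   done                      (only once negotiation is finished)
 *   0000                      (flush)
 */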
1114 static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
1115 const struct fetch_pack_args *args,
1116 const struct ref *wants, struct oidset *common,
1117 int *haves_to_send, int *in_vain,
1118 int sideband_all)
1119 {
1120 int ret = 0;
1121 struct strbuf req_buf = STRBUF_INIT;
1122
1123 if (server_supports_v2("fetch", 1))
1124 packet_buf_write(&req_buf, "command=fetch");
1125 if (server_supports_v2("agent", 0))
1126 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1127 if (args->server_options && args->server_options->nr &&
1128 server_supports_v2("server-option", 1)) {
1129 int i;
1130 for (i = 0; i < args->server_options->nr; i++)
1131 packet_buf_write(&req_buf, "server-option=%s",
1132 args->server_options->items[i].string);
1133 }
1134
1135 packet_buf_delim(&req_buf);
1136 if (args->use_thin_pack)
1137 packet_buf_write(&req_buf, "thin-pack");
1138 if (args->no_progress)
1139 packet_buf_write(&req_buf, "no-progress");
1140 if (args->include_tag)
1141 packet_buf_write(&req_buf, "include-tag");
1142 if (prefer_ofs_delta)
1143 packet_buf_write(&req_buf, "ofs-delta");
1144 if (sideband_all)
1145 packet_buf_write(&req_buf, "sideband-all");
1146
1147 /* Add shallow-info and deepen request */
1148 if (server_supports_feature("fetch", "shallow", 0))
1149 add_shallow_requests(&req_buf, args);
1150 else if (is_repository_shallow(the_repository) || args->deepen)
1151 die(_("Server does not support shallow requests"));
1152
1153 /* Add filter */
1154 if (server_supports_feature("fetch", "filter", 0) &&
1155 args->filter_options.choice) {
1156 struct strbuf expanded_filter_spec = STRBUF_INIT;
1157 print_verbose(args, _("Server supports filter"));
1158 expand_list_objects_filter_spec(&args->filter_options,
1159 &expanded_filter_spec);
1160 packet_buf_write(&req_buf, "filter %s",
1161 expanded_filter_spec.buf);
1162 strbuf_release(&expanded_filter_spec);
1163 } else if (args->filter_options.choice) {
1164 warning("filtering not recognized by server, ignoring");
1165 }
1166
1167 /* add wants */
1168 add_wants(args->no_dependents, wants, &req_buf);
1169
1170 if (args->no_dependents) {
1171 packet_buf_write(&req_buf, "done");
1172 ret = 1;
1173 } else {
1174 /* Add all of the common commits we've found in previous rounds */
1175 add_common(&req_buf, common);
1176
1177 /* Add initial haves */
1178 ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
1179 }
1180
1181 /* Send request */
1182 packet_buf_flush(&req_buf);
1183 if (write_in_full(fd_out, req_buf.buf, req_buf.len) < 0)
1184 die_errno(_("unable to write request to remote"));
1185
1186 strbuf_release(&req_buf);
1187 return ret;
1188 }
1189
1190 /*
1191 * Processes a section header in a server's response and checks if it matches
1192 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1193 * not consumed); if 0, the line will be consumed and the function will die if
1194 * the section header doesn't match what was expected.
1195 */
1196 static int process_section_header(struct packet_reader *reader,
1197 const char *section, int peek)
1198 {
1199 int ret;
1200
1201 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1202 die(_("error reading section header '%s'"), section);
1203
1204 ret = !strcmp(reader->line, section);
1205
1206 if (!peek) {
1207 if (!ret)
1208 die(_("expected '%s', received '%s'"),
1209 section, reader->line);
1210 packet_reader_read(reader);
1211 }
1212
1213 return ret;
1214 }
1215
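/*
 * Read the v2 "acknowledgments" section.  Each line is "ACK <oid>" for
 * a commit the server has in common, "NAK" when it has none to report,
 * or "ready" once it could produce a pack without further negotiation.
 */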
1216 static int process_acks(struct fetch_negotiator *negotiator,
1217 struct packet_reader *reader,
1218 struct oidset *common)
1219 {
1220 /* received */
1221 int received_ready = 0;
1222 int received_ack = 0;
1223
1224 process_section_header(reader, "acknowledgments", 0);
1225 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1226 const char *arg;
1227
1228 if (!strcmp(reader->line, "NAK"))
1229 continue;
1230
1231 if (skip_prefix(reader->line, "ACK ", &arg)) {
1232 struct object_id oid;
1233 if (!get_oid_hex(arg, &oid)) {
1234 struct commit *commit;
1235 oidset_insert(common, &oid);
1236 commit = lookup_commit(the_repository, &oid);
1237 negotiator->ack(negotiator, commit);
1238 }
1239 continue;
1240 }
1241
1242 if (!strcmp(reader->line, "ready")) {
1243 received_ready = 1;
1244 continue;
1245 }
1246
1247 die(_("unexpected acknowledgment line: '%s'"), reader->line);
1248 }
1249
1250 if (reader->status != PACKET_READ_FLUSH &&
1251 reader->status != PACKET_READ_DELIM)
1252 die(_("error processing acks: %d"), reader->status);
1253
1254 /*
1255 * If an "acknowledgments" section is sent, a packfile is sent if and
1256 * only if "ready" was sent in this section. The other sections
1257 * ("shallow-info" and "wanted-refs") are sent only if a packfile is
1258 * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
1259 * otherwise.
1260 */
1261 if (received_ready && reader->status != PACKET_READ_DELIM)
1262 die(_("expected packfile to be sent after 'ready'"));
1263 if (!received_ready && reader->status != PACKET_READ_FLUSH)
1264 die(_("expected no other sections to be sent after no 'ready'"));
1265
1266 /* return 0 if no common, 1 if there are common, or 2 if ready */
1267 return received_ready ? 2 : (received_ack ? 1 : 0);
1268 }
1269
1270 static void receive_shallow_info(struct fetch_pack_args *args,
1271 struct packet_reader *reader,
1272 struct oid_array *shallows,
1273 struct shallow_info *si)
1274 {
1275 int unshallow_received = 0;
1276
1277 process_section_header(reader, "shallow-info", 0);
1278 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1279 const char *arg;
1280 struct object_id oid;
1281
1282 if (skip_prefix(reader->line, "shallow ", &arg)) {
1283 if (get_oid_hex(arg, &oid))
1284 die(_("invalid shallow line: %s"), reader->line);
1285 oid_array_append(shallows, &oid);
1286 continue;
1287 }
1288 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1289 if (get_oid_hex(arg, &oid))
1290 die(_("invalid unshallow line: %s"), reader->line);
1291 if (!lookup_object(the_repository, &oid))
1292 die(_("object not found: %s"), reader->line);
1293 /* make sure that it is parsed as shallow */
1294 if (!parse_object(the_repository, &oid))
1295 die(_("error in object: %s"), reader->line);
1296 if (unregister_shallow(&oid))
1297 die(_("no shallow found: %s"), reader->line);
1298 unshallow_received = 1;
1299 continue;
1300 }
1301 die(_("expected shallow/unshallow, got %s"), reader->line);
1302 }
1303
1304 if (reader->status != PACKET_READ_FLUSH &&
1305 reader->status != PACKET_READ_DELIM)
1306 die(_("error processing shallow info: %d"), reader->status);
1307
1308 if (args->deepen || unshallow_received) {
1309 /*
1310 * Treat these as shallow lines caused by our depth settings.
1311 * In v0, these lines cannot cause refs to be rejected; do the
1312 * same.
1313 */
1314 int i;
1315
1316 for (i = 0; i < shallows->nr; i++)
1317 register_shallow(the_repository, &shallows->oid[i]);
1318 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
1319 NULL);
1320 args->deepen = 1;
1321 } else if (shallows->nr) {
1322 /*
1323 * Treat these as shallow lines caused by the remote being
1324 * shallow. In v0, remote refs that reach these objects are
1325 * rejected (unless --update-shallow is set); do the same.
1326 */
1327 prepare_shallow_info(si, shallows);
1328 if (si->nr_ours || si->nr_theirs)
1329 alternate_shallow_file =
1330 setup_temporary_shallow(si->shallow);
1331 else
1332 alternate_shallow_file = NULL;
1333 } else {
1334 alternate_shallow_file = NULL;
1335 }
1336 }
1337
1338 static int cmp_name_ref(const void *name, const void *ref)
1339 {
1340 return strcmp(name, (*(struct ref **)ref)->name);
1341 }
1342
1343 static void receive_wanted_refs(struct packet_reader *reader,
1344 struct ref **sought, int nr_sought)
1345 {
1346 process_section_header(reader, "wanted-refs", 0);
1347 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1348 struct object_id oid;
1349 const char *end;
1350 struct ref **found;
1351
1352 if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
1353 die(_("expected wanted-ref, got '%s'"), reader->line);
1354
1355 found = bsearch(end, sought, nr_sought, sizeof(*sought),
1356 cmp_name_ref);
1357 if (!found)
1358 die(_("unexpected wanted-ref: '%s'"), reader->line);
1359 oidcpy(&(*found)->old_oid, &oid);
1360 }
1361
1362 if (reader->status != PACKET_READ_DELIM)
1363 die(_("error processing wanted refs: %d"), reader->status);
1364 }
1365
1366 enum fetch_state {
1367 FETCH_CHECK_LOCAL = 0,
1368 FETCH_SEND_REQUEST,
1369 FETCH_PROCESS_ACKS,
1370 FETCH_GET_PACK,
1371 FETCH_DONE,
1372 };
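/*
 * The v2 fetch below is a small state machine: CHECK_LOCAL filters the
 * refs and may finish early when everything is already present;
 * SEND_REQUEST and PROCESS_ACKS alternate, one negotiation round per
 * request, until the server says "ready" (or "done" was sent), at which
 * point GET_PACK reads the shallow-info, wanted-refs and packfile
 * sections.
 */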
1373
1374 static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
1375 int fd[2],
1376 const struct ref *orig_ref,
1377 struct ref **sought, int nr_sought,
1378 struct oid_array *shallows,
1379 struct shallow_info *si,
1380 char **pack_lockfile)
1381 {
1382 struct repository *r = the_repository;
1383 struct ref *ref = copy_ref_list(orig_ref);
1384 enum fetch_state state = FETCH_CHECK_LOCAL;
1385 struct oidset common = OIDSET_INIT;
1386 struct packet_reader reader;
1387 int in_vain = 0;
1388 int haves_to_send = INITIAL_FLUSH;
1389 struct fetch_negotiator negotiator;
1390 fetch_negotiator_init(r, &negotiator);
1391 packet_reader_init(&reader, fd[0], NULL, 0,
1392 PACKET_READ_CHOMP_NEWLINE |
1393 PACKET_READ_DIE_ON_ERR_PACKET);
1394 if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 1) &&
1395 server_supports_feature("fetch", "sideband-all", 0)) {
1396 reader.use_sideband = 1;
1397 reader.me = "fetch-pack";
1398 }
1399
1400 while (state != FETCH_DONE) {
1401 switch (state) {
1402 case FETCH_CHECK_LOCAL:
1403 sort_ref_list(&ref, ref_compare_name);
1404 QSORT(sought, nr_sought, cmp_ref_by_name);
1405
1406 /* v2 supports these by default */
1407 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
1408 use_sideband = 2;
1409 if (args->depth > 0 || args->deepen_since || args->deepen_not)
1410 args->deepen = 1;
1411
1412 /* Filter 'ref' by 'sought' and those that aren't local */
1413 if (!args->no_dependents) {
1414 mark_complete_and_common_ref(&negotiator, args, &ref);
1415 filter_refs(args, &ref, sought, nr_sought);
1416 if (everything_local(args, &ref))
1417 state = FETCH_DONE;
1418 else
1419 state = FETCH_SEND_REQUEST;
1420
1421 mark_tips(&negotiator, args->negotiation_tips);
1422 for_each_cached_alternate(&negotiator,
1423 insert_one_alternate_object);
1424 } else {
1425 filter_refs(args, &ref, sought, nr_sought);
1426 state = FETCH_SEND_REQUEST;
1427 }
1428 break;
1429 case FETCH_SEND_REQUEST:
1430 if (send_fetch_request(&negotiator, fd[1], args, ref,
1431 &common,
1432 &haves_to_send, &in_vain,
1433 reader.use_sideband))
1434 state = FETCH_GET_PACK;
1435 else
1436 state = FETCH_PROCESS_ACKS;
1437 break;
1438 case FETCH_PROCESS_ACKS:
1439 /* Process ACKs/NAKs */
1440 switch (process_acks(&negotiator, &reader, &common)) {
1441 case 2:
1442 state = FETCH_GET_PACK;
1443 break;
1444 case 1:
1445 in_vain = 0;
1446 /* fallthrough */
1447 default:
1448 state = FETCH_SEND_REQUEST;
1449 break;
1450 }
1451 break;
1452 case FETCH_GET_PACK:
1453 /* Check for shallow-info section */
1454 if (process_section_header(&reader, "shallow-info", 1))
1455 receive_shallow_info(args, &reader, shallows, si);
1456
1457 if (process_section_header(&reader, "wanted-refs", 1))
1458 receive_wanted_refs(&reader, sought, nr_sought);
1459
1460 /* get the pack */
1461 process_section_header(&reader, "packfile", 0);
1462 if (get_pack(args, fd, pack_lockfile))
1463 die(_("git fetch-pack: fetch failed."));
1464
1465 state = FETCH_DONE;
1466 break;
1467 case FETCH_DONE:
1468 continue;
1469 }
1470 }
1471
1472 negotiator.release(&negotiator);
1473 oidset_clear(&common);
1474 return ref;
1475 }
1476
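/*
 * Collect fetch.fsck.* configuration into fsck_msg_types, in the form
 * index-pack expects after --strict: e.g. a config of
 * "fetch.fsck.missingEmail = ignore" plus "fetch.fsck.skipList = foo"
 * ends up (roughly) as "--strict=missingEmail=ignore,skiplist=foo".
 */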
1477 static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
1478 {
1479 if (strcmp(var, "fetch.fsck.skiplist") == 0) {
1480 const char *path;
1481
1482 if (git_config_pathname(&path, var, value))
1483 return 1;
1484 strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
1485 fsck_msg_types.len ? ',' : '=', path);
1486 free((char *)path);
1487 return 0;
1488 }
1489
1490 if (skip_prefix(var, "fetch.fsck.", &var)) {
1491 if (is_valid_msg_type(var, value))
1492 strbuf_addf(&fsck_msg_types, "%c%s=%s",
1493 fsck_msg_types.len ? ',' : '=', var, value);
1494 else
1495 warning("Skipping unknown msg id '%s'", var);
1496 return 0;
1497 }
1498
1499 return git_default_config(var, value, cb);
1500 }
1501
1502 static void fetch_pack_config(void)
1503 {
1504 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
1505 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
1506 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
1507 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
1508 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
1509
1510 git_config(fetch_pack_config_cb, NULL);
1511 }
1512
1513 static void fetch_pack_setup(void)
1514 {
1515 static int did_setup;
1516 if (did_setup)
1517 return;
1518 fetch_pack_config();
1519 if (0 <= transfer_unpack_limit)
1520 unpack_limit = transfer_unpack_limit;
1521 else if (0 <= fetch_unpack_limit)
1522 unpack_limit = fetch_unpack_limit;
1523 did_setup = 1;
1524 }
1525
1526 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1527 {
1528 struct string_list names = STRING_LIST_INIT_NODUP;
1529 int src, dst;
1530
1531 for (src = dst = 0; src < nr; src++) {
1532 struct string_list_item *item;
1533 item = string_list_insert(&names, ref[src]->name);
1534 if (item->util)
1535 continue; /* already have it */
1536 item->util = ref[src];
1537 if (src != dst)
1538 ref[dst] = ref[src];
1539 dst++;
1540 }
1541 for (src = dst; src < nr; src++)
1542 ref[src] = NULL;
1543 string_list_clear(&names, 0);
1544 return dst;
1545 }
1546
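/*
 * Decide what happens to .git/shallow after the fetch: commit or roll
 * back the file prepared for a --depth/--unshallow fetch, accept the
 * remote's shallow roots when cloning or when --update-shallow was
 * given, and otherwise mark refs that would require new shallow entries
 * with REF_STATUS_REJECT_SHALLOW so they are not updated.
 */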
1547 static void update_shallow(struct fetch_pack_args *args,
1548 struct ref **sought, int nr_sought,
1549 struct shallow_info *si)
1550 {
1551 struct oid_array ref = OID_ARRAY_INIT;
1552 int *status;
1553 int i;
1554
1555 if (args->deepen && alternate_shallow_file) {
1556 if (*alternate_shallow_file == '\0') { /* --unshallow */
1557 unlink_or_warn(git_path_shallow(the_repository));
1558 rollback_lock_file(&shallow_lock);
1559 } else
1560 commit_lock_file(&shallow_lock);
1561 alternate_shallow_file = NULL;
1562 return;
1563 }
1564
1565 if (!si->shallow || !si->shallow->nr)
1566 return;
1567
1568 if (args->cloning) {
1569 /*
1570 * remote is shallow, but this is a clone, there are
1571 * no objects in repo to worry about. Accept any
1572 * shallow points that exist in the pack (iow in repo
1573 * after get_pack() and reprepare_packed_git())
1574 */
1575 struct oid_array extra = OID_ARRAY_INIT;
1576 struct object_id *oid = si->shallow->oid;
1577 for (i = 0; i < si->shallow->nr; i++)
1578 if (has_object_file(&oid[i]))
1579 oid_array_append(&extra, &oid[i]);
1580 if (extra.nr) {
1581 setup_alternate_shallow(&shallow_lock,
1582 &alternate_shallow_file,
1583 &extra);
1584 commit_lock_file(&shallow_lock);
1585 alternate_shallow_file = NULL;
1586 }
1587 oid_array_clear(&extra);
1588 return;
1589 }
1590
1591 if (!si->nr_ours && !si->nr_theirs)
1592 return;
1593
1594 remove_nonexistent_theirs_shallow(si);
1595 if (!si->nr_ours && !si->nr_theirs)
1596 return;
1597 for (i = 0; i < nr_sought; i++)
1598 oid_array_append(&ref, &sought[i]->old_oid);
1599 si->ref = &ref;
1600
1601 if (args->update_shallow) {
1602 /*
1603 * remote is also shallow, .git/shallow may be updated
1604 * so all refs can be accepted. Make sure we only add
1605 * shallow roots that are actually reachable from new
1606 * refs.
1607 */
1608 struct oid_array extra = OID_ARRAY_INIT;
1609 struct object_id *oid = si->shallow->oid;
1610 assign_shallow_commits_to_refs(si, NULL, NULL);
1611 if (!si->nr_ours && !si->nr_theirs) {
1612 oid_array_clear(&ref);
1613 return;
1614 }
1615 for (i = 0; i < si->nr_ours; i++)
1616 oid_array_append(&extra, &oid[si->ours[i]]);
1617 for (i = 0; i < si->nr_theirs; i++)
1618 oid_array_append(&extra, &oid[si->theirs[i]]);
1619 setup_alternate_shallow(&shallow_lock,
1620 &alternate_shallow_file,
1621 &extra);
1622 commit_lock_file(&shallow_lock);
1623 oid_array_clear(&extra);
1624 oid_array_clear(&ref);
1625 alternate_shallow_file = NULL;
1626 return;
1627 }
1628
1629 /*
1630 * remote is also shallow, check what ref is safe to update
1631 * without updating .git/shallow
1632 */
1633 status = xcalloc(nr_sought, sizeof(*status));
1634 assign_shallow_commits_to_refs(si, NULL, status);
1635 if (si->nr_ours || si->nr_theirs) {
1636 for (i = 0; i < nr_sought; i++)
1637 if (status[i])
1638 sought[i]->status = REF_STATUS_REJECT_SHALLOW;
1639 }
1640 free(status);
1641 oid_array_clear(&ref);
1642 }
1643
1644 static int iterate_ref_map(void *cb_data, struct object_id *oid)
1645 {
1646 struct ref **rm = cb_data;
1647 struct ref *ref = *rm;
1648
1649 if (!ref)
1650 return -1; /* end of the list */
1651 *rm = ref->next;
1652 oidcpy(oid, &ref->old_oid);
1653 return 0;
1654 }
1655
1656 struct ref *fetch_pack(struct fetch_pack_args *args,
1657 int fd[],
1658 const struct ref *ref,
1659 struct ref **sought, int nr_sought,
1660 struct oid_array *shallow,
1661 char **pack_lockfile,
1662 enum protocol_version version)
1663 {
1664 struct ref *ref_cpy;
1665 struct shallow_info si;
1666 struct oid_array shallows_scratch = OID_ARRAY_INIT;
1667
1668 fetch_pack_setup();
1669 if (nr_sought)
1670 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1671
1672 if (args->no_dependents && !args->filter_options.choice) {
1673 /*
1674 * The protocol does not support requesting that only the
1675 * wanted objects be sent, so approximate this by setting a
1676 * "blob:none" filter if no filter is already set. This works
1677 * for all object types: note that wanted blobs will still be
1678 * sent because they are directly specified as a "want".
1679 *
1680 * NEEDSWORK: Add an option in the protocol to request that
1681 * only the wanted objects be sent, and implement it.
1682 */
1683 parse_list_objects_filter(&args->filter_options, "blob:none");
1684 }
1685
1686 if (version != protocol_v2 && !ref) {
1687 packet_flush(fd[1]);
1688 die(_("no matching remote head"));
1689 }
1690 if (version == protocol_v2) {
1691 if (shallow->nr)
1692 BUG("Protocol V2 does not provide shallows at this point in the fetch");
1693 memset(&si, 0, sizeof(si));
1694 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1695 &shallows_scratch, &si,
1696 pack_lockfile);
1697 } else {
1698 prepare_shallow_info(&si, shallow);
1699 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1700 &si, pack_lockfile);
1701 }
1702 reprepare_packed_git(the_repository);
1703
1704 if (!args->cloning && args->deepen) {
1705 struct check_connected_options opt = CHECK_CONNECTED_INIT;
1706 struct ref *iterator = ref_cpy;
1707 opt.shallow_file = alternate_shallow_file;
1708 if (args->deepen)
1709 opt.is_deepening_fetch = 1;
1710 if (check_connected(iterate_ref_map, &iterator, &opt)) {
1711 error(_("remote did not send all necessary objects"));
1712 free_refs(ref_cpy);
1713 ref_cpy = NULL;
1714 rollback_lock_file(&shallow_lock);
1715 goto cleanup;
1716 }
1717 args->connectivity_checked = 1;
1718 }
1719
1720 update_shallow(args, sought, nr_sought, &si);
1721 cleanup:
1722 clear_shallow_info(&si);
1723 oid_array_clear(&shallows_scratch);
1724 return ref_cpy;
1725 }
1726
1727 int report_unmatched_refs(struct ref **sought, int nr_sought)
1728 {
1729 int i, ret = 0;
1730
1731 for (i = 0; i < nr_sought; i++) {
1732 if (!sought[i])
1733 continue;
1734 switch (sought[i]->match_status) {
1735 case REF_MATCHED:
1736 continue;
1737 case REF_NOT_MATCHED:
1738 error(_("no such remote ref %s"), sought[i]->name);
1739 break;
1740 case REF_UNADVERTISED_NOT_ALLOWED:
1741 error(_("Server does not allow request for unadvertised object %s"),
1742 sought[i]->name);
1743 break;
1744 }
1745 ret = 1;
1746 }
1747 return ret;
1748 }