#include "cache.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
#define COMMON (1U << 1)
#define COMMON_REF (1U << 2)
#define SEEN (1U << 3)
#define POPPED (1U << 4)
#define ALTERNATE (1U << 5)

static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1 02
static unsigned int allow_unadvertised_object_request;

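/*
 * Print a formatted message to stderr, followed by a newline, but only
 * when the caller asked for verbose output.
 */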
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

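/*
 * Mark the commit with "mark" and, if it parses, add it to the revision
 * queue; commits not yet known to be common are counted in
 * non_common_revs.
 */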
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 */

static void mark_common(struct commit *commit,
			int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
			     parents;
			     parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
 * Get the next rev to send, ignoring the common.
 */

static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}

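/*
 * Read one ACK/NAK packet from the server and classify it; dies on a
 * flush packet, on "ERR" lines and on anything else unexpected.
 */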
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

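/*
 * Grow the window of "have"s sent between flushes: double it up to a
 * cap, then grow more slowly (roughly 10% per round over stateless RPC,
 * by a fixed increment otherwise).
 */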
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

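/*
 * Negotiate with the server over fd[]: send "want"s (plus shallow,
 * deepen and filter requests), then stream "have"s until enough ACKs
 * arrive or we give up. Returns 0 if a common base was found, -1
 * otherwise; the last ACKed object is stored in result_oid.
 */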
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					    && ack == ACK_common
					    && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}

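/*
 * Trim the advertised ref list down to what we were asked to fetch
 * (or everything, when fetch_all is set), and append sought-for objects
 * that were requested by raw object id when the server allows it or the
 * id is a known ref tip.
 */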
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

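/*
 * Figure out which advertised refs we already have, mark completed
 * local history, and filter the ref list; returns non-zero when every
 * remaining ref is already complete locally (nothing to fetch).
 */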
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file_with_flags(&ref->old_oid,
						OBJECT_INFO_QUICK))
			continue;

		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

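/*
 * Receive the packfile over xd[], demultiplexing sideband data when
 * needed, and feed it to either index-pack or unpack-objects depending
 * on pack size, keep-pack and promisor settings.
 */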
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

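/*
 * Drive a single fetch: parse the server's capability list, negotiate
 * common commits, set up shallow bookkeeping and receive the pack.
 */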
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

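/*
 * Drop later duplicates from the sought ref array (keeping the first
 * occurrence of each name) and return the new count.
 */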
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

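/*
 * Decide how .git/shallow should change after the fetch: commit or roll
 * back the shallow lock for deepening fetches, accept shallow roots on
 * clone or --update-shallow, and otherwise reject refs that would
 * require new shallow entries.
 */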
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}

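/*
 * Report refs that were asked for but could not be fetched; returns 1
 * if any error was printed, 0 otherwise.
 */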
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}