#include "fetch-pack.h"
#include "run-command.h"
#include "transport.h"
#include "prio-queue.h"
#include "sha1-array.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)
/*
 * After sending this many "have"s, if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1		01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02

static unsigned int allow_unadvertised_object_request;
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}
static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}
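
/*
 * Queue a commit on the rev_list priority queue with the given mark; it is
 * counted as a non-common revision unless already known to be common.
 */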
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}
static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 */
static void mark_common(struct commit *commit,
			int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
			     parents;
			     parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
/*
 * Get the next rev to send, ignoring the common.
 */
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
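
/*
 * Read one ACK/NAK line from the server; for ACK lines, the acknowledged
 * object id is stored in result_oid.
 */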
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got EOF"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}
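
/*
 * Send the accumulated request buffer to the server; over the stateless-rpc
 * (smart HTTP) transport it is chunked into packets of at most
 * LARGE_PACKET_MAX bytes.
 */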
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}
static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
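
/*
 * Decide after how many "have" lines the next flush should happen: the
 * window grows geometrically at first and then more slowly, so we neither
 * stall on a fast link nor overrun a slow one.
 */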
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
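
/*
 * Negotiate with the server: send "want" lines for the refs we are fetching,
 * then walk our history sending "have" lines in growing batches until the
 * server ACKs a common commit or we give up.
 */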
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	for_each_ref(clear_marks, NULL);

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					    && ack == ACK_common
					    && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
static struct commit_list *complete;
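
/*
 * Mark an object (peeling tags down to the underlying object) as COMPLETE,
 * i.e. already available locally, and remember completed commits on the
 * "complete" list.
 */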
static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}
static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}
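
/*
 * Work out which of the wanted refs we already have, mark locally complete
 * commits, and filter the ref list; returns non-zero when everything being
 * asked for is already available locally.
 */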
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	timestamp_t cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file_with_flags(&ref->old_oid,
						OBJECT_INFO_QUICK))
			continue;

		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}
	return retval;
}
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
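
/*
 * Receive the pack stream, demultiplexing sideband data when in use, and
 * feed it to index-pack or unpack-objects depending on pack size and the
 * caller's options.
 */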
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
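
/*
 * Drive one fetch: parse the server's capability advertisement, run the
 * common-commit negotiation, and download the pack.
 */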
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}
static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
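
/*
 * After the fetch, update .git/shallow (or roll back its lock) depending on
 * whether we deepened, cloned from a shallow remote, or merely need to reject
 * refs that would require new shallow roots.
 */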
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
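
/*
 * Public entry point: deduplicate the sought refs, run the fetch, and
 * reconcile shallow information before returning the copied ref list.
 */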
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int ret = 0, i;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}