From 30ae764b1e11f10b5fca723a876a0f3de3ca11ab Mon Sep 17 00:00:00 2001 From: Daniel Barkalow Date: Mon, 10 Sep 2007 23:02:45 -0400 Subject: [PATCH] Modularize commit-walker This turns the extern functions to be provided by the backend into a struct of pointers, renames the functions to be more namespace-friendly, and updates http-fetch to this interface. It removes the unused include from http-push.c. It makes git-http-fetch a builtin (with the implementation a separate file, accessible directly). Signed-off-by: Daniel Barkalow Signed-off-by: Junio C Hamano --- Makefile | 17 +-- builtin-http-fetch.c | 77 ++++++++++++ builtin.h | 1 + fetch.h | 54 -------- git.c | 3 + http-push.c | 1 - http-fetch.c => http-walker.c | 228 +++++++++++++++------------------- fetch.c => walker.c | 83 +++++++------ walker.h | 37 ++++++ 9 files changed, 271 insertions(+), 230 deletions(-) create mode 100644 builtin-http-fetch.c delete mode 100644 fetch.h rename http-fetch.c => http-walker.c (85%) rename fetch.c => walker.c (75%) create mode 100644 walker.h diff --git a/Makefile b/Makefile index 520f156f34..3436a2326a 100644 --- a/Makefile +++ b/Makefile @@ -514,7 +514,9 @@ else CC_LD_DYNPATH = -R endif -ifndef NO_CURL +ifdef NO_CURL + BASIC_CFLAGS += -DNO_CURL +else ifdef CURLDIR # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case. BASIC_CFLAGS += -I$(CURLDIR)/include @@ -522,7 +524,9 @@ ifndef NO_CURL else CURL_LIBCURL = -lcurl endif - PROGRAMS += git-http-fetch$X + BUILTIN_OBJS += builtin-http-fetch.o + EXTLIBS += $(CURL_LIBCURL) + LIB_OBJS += http.o walker.o http-walker.o curl_check := $(shell (echo 070908; curl-config --vernum) | sort -r | sed -ne 2p) ifeq "$(curl_check)" "070908" ifndef NO_EXPAT @@ -884,7 +888,7 @@ http.o: http.c GIT-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DGIT_USER_AGENT='"git/$(GIT_VERSION)"' $< ifdef NO_EXPAT -http-fetch.o: http-fetch.c http.h GIT-CFLAGS +http-walker.o: http-walker.c http.h GIT-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DNO_EXPAT $< endif @@ -893,16 +897,13 @@ git-%$X: %.o $(GITLIBS) git-imap-send$X: imap-send.o $(LIB_FILE) -http.o http-fetch.o http-push.o: http.h -git-http-fetch$X: fetch.o http.o http-fetch.o $(GITLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ - $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) +http.o http-walker.o http-push.o: http.h git-http-push$X: revision.o http.o http-push.o $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) -$(LIB_OBJS) $(BUILTIN_OBJS) fetch.o: $(LIB_H) +$(LIB_OBJS) $(BUILTIN_OBJS) walker.o: $(LIB_H) $(patsubst git-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) $(DIFF_OBJS): diffcore.h diff --git a/builtin-http-fetch.c b/builtin-http-fetch.c new file mode 100644 index 0000000000..4a50dbd95b --- /dev/null +++ b/builtin-http-fetch.c @@ -0,0 +1,77 @@ +#include "cache.h" +#include "walker.h" + +int cmd_http_fetch(int argc, const char **argv, const char *prefix) +{ + struct walker *walker; + int commits_on_stdin = 0; + int commits; + const char **write_ref = NULL; + char **commit_id; + const char *url; + int arg = 1; + int rc = 0; + int get_tree = 0; + int get_history = 0; + int get_all = 0; + int get_verbosely = 0; + int get_recover = 0; + + git_config(git_default_config); + + while (arg < argc && argv[arg][0] == '-') { + if (argv[arg][1] == 't') { + get_tree = 1; + } else if (argv[arg][1] == 'c') { + get_history = 1; + } else if (argv[arg][1] == 'a') { + get_all = 1; + get_tree = 1; + get_history = 1; + } else if 
(argv[arg][1] == 'v') { + get_verbosely = 1; + } else if (argv[arg][1] == 'w') { + write_ref = &argv[arg + 1]; + arg++; + } else if (!strcmp(argv[arg], "--recover")) { + get_recover = 1; + } else if (!strcmp(argv[arg], "--stdin")) { + commits_on_stdin = 1; + } + arg++; + } + if (argc < arg + 2 - commits_on_stdin) { + usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url"); + return 1; + } + if (commits_on_stdin) { + commits = walker_targets_stdin(&commit_id, &write_ref); + } else { + commit_id = (char **) &argv[arg++]; + commits = 1; + } + url = argv[arg]; + + walker = get_http_walker(url); + walker->get_tree = get_tree; + walker->get_history = get_history; + walker->get_all = get_all; + walker->get_verbosely = get_verbosely; + walker->get_recover = get_recover; + + rc = walker_fetch(walker, commits, commit_id, write_ref, url); + + if (commits_on_stdin) + walker_targets_free(commits, commit_id, write_ref); + + if (walker->corrupt_object_found) { + fprintf(stderr, +"Some loose object were found to be corrupt, but they might be just\n" +"a false '404 Not Found' error message sent with incorrect HTTP\n" +"status code. Suggest running git-fsck.\n"); + } + + walker_free(walker); + + return rc; +} diff --git a/builtin.h b/builtin.h index 03ee7bf780..1fa1444e33 100644 --- a/builtin.h +++ b/builtin.h @@ -40,6 +40,7 @@ extern int cmd_gc(int argc, const char **argv, const char *prefix); extern int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix); extern int cmd_grep(int argc, const char **argv, const char *prefix); extern int cmd_help(int argc, const char **argv, const char *prefix); +extern int cmd_http_fetch(int argc, const char **argv, const char *prefix); extern int cmd_init_db(int argc, const char **argv, const char *prefix); extern int cmd_log(int argc, const char **argv, const char *prefix); extern int cmd_log_reflog(int argc, const char **argv, const char *prefix); diff --git a/fetch.h b/fetch.h deleted file mode 100644 index be48c6f190..0000000000 --- a/fetch.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef PULL_H -#define PULL_H - -/* - * Fetch object given SHA1 from the remote, and store it locally under - * GIT_OBJECT_DIRECTORY. Return 0 on success, -1 on failure. To be - * provided by the particular implementation. - */ -extern int fetch(unsigned char *sha1); - -/* - * Fetch the specified object and store it locally; fetch() will be - * called later to determine success. To be provided by the particular - * implementation. - */ -extern void prefetch(unsigned char *sha1); - -/* - * Fetch ref (relative to $GIT_DIR/refs) from the remote, and store - * the 20-byte SHA1 in sha1. Return 0 on success, -1 on failure. To - * be provided by the particular implementation. - */ -extern int fetch_ref(char *ref, unsigned char *sha1); - -/* Set to fetch the target tree. */ -extern int get_tree; - -/* Set to fetch the commit history. */ -extern int get_history; - -/* Set to fetch the trees in the commit history. */ -extern int get_all; - -/* Set to be verbose */ -extern int get_verbosely; - -/* Set to check on all reachable objects. */ -extern int get_recover; - -/* Report what we got under get_verbosely */ -extern void pull_say(const char *, const char *); - -/* Load pull targets from stdin */ -extern int pull_targets_stdin(char ***target, const char ***write_ref); - -/* Free up loaded targets */ -extern void pull_targets_free(int targets, char **target, const char **write_ref); - -/* If write_ref is set, the ref filename to write the target value to. 
*/ -/* If write_ref_log_details is set, additional text will appear in the ref log. */ -extern int pull(int targets, char **target, const char **write_ref, - const char *write_ref_log_details); - -#endif /* PULL_H */ diff --git a/git.c b/git.c index 56ae8ccccf..41fe73eff9 100644 --- a/git.c +++ b/git.c @@ -344,6 +344,9 @@ static void handle_internal_command(int argc, const char **argv) { "get-tar-commit-id", cmd_get_tar_commit_id }, { "grep", cmd_grep, RUN_SETUP | USE_PAGER }, { "help", cmd_help }, +#ifndef NO_CURL + { "http-fetch", cmd_http_fetch, RUN_SETUP }, +#endif { "init", cmd_init_db }, { "init-db", cmd_init_db }, { "log", cmd_log, RUN_SETUP | USE_PAGER }, diff --git a/http-push.c b/http-push.c index c54230b6f3..8e1fdfddf5 100644 --- a/http-push.c +++ b/http-push.c @@ -1,7 +1,6 @@ #include "cache.h" #include "commit.h" #include "pack.h" -#include "fetch.h" #include "tag.h" #include "blob.h" #include "http.h" diff --git a/http-fetch.c b/http-walker.c similarity index 85% rename from http-fetch.c rename to http-walker.c index 7786110ffc..444aebf526 100644 --- a/http-fetch.c +++ b/http-walker.c @@ -1,19 +1,12 @@ #include "cache.h" #include "commit.h" #include "pack.h" -#include "fetch.h" +#include "walker.h" #include "http.h" #define PREV_BUF_SIZE 4096 #define RANGE_HEADER_SIZE 30 -static int commits_on_stdin; - -static int got_alternates = -1; -static int corrupt_object_found; - -static struct curl_slist *no_pragma_header; - struct alt_base { char *base; @@ -22,8 +15,6 @@ struct alt_base struct alt_base *next; }; -static struct alt_base *alt; - enum object_request_state { WAITING, ABORTED, @@ -33,6 +24,7 @@ enum object_request_state { struct object_request { + struct walker *walker; unsigned char sha1[20]; struct alt_base *repo; char *url; @@ -53,6 +45,7 @@ struct object_request }; struct alternates_request { + struct walker *walker; const char *base; char *url; struct buffer *buffer; @@ -60,6 +53,13 @@ struct alternates_request { int http_specific; }; +struct walker_data { + const char *url; + int got_alternates; + struct alt_base *alt; + struct curl_slist *no_pragma_header; +}; + static struct object_request *object_queue_head; static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb, @@ -103,11 +103,12 @@ static int missing__target(int code, int result) #define missing_target(a) missing__target((a)->http_code, (a)->curl_result) -static void fetch_alternates(const char *base); +static void fetch_alternates(struct walker *walker, const char *base); static void process_object_response(void *callback_data); -static void start_object_request(struct object_request *obj_req) +static void start_object_request(struct walker *walker, + struct object_request *obj_req) { char *hex = sha1_to_hex(obj_req->sha1); char prevfile[PATH_MAX]; @@ -120,6 +121,7 @@ static void start_object_request(struct object_request *obj_req) char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; struct active_request_slot *slot; + struct walker_data *data = walker->data; snprintf(prevfile, sizeof(prevfile), "%s.prev", obj_req->filename); unlink(prevfile); @@ -212,12 +214,12 @@ static void start_object_request(struct object_request *obj_req) curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file); curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, obj_req->errorstr); curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); /* If we have 
successfully processed data from a previous fetch attempt, only fetch the data we don't already have. */ if (prev_posn>0) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of object %s at byte %ld\n", hex, prev_posn); @@ -268,13 +270,16 @@ static void finish_object_request(struct object_request *obj_req) move_temp_to_file(obj_req->tmpfile, obj_req->filename); if (obj_req->rename == 0) - pull_say("got %s\n", sha1_to_hex(obj_req->sha1)); + walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1)); } static void process_object_response(void *callback_data) { struct object_request *obj_req = (struct object_request *)callback_data; + struct walker *walker = obj_req->walker; + struct walker_data *data = walker->data; + struct alt_base *alt = data->alt; obj_req->curl_result = obj_req->slot->curl_result; obj_req->http_code = obj_req->slot->http_code; @@ -283,13 +288,13 @@ static void process_object_response(void *callback_data) /* Use alternates if necessary */ if (missing_target(obj_req)) { - fetch_alternates(alt->base); + fetch_alternates(walker, alt->base); if (obj_req->repo->next != NULL) { obj_req->repo = obj_req->repo->next; close(obj_req->local); obj_req->local = -1; - start_object_request(obj_req); + start_object_request(walker, obj_req); return; } } @@ -317,7 +322,7 @@ static void release_object_request(struct object_request *obj_req) } #ifdef USE_CURL_MULTI -static int fill_active_slot(void *unused) +static int fill_active_slot(struct walker *walker) { struct object_request *obj_req; @@ -326,7 +331,7 @@ static int fill_active_slot(void *unused) if (has_sha1_file(obj_req->sha1)) obj_req->state = COMPLETE; else { - start_object_request(obj_req); + start_object_request(walker, obj_req); return 1; } } @@ -335,15 +340,17 @@ static int fill_active_slot(void *unused) } #endif -void prefetch(unsigned char *sha1) +static void prefetch(struct walker *walker, unsigned char *sha1) { struct object_request *newreq; struct object_request *tail; + struct walker_data *data = walker->data; char *filename = sha1_file_name(sha1); newreq = xmalloc(sizeof(*newreq)); + newreq->walker = walker; hashcpy(newreq->sha1, sha1); - newreq->repo = alt; + newreq->repo = data->alt; newreq->url = NULL; newreq->local = -1; newreq->state = WAITING; @@ -369,7 +376,7 @@ void prefetch(unsigned char *sha1) #endif } -static int fetch_index(struct alt_base *repo, unsigned char *sha1) +static int fetch_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); char *filename; @@ -378,6 +385,7 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) long prev_posn = 0; char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; + struct walker_data *data = walker->data; FILE *indexfile; struct active_request_slot *slot; @@ -386,7 +394,7 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) if (has_pack_index(sha1)) return 0; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Getting index for pack %s\n", hex); url = xmalloc(strlen(repo->base) + 64); @@ -404,14 +412,14 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); slot->local = indexfile; /* If there is data 
present from a previous transfer attempt, resume where it left off */ prev_posn = ftell(indexfile); if (prev_posn>0) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of index for pack %s at byte %ld\n", hex, prev_posn); @@ -437,13 +445,13 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) return move_temp_to_file(tmpfile, filename); } -static int setup_index(struct alt_base *repo, unsigned char *sha1) +static int setup_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { struct packed_git *new_pack; if (has_pack_file(sha1)) return 0; /* don't list this as something we can get */ - if (fetch_index(repo, sha1)) + if (fetch_index(walker, repo, sha1)) return -1; new_pack = parse_pack_index(sha1); @@ -456,8 +464,10 @@ static void process_alternates_response(void *callback_data) { struct alternates_request *alt_req = (struct alternates_request *)callback_data; + struct walker *walker = alt_req->walker; + struct walker_data *cdata = walker->data; struct active_request_slot *slot = alt_req->slot; - struct alt_base *tail = alt; + struct alt_base *tail = cdata->alt; const char *base = alt_req->base; static const char null_byte = '\0'; char *data; @@ -478,7 +488,7 @@ static void process_alternates_response(void *callback_data) if (slot->finished != NULL) (*slot->finished) = 0; if (!start_active_slot(slot)) { - got_alternates = -1; + cdata->got_alternates = -1; slot->in_use = 0; if (slot->finished != NULL) (*slot->finished) = 1; @@ -487,7 +497,7 @@ static void process_alternates_response(void *callback_data) } } else if (slot->curl_result != CURLE_OK) { if (!missing_target(slot)) { - got_alternates = -1; + cdata->got_alternates = -1; return; } } @@ -564,7 +574,7 @@ static void process_alternates_response(void *callback_data) memcpy(target + serverlen, data + i, posn - i - 7); target[serverlen + posn - i - 7] = 0; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Also look at %s\n", target); newalt = xmalloc(sizeof(*newalt)); @@ -581,39 +591,40 @@ static void process_alternates_response(void *callback_data) i = posn + 1; } - got_alternates = 1; + cdata->got_alternates = 1; } -static void fetch_alternates(const char *base) +static void fetch_alternates(struct walker *walker, const char *base) { struct buffer buffer; char *url; char *data; struct active_request_slot *slot; struct alternates_request alt_req; + struct walker_data *cdata = walker->data; /* If another request has already started fetching alternates, wait for them to arrive and return to processing this request's curl message */ #ifdef USE_CURL_MULTI - while (got_alternates == 0) { + while (cdata->got_alternates == 0) { step_active_slots(); } #endif /* Nothing to do if they've already been fetched */ - if (got_alternates == 1) + if (cdata->got_alternates == 1) return; /* Start the fetch */ - got_alternates = 0; + cdata->got_alternates = 0; data = xmalloc(4096); buffer.size = 4096; buffer.posn = 0; buffer.buffer = data; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Getting alternates list for %s\n", base); url = xmalloc(strlen(base) + 31); @@ -623,6 +634,7 @@ static void fetch_alternates(const char *base) may fail and need to have alternates loaded before continuing */ slot = get_active_slot(); slot->callback_func = process_alternates_response; + alt_req.walker = walker; slot->callback_data = &alt_req; curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); @@ -638,13 +650,13 @@ static void fetch_alternates(const char *base) if 
(start_active_slot(slot)) run_active_slot(slot); else - got_alternates = -1; + cdata->got_alternates = -1; free(data); free(url); } -static int fetch_indices(struct alt_base *repo) +static int fetch_indices(struct walker *walker, struct alt_base *repo) { unsigned char sha1[20]; char *url; @@ -663,7 +675,7 @@ static int fetch_indices(struct alt_base *repo) buffer.posn = 0; buffer.buffer = data; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Getting pack list for %s\n", repo->base); url = xmalloc(strlen(repo->base) + 21); @@ -703,7 +715,7 @@ static int fetch_indices(struct alt_base *repo) !prefixcmp(data + i, " pack-") && !prefixcmp(data + i + 46, ".pack\n")) { get_sha1_hex(data + i + 6, sha1); - setup_index(repo, sha1); + setup_index(walker, repo, sha1); i += 51; break; } @@ -719,7 +731,7 @@ static int fetch_indices(struct alt_base *repo) return 0; } -static int fetch_pack(struct alt_base *repo, unsigned char *sha1) +static int fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *url; struct packed_git *target; @@ -731,17 +743,18 @@ static int fetch_pack(struct alt_base *repo, unsigned char *sha1) long prev_posn = 0; char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; + struct walker_data *data = walker->data; struct active_request_slot *slot; struct slot_results results; - if (fetch_indices(repo)) + if (fetch_indices(walker, repo)) return -1; target = find_sha1_pack(sha1, repo->packs); if (!target) return -1; - if (get_verbosely) { + if (walker->get_verbosely) { fprintf(stderr, "Getting pack %s\n", sha1_to_hex(target->sha1)); fprintf(stderr, " which contains %s\n", @@ -764,14 +777,14 @@ static int fetch_pack(struct alt_base *repo, unsigned char *sha1) curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); slot->local = packfile; /* If there is data present from a previous transfer attempt, resume where it left off */ prev_posn = ftell(packfile); if (prev_posn>0) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of pack %s at byte %ld\n", sha1_to_hex(target->sha1), prev_posn); @@ -825,7 +838,7 @@ static void abort_object_request(struct object_request *obj_req) release_object_request(obj_req); } -static int fetch_object(struct alt_base *repo, unsigned char *sha1) +static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); int ret = 0; @@ -846,7 +859,7 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) step_active_slots(); } #else - start_object_request(obj_req); + start_object_request(walker, obj_req); #endif while (obj_req->state == ACTIVE) { @@ -867,7 +880,7 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) obj_req->errorstr, obj_req->curl_result, obj_req->http_code, hex); } else if (obj_req->zret != Z_STREAM_END) { - corrupt_object_found++; + walker->corrupt_object_found++; ret = error("File %s (%s) corrupt", hex, obj_req->url); } else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) { ret = error("File %s has bad hash", hex); @@ -880,20 +893,21 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) return ret; } -int fetch(unsigned char *sha1) +static int fetch(struct walker *walker, unsigned char *sha1) { - struct 
alt_base *altbase = alt; + struct walker_data *data = walker->data; + struct alt_base *altbase = data->alt; - if (!fetch_object(altbase, sha1)) + if (!fetch_object(walker, altbase, sha1)) return 0; while (altbase) { - if (!fetch_pack(altbase, sha1)) + if (!fetch_pack(walker, altbase, sha1)) return 0; - fetch_alternates(alt->base); + fetch_alternates(walker, data->alt->base); altbase = altbase->next; } return error("Unable to find %s under %s", sha1_to_hex(sha1), - alt->base); + data->alt->base); } static inline int needs_quote(int ch) @@ -942,12 +956,13 @@ static char *quote_ref_url(const char *base, const char *ref) return qref; } -int fetch_ref(char *ref, unsigned char *sha1) +static int fetch_ref(struct walker *walker, char *ref, unsigned char *sha1) { char *url; char hex[42]; struct buffer buffer; - const char *base = alt->base; + struct walker_data *data = walker->data; + const char *base = data->alt->base; struct active_request_slot *slot; struct slot_results results; buffer.size = 41; @@ -976,84 +991,45 @@ int fetch_ref(char *ref, unsigned char *sha1) return 0; } -int main(int argc, const char **argv) +static void cleanup(struct walker *walker) +{ + struct walker_data *data = walker->data; + http_cleanup(); + + curl_slist_free_all(data->no_pragma_header); +} + +struct walker *get_http_walker(const char *url) { - int commits; - const char **write_ref = NULL; - char **commit_id; - const char *url; char *s; - int arg = 1; - int rc = 0; - - setup_git_directory(); - git_config(git_default_config); - - while (arg < argc && argv[arg][0] == '-') { - if (argv[arg][1] == 't') { - get_tree = 1; - } else if (argv[arg][1] == 'c') { - get_history = 1; - } else if (argv[arg][1] == 'a') { - get_all = 1; - get_tree = 1; - get_history = 1; - } else if (argv[arg][1] == 'v') { - get_verbosely = 1; - } else if (argv[arg][1] == 'w') { - write_ref = &argv[arg + 1]; - arg++; - } else if (!strcmp(argv[arg], "--recover")) { - get_recover = 1; - } else if (!strcmp(argv[arg], "--stdin")) { - commits_on_stdin = 1; - } - arg++; - } - if (argc < arg + 2 - commits_on_stdin) { - usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url"); - return 1; - } - if (commits_on_stdin) { - commits = pull_targets_stdin(&commit_id, &write_ref); - } else { - commit_id = (char **) &argv[arg++]; - commits = 1; - } - url = argv[arg]; + struct walker_data *data = xmalloc(sizeof(struct walker_data)); + struct walker *walker = xmalloc(sizeof(struct walker)); http_init(); - no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:"); + data->no_pragma_header = curl_slist_append(NULL, "Pragma:"); - alt = xmalloc(sizeof(*alt)); - alt->base = xmalloc(strlen(url) + 1); - strcpy(alt->base, url); - for (s = alt->base + strlen(alt->base) - 1; *s == '/'; --s) + data->alt = xmalloc(sizeof(*data->alt)); + data->alt->base = xmalloc(strlen(url) + 1); + strcpy(data->alt->base, url); + for (s = data->alt->base + strlen(data->alt->base) - 1; *s == '/'; --s) *s = 0; - alt->got_indices = 0; - alt->packs = NULL; - alt->next = NULL; -#ifdef USE_CURL_MULTI - add_fill_function(NULL, fill_active_slot); -#endif + data->alt->got_indices = 0; + data->alt->packs = NULL; + data->alt->next = NULL; + data->got_alternates = -1; - if (pull(commits, commit_id, write_ref, url)) - rc = 1; + walker->corrupt_object_found = 0; + walker->fetch = fetch; + walker->fetch_ref = fetch_ref; + walker->prefetch = prefetch; + walker->cleanup = cleanup; + walker->data = data; - http_cleanup(); - - curl_slist_free_all(no_pragma_header); - - if 
(commits_on_stdin) - pull_targets_free(commits, commit_id, write_ref); +#ifdef USE_CURL_MULTI + add_fill_function(walker, (int (*)(void *)) fill_active_slot); +#endif - if (corrupt_object_found) { - fprintf(stderr, -"Some loose object were found to be corrupt, but they might be just\n" -"a false '404 Not Found' error message sent with incorrect HTTP\n" -"status code. Suggest running git-fsck.\n"); - } - return rc; + return walker; } diff --git a/fetch.c b/walker.c similarity index 75% rename from fetch.c rename to walker.c index 811be87a3c..5c65ea494d 100644 --- a/fetch.c +++ b/walker.c @@ -1,5 +1,5 @@ #include "cache.h" -#include "fetch.h" +#include "walker.h" #include "commit.h" #include "tree.h" #include "tree-walk.h" @@ -8,16 +8,11 @@ #include "refs.h" #include "strbuf.h" -int get_tree = 0; -int get_history = 0; -int get_all = 0; -int get_verbosely = 0; -int get_recover = 0; static unsigned char current_commit_sha1[20]; -void pull_say(const char *fmt, const char *hex) +void walker_say(struct walker *walker, const char *fmt, const char *hex) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, fmt, hex); } @@ -32,9 +27,9 @@ static void report_missing(const struct object *obj) sha1_to_hex(current_commit_sha1)); } -static int process(struct object *obj); +static int process(struct walker *walker, struct object *obj); -static int process_tree(struct tree *tree) +static int process_tree(struct walker *walker, struct tree *tree) { struct tree_desc desc; struct name_entry entry; @@ -59,7 +54,7 @@ static int process_tree(struct tree *tree) if (blob) obj = &blob->object; } - if (!obj || process(obj)) + if (!obj || process(walker, obj)) return -1; } free(tree->buffer); @@ -74,7 +69,7 @@ static int process_tree(struct tree *tree) static struct commit_list *complete = NULL; -static int process_commit(struct commit *commit) +static int process_commit(struct walker *walker, struct commit *commit) { if (parse_commit(commit)) return -1; @@ -88,43 +83,43 @@ static int process_commit(struct commit *commit) hashcpy(current_commit_sha1, commit->object.sha1); - pull_say("walk %s\n", sha1_to_hex(commit->object.sha1)); + walker_say(walker, "walk %s\n", sha1_to_hex(commit->object.sha1)); - if (get_tree) { - if (process(&commit->tree->object)) + if (walker->get_tree) { + if (process(walker, &commit->tree->object)) return -1; - if (!get_all) - get_tree = 0; + if (!walker->get_all) + walker->get_tree = 0; } - if (get_history) { + if (walker->get_history) { struct commit_list *parents = commit->parents; for (; parents; parents = parents->next) { - if (process(&parents->item->object)) + if (process(walker, &parents->item->object)) return -1; } } return 0; } -static int process_tag(struct tag *tag) +static int process_tag(struct walker *walker, struct tag *tag) { if (parse_tag(tag)) return -1; - return process(tag->tagged); + return process(walker, tag->tagged); } static struct object_list *process_queue = NULL; static struct object_list **process_queue_end = &process_queue; -static int process_object(struct object *obj) +static int process_object(struct walker *walker, struct object *obj) { if (obj->type == OBJ_COMMIT) { - if (process_commit((struct commit *)obj)) + if (process_commit(walker, (struct commit *)obj)) return -1; return 0; } if (obj->type == OBJ_TREE) { - if (process_tree((struct tree *)obj)) + if (process_tree(walker, (struct tree *)obj)) return -1; return 0; } @@ -132,7 +127,7 @@ static int process_object(struct object *obj) return 0; } if (obj->type == OBJ_TAG) { - if 
(process_tag((struct tag *)obj)) + if (process_tag(walker, (struct tag *)obj)) return -1; return 0; } @@ -141,7 +136,7 @@ static int process_object(struct object *obj) typename(obj->type), sha1_to_hex(obj->sha1)); } -static int process(struct object *obj) +static int process(struct walker *walker, struct object *obj) { if (obj->flags & SEEN) return 0; @@ -154,7 +149,7 @@ static int process(struct object *obj) else { if (obj->flags & COMPLETE) return 0; - prefetch(obj->sha1); + walker->prefetch(walker, obj->sha1); } object_list_insert(obj, process_queue_end); @@ -162,7 +157,7 @@ static int process(struct object *obj) return 0; } -static int loop(void) +static int loop(struct walker *walker) { struct object_list *elem; @@ -178,25 +173,25 @@ static int loop(void) * the queue because we needed to fetch it first. */ if (! (obj->flags & TO_SCAN)) { - if (fetch(obj->sha1)) { + if (walker->fetch(walker, obj->sha1)) { report_missing(obj); return -1; } } if (!obj->type) parse_object(obj->sha1); - if (process_object(obj)) + if (process_object(walker, obj)) return -1; } return 0; } -static int interpret_target(char *target, unsigned char *sha1) +static int interpret_target(struct walker *walker, char *target, unsigned char *sha1) { if (!get_sha1_hex(target, sha1)) return 0; if (!check_ref_format(target)) { - if (!fetch_ref(target, sha1)) { + if (!walker->fetch_ref(walker, target, sha1)) { return 0; } } @@ -213,7 +208,7 @@ static int mark_complete(const char *path, const unsigned char *sha1, int flag, return 0; } -int pull_targets_stdin(char ***target, const char ***write_ref) +int walker_targets_stdin(char ***target, const char ***write_ref) { int targets = 0, targets_alloc = 0; struct strbuf buf; @@ -243,7 +238,7 @@ int pull_targets_stdin(char ***target, const char ***write_ref) return targets; } -void pull_targets_free(int targets, char **target, const char **write_ref) +void walker_targets_free(int targets, char **target, const char **write_ref) { while (targets--) { free(target[targets]); @@ -252,8 +247,8 @@ void pull_targets_free(int targets, char **target, const char **write_ref) } } -int pull(int targets, char **target, const char **write_ref, - const char *write_ref_log_details) +int walker_fetch(struct walker *walker, int targets, char **target, + const char **write_ref, const char *write_ref_log_details) { struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *)); unsigned char *sha1 = xmalloc(targets * 20); @@ -275,19 +270,19 @@ int pull(int targets, char **target, const char **write_ref, } } - if (!get_recover) + if (!walker->get_recover) for_each_ref(mark_complete, NULL); for (i = 0; i < targets; i++) { - if (interpret_target(target[i], &sha1[20 * i])) { + if (interpret_target(walker, target[i], &sha1[20 * i])) { error("Could not interpret %s as something to pull", target[i]); goto unlock_and_fail; } - if (process(lookup_unknown_object(&sha1[20 * i]))) + if (process(walker, lookup_unknown_object(&sha1[20 * i]))) goto unlock_and_fail; } - if (loop()) + if (loop(walker)) goto unlock_and_fail; if (write_ref_log_details) { @@ -308,10 +303,16 @@ int pull(int targets, char **target, const char **write_ref, return 0; - unlock_and_fail: for (i = 0; i < targets; i++) if (lock[i]) unlock_ref(lock[i]); + return -1; } + +void walker_free(struct walker *walker) +{ + walker->cleanup(walker); + free(walker); +} diff --git a/walker.h b/walker.h new file mode 100644 index 0000000000..ea2c363f4e --- /dev/null +++ b/walker.h @@ -0,0 +1,37 @@ +#ifndef WALKER_H +#define WALKER_H + +struct walker { 
+ void *data; + int (*fetch_ref)(struct walker *, char *ref, unsigned char *sha1); + void (*prefetch)(struct walker *, unsigned char *sha1); + int (*fetch)(struct walker *, unsigned char *sha1); + void (*cleanup)(struct walker *); + int get_tree; + int get_history; + int get_all; + int get_verbosely; + int get_recover; + + int corrupt_object_found; +}; + +/* Report what we got under get_verbosely */ +void walker_say(struct walker *walker, const char *, const char *); + +/* Load pull targets from stdin */ +int walker_targets_stdin(char ***target, const char ***write_ref); + +/* Free up loaded targets */ +void walker_targets_free(int targets, char **target, const char **write_ref); + +/* If write_ref is set, the ref filename to write the target value to. */ +/* If write_ref_log_details is set, additional text will appear in the ref log. */ +int walker_fetch(struct walker *impl, int targets, char **target, + const char **write_ref, const char *write_ref_log_details); + +void walker_free(struct walker *walker); + +struct walker *get_http_walker(const char *url); + +#endif /* WALKER_H */ -- 2.39.2
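
For reference, below is a minimal sketch of how a caller drives the new walker interface, following the pattern used by cmd_http_fetch() in builtin-http-fetch.c above. The helper name fetch_one_commit() and the idea of fetching a single caller-supplied target are illustrative assumptions only; everything else (get_http_walker(), the get_* knobs on struct walker, walker_fetch(), walker_free()) is taken from this patch.

/*
 * Illustrative only -- not part of the patch.  Fetch the commit named by
 * commit_hex (and everything reachable from it) from a remote HTTP
 * repository, optionally writing the result to a local ref.
 */
#include "cache.h"
#include "walker.h"

static int fetch_one_commit(const char *commit_hex, const char *url,
			    const char *ref_to_write)
{
	struct walker *walker;
	char *target = (char *)commit_hex;
	const char *write_ref = ref_to_write;	/* may be NULL: no ref update */
	int rc;

	/* The HTTP backend is the only implementation this patch provides. */
	walker = get_http_walker(url);

	/* These knobs replace the old global get_* flags from fetch.h. */
	walker->get_tree = 1;		/* fetch the target commit's tree */
	walker->get_history = 1;	/* follow parent commits */
	walker->get_all = 1;		/* and their trees, too */
	walker->get_verbosely = 0;
	walker->get_recover = 0;

	/* walker_fetch() is the renamed pull(); one target, no reflog text. */
	rc = walker_fetch(walker, 1, &target,
			  write_ref ? &write_ref : NULL, NULL);

	walker_free(walker);		/* runs walker->cleanup() for us */
	return rc;
}

The point of the struct-of-pointers design is that walker.c never calls fetch(), prefetch() or fetch_ref() directly any more; it goes through walker->fetch and friends, so another transport could supply its own struct walker without touching the object-walking code.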