struct pakfire_transfer {
TAILQ_ENTRY(pakfire_transfer) nodes;
-
CURL* handle;
- // Where do we write the result to?
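+ // Where do we download from?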
+ char url[PATH_MAX];
char path[PATH_MAX];
// Temporary file
char tempfile[PATH_MAX];
FILE* f;
+
+ // Mirrors
+ unsigned int current_mirror;
};
struct pakfire_downloader {
unsigned int num_mirrors;
};
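+// Returns 1 if the URL starts with a supported scheme (https, http or file)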
+static int pakfire_url_is_absolute(const char* url) {
+ if (pakfire_string_startswith(url, "https://"))
+ return 1;
+
+ if (pakfire_string_startswith(url, "http://"))
+ return 1;
+
+ if (pakfire_string_startswith(url, "file://"))
+ return 1;
+
+ return 0;
+}
+
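+// Joins two URL parts by simple concatenation; the caller must free the result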
+static char* pakfire_url_join(const char* part1, const char* part2) {
+ char* url = NULL;
+
+ int r = asprintf(&url, "%s%s", part1, part2);
+ if (r < 0)
+ return NULL;
+
+ return url;
+}
+
static int pakfire_downloader_setup_curl(struct pakfire_downloader* downloader) {
// Globally initialise cURL
if (!curl_initialized++) {
#endif
static struct pakfire_transfer* pakfire_downloader_create_transfer(
- struct pakfire_downloader* downloader, const char* path) {
+ struct pakfire_downloader* downloader, const char* url, const char* path) {
struct pakfire_transfer* transfer = calloc(1, sizeof(*transfer));
if (!transfer)
return NULL;
+ // Copy URL
+ snprintf(transfer->url, sizeof(transfer->url) - 1, "%s", url);
+
// Copy path
snprintf(transfer->path, sizeof(transfer->path) - 1, "%s", path);
const char* url, const char* path) {
DEBUG(downloader->pakfire, "Adding download of %s\n", url);
+ // Do not allow relative URLs when no mirrors are set
+ if (!pakfire_url_is_absolute(url) && downloader->num_mirrors == 0) {
+ ERROR(downloader->pakfire, "Relative URLs cannot be used without any mirrors set\n");
+ return EINVAL;
+ }
+
struct pakfire_transfer* transfer =
- pakfire_downloader_create_transfer(downloader, path);
+ pakfire_downloader_create_transfer(downloader, url, path);
if (!transfer)
return 1;
- // Set URL
- curl_easy_setopt(transfer->handle, CURLOPT_URL, url);
-
// Push this transfer onto the queue
TAILQ_INSERT_HEAD(&downloader->transfers, transfer, nodes);
return 0;
}
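+// Resolves the transfer's URL (joining it with the current mirror for relative
+// URLs) and adds the easy handle to the cURL multi handle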
+static int pakfire_downloader_activate_transfer(struct pakfire_downloader* downloader,
+ struct pakfire_transfer* transfer) {
+ // Simply set absolute URLs
+ if (pakfire_url_is_absolute(transfer->url)) {
+ curl_easy_setopt(transfer->handle, CURLOPT_URL, transfer->url);
+
+ // Join path if we are using mirrors
+ } else {
+ // XXX for now, simply use the currently selected mirror
+ struct pakfire_mirror* mirror = downloader->mirrors[transfer->current_mirror];
+
+ char* url = pakfire_url_join(mirror->url, transfer->url);
+ if (!url) {
+ ERROR(downloader->pakfire, "Error composing download URL: %s\n",
+ strerror(errno));
+ return 1;
+ }
+
+ curl_easy_setopt(transfer->handle, CURLOPT_URL, url);
+ free(url);
+ }
+
+ // Add the handle to cURL
+ int r = curl_multi_add_handle(downloader->curl, transfer->handle);
+ if (r) {
+ ERROR(downloader->pakfire, "Adding handle failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
int pakfire_downloader_run(struct pakfire_downloader* downloader) {
struct pakfire_transfer* transfer;
unsigned int transfers = 0;
transfer = TAILQ_LAST(&downloader->transfers, transfers);
TAILQ_REMOVE(&downloader->transfers, transfer, nodes);
- r = curl_multi_add_handle(downloader->curl, transfer->handle);
+ r = pakfire_downloader_activate_transfer(downloader, transfer);
if (r) {
- ERROR(downloader->pakfire, "Adding handler failed\n");
+ ERROR(downloader->pakfire, "Could not activate transfer\n");
pakfire_transfer_free(transfer);
+ continue;
}
transfers++;
char* baseurl;
char* keyfile;
+ // Database filename
+ char database[NAME_MAX];
+
// Mirrorlist
char* mirrorlist_url;
char mirrorlist[PATH_MAX];
return repo_add_solvable(repo->repo);
}
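+// Parses the downloaded repository metadata and remembers the database filename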
+static int pakfire_repo_read_metadata(PakfireRepo repo, const char* path) {
+ struct json_object* json = pakfire_json_parse_from_file(repo->pakfire, path);
+ if (!json) {
+ ERROR(repo->pakfire, "Could not parse metadata\n");
+ return 1;
+ }
+
+ struct json_object* database = NULL;
+
+ // Search for the database name
+ int found = json_object_object_get_ex(json, "database", &database);
+ if (found) {
+ snprintf(repo->appdata->database, sizeof(repo->appdata->database) - 1,
+ "%s", json_object_get_string(database));
+
+ DEBUG(repo->pakfire, "Configured package database as %s\n", repo->appdata->database);
+ }
+
+ // Free the parsed JSON object
+ json_object_put(json);
+
+ return 0;
+}
+
static void free_repo_appdata(struct pakfire_repo_appdata* appdata) {
// repodata is being destroyed with the repository
}
static int pakfire_repo_refresh_metadata(PakfireRepo repo, const int force) {
+ // Get the downloader
+ struct pakfire_downloader* downloader = pakfire_repo_downloader(repo);
+ if (!downloader)
+ return 1;
+
+ // Make download path
+ char path[PATH_MAX];
+ snprintf(path, sizeof(path) - 1, "%s/repodata/%s/.repomd.json",
+ pakfire_get_cache_path(repo->pakfire), pakfire_repo_get_name(repo));
+
+ // Try to download the metadata
+ int r = pakfire_downloader_retrieve(downloader, "repodata/repomd.json", path);
+ if (r)
+ goto ERROR;
+
+ // Parse metadata
+ r = pakfire_repo_read_metadata(repo, path);
+ if (r)
+ goto ERROR;
+
+ // Store repodata permanently
+
+ // Success
+ r = 0;
+
+ERROR:
+ pakfire_downloader_unref(downloader);
+ unlink(path);
+
- return 0;
+ return r;
}
// Refresh mirrorlist
r = pakfire_repo_refresh_mirrorlist(repo, force);
- if (r)
- return r;
+ if (r) {
+ ERROR(repo->pakfire, "Could not refresh mirrorlist, but will continue anyway\n");
+ }
// Refresh metadata
r = pakfire_repo_refresh_metadata(repo, force);