#include "json_util.h"
#include "log.h"
#include "rrdp.h"
-#include "thread_var.h"
#include "data_structure/path_builder.h"
#include "data_structure/uthash.h"
#include "http/http.h"
#include "rsync/rsync.h"
-/*
- * Please note: Some of the functions in this module (the ones that have to do
- * with jansson, both inside and outside of it) are recursive.
- *
- * This is fine. Infinite recursion is prevented through path_builder's
- * MAX_CAPACITY (which is currently defined as 4096), which has to be done
- * anyway.
- *
- * And given that you need at least one character and one slash per directory
- * level, the maximum allowed recursion level is 2048, which happens to align
- * with jansson's JSON_PARSER_MAX_DEPTH. (Which is also something we can't
- * change.)
- *
- * FIXME test max recursion
- */
-
#define TAGNAME_BN "basename"
#define TAGNAME_DIRECT "direct-download"
#define TAGNAME_ERROR "latest-result"
#define TAGNAME_FILE "is-file"
#define TAGNAME_CHILDREN "children"
-/*
- * Have we ever attempted to download this directly?
- * Otherwise we actually downloaded a descendant.
- *
- * Directly downloaded nodes need to be retained, along with their ancestors.
- * If the download was successful, they should never have children (as this
- * would be redundant), though their directory counterparts probably will.
- */
-#define CNF_DIRECT (1 << 0)
-/* Has it downloaded successfully at some point? */
-#define CNF_SUCCESS (1 << 1)
-/* Has it been traversed during the current cleanup? */
-#define CNF_FOUND (1 << 2)
-/*
- * If enabled, node represents a file. Otherwise, node is a directory.
- * Only valid on HTTPs trees; we never know what rsync downloads.
- */
-#define CNF_FILE (1 << 3)
-
struct cache_node {
- char *basename; /* Simple file name, parents not included */
+ struct rpki_uri *url;
- /* CNF_* */
- int flags;
- /*
- * Last successful download timestamp.
- * (Only if CNF_DIRECT & CNF_SUCCESS.)
- * FIXME Intended to later decide whether a file should be deleted,
- * when the cache is running out of space.
- */
- time_t ts_success;
- /*
- * Last download attempt timestamp. (Only if CNF_DIRECT.)
- * Decides whether the file needs to be updated.
- */
- time_t ts_attempt;
- /* Last download attempt's result status. (Only if CNF_DIRECT) */
- int error;
+ struct {
+ time_t ts; /* Last download attempt's timestamp */
+ int result; /* Last download attempt's result status code */
+ } attempt;
- struct cache_node *parent; /* Simple pointer */
- struct cache_node *children; /* Hash table */
+ struct {
+ /* Has a download attempt ever been successful? */
+ bool happened;
+ /* Last successful download timestamp. (Only if @happened.) */
+ time_t ts;
+ } success;
UT_hash_handle hh; /* Hash table hook */
};
struct rpki_cache {
char *tal;
- struct cache_node *rsync;
- struct cache_node *https;
- time_t startup_time; /* When we started the last validation */
+ struct cache_node *ht;
+ time_t startup_ts; /* When we started the last validation */
};
-static struct cache_node *
-add_child(struct cache_node *parent, char const *basename)
-{
- struct cache_node *child;
- char *key;
- size_t keylen;
-
- child = pzalloc(sizeof(struct cache_node));
- child->basename = pstrdup(basename);
- child->parent = parent;
-
- key = child->basename;
- keylen = strlen(key);
-
- HASH_ADD_KEYPTR(hh, parent->children, key, keylen, child);
-
- return child;
-}
-
-static struct cache_node *
-init_root(struct cache_node *root, char const *name)
-{
- if (root != NULL)
- return root;
-
- root = pzalloc(sizeof(struct cache_node));
- root->basename = pstrdup(name);
-
- return root;
-}
-
-static void
-__delete_node(struct cache_node *node)
-{
- if (node->parent != NULL)
- HASH_DEL(node->parent->children, node);
- free(node->basename);
- free(node);
-}
-
-static void
-delete_node(struct cache_node *node)
-{
- struct cache_node *parent;
-
- if (node == NULL)
- return;
-
- if (node->parent != NULL) {
- HASH_DEL(node->parent->children, node);
- node->parent = NULL;
- }
-
- do {
- while (node->children != NULL)
- node = node->children;
- parent = node->parent;
- __delete_node(node);
- node = parent;
- } while (node != NULL);
-}
+#define TAGNAME_URL "url"
+#define TAGNAME_ATTEMPT_TS "attempt-timestamp"
+#define TAGNAME_ATTEMPT_ERR "attempt-result"
+#define TAGNAME_SUCCESS_TS "success-timestamp"
-static int
-get_metadata_json_filename(char const *tal, char **filename)
+static char *
+get_json_filename(struct rpki_cache *cache)
{
struct path_builder pb;
- int error;
-
- error = pb_init_cache(&pb, tal, "metadata.json");
- if (error)
- return error;
-
- *filename = pb.string;
- return 0;
+ return pb_init_cache(&pb, cache->tal, "metadata.json")
+ ? NULL : pb.string;
}
static struct cache_node *
-json2node(json_t *json, struct cache_node *parent)
+json2node(struct rpki_cache *cache, json_t *json)
{
- struct cache_node *node, *child;
- char const *string;
- bool boolean;
- json_t *jchild;
- size_t c;
+ struct cache_node *node;
+ char const *url;
+ enum uri_type type;
int error;
- if (json == NULL)
- return NULL;
-
node = pzalloc(sizeof(struct cache_node));
- error = json_get_str(json, TAGNAME_BN, &string);
+ error = json_get_str(json, TAGNAME_URL, &url);
if (error) {
if (error > 0)
- pr_op_err("Node is missing the '" TAGNAME_BN "' tag.");
- goto cancel;
+ pr_op_err("Node is missing the '" TAGNAME_URL "' tag.");
+ goto fail;
}
- node->basename = pstrdup(string);
-
- if (json_get_bool(json, TAGNAME_DIRECT, &boolean) < 0)
- goto cancel;
- if (boolean) {
- node->flags |= CNF_DIRECT;
- if (json_get_int(json, TAGNAME_ERROR, &node->error) < 0)
- goto cancel;
- if (json_get_ts(json, TAGNAME_TSATTEMPT, &node->ts_attempt) < 0)
- goto cancel;
+ if (str_starts_with(url, "https://"))
+ type = UT_HTTPS;
+ else if (str_starts_with(url, "rsync://"))
+ type = UT_RSYNC;
+ else {
+ pr_op_err("Unknown protocol: %s", url);
+ goto fail;
+ }
+ error = uri_create(&node->url, cache->tal, type, NULL, url);
- if (json_get_bool(json, TAGNAME_SUCCESS, &boolean) < 0)
- goto cancel;
- if (boolean) {
- node->flags |= CNF_SUCCESS;
- if (json_get_ts(json, TAGNAME_TSSUCCESS, &node->ts_success) < 0)
- goto cancel;
- }
+ error = json_get_ts(json, TAGNAME_ATTEMPT_TS, &node->attempt.ts);
+ if (error) {
+ if (error > 0)
+ pr_op_err("Node '%s' is missing the '"
+ TAGNAME_ATTEMPT_TS "' tag.", url);
+ goto fail;
}
- if (json_get_bool(json, TAGNAME_FILE, &boolean) < 0)
- goto cancel;
- if (boolean)
- node->flags |= CNF_FILE;
+ if (json_get_int(json, TAGNAME_ATTEMPT_ERR, &node->attempt.result) < 0)
+ goto fail;
- if (json_get_array(json, "children", &jchild) < 0)
- goto cancel;
- for (c = 0; c < json_array_size(jchild); c++) {
- child = json2node(json_array_get(jchild, c), node);
- if (child != NULL)
- HASH_ADD_KEYPTR(hh, node->children, child->basename,
- strlen(child->basename), child);
- }
+ error = json_get_ts(json, TAGNAME_SUCCESS_TS, &node->success.ts);
+ if (error < 0)
+ goto fail;
+ node->success.happened = (error == 0);
- node->parent = parent;
- pr_op_debug("Node '%s' successfully loaded from metadata.json.",
- node->basename);
+ pr_op_debug("Node '%s' loaded successfully.", url);
return node;
-cancel:
- delete_node(node);
+fail:
+ uri_refput(node->url);
+ free(node);
return NULL;
}
+/*
+ * Returns the cached node whose key (the URL's local cache path) matches
+ * @uri's, or NULL if the cache has no such entry.
+ */
+static struct cache_node*
+find_node(struct rpki_cache *cache, struct rpki_uri *uri)
+{
+	char const *key = uri_get_local(uri);
+	struct cache_node *result;
+	HASH_FIND_STR(cache->ht, key, result);
+	return result;
+}
+
+/*
+ * Registers @node in @cache's hash table, keyed by the URL's local cache
+ * path. Assumes no node with the same key is already present.
+ */
+static void
+add_node(struct rpki_cache *cache, struct cache_node *node)
+{
+	char const *key = uri_get_local(node->url);
+	size_t keylen = strlen(key);
+	HASH_ADD_KEYPTR(hh, cache->ht, key, keylen, node);
+}
+
static void
load_metadata_json(struct rpki_cache *cache)
{
char *filename;
json_t *root;
json_error_t jerror;
+ size_t n;
struct cache_node *node;
- size_t d;
/*
* Note: Loading metadata.json is one of few things Fort can fail at
* without killing itself. It's just a cache of a cache.
*/
- if (get_metadata_json_filename(cache->tal, &filename) != 0)
+ filename = get_json_filename(cache);
+ if (filename == NULL)
return;
root = json_load_file(filename, 0, &jerror);
if (root == NULL) {
- if (json_error_code(&jerror) == json_error_cannot_open_file) {
+ if (json_error_code(&jerror) == json_error_cannot_open_file)
pr_op_debug("%s does not exist.", filename);
- } else {
+ else
pr_op_err("Json parsing failure at %s (%d:%d): %s",
filename, jerror.line, jerror.column, jerror.text);
- }
goto end;
}
if (json_typeof(root) != JSON_ARRAY) {
goto end;
}
- for (d = 0; d < json_array_size(root); d++) {
- node = json2node(json_array_get(root, d), NULL);
- if (node == NULL)
- continue;
- else if (strcasecmp(node->basename, "rsync") == 0)
- cache->rsync = node;
- else if (strcasecmp(node->basename, "https") == 0)
- cache->https = node;
- else {
- pr_op_warn("%s: Ignoring unrecognized json node '%s'.",
- filename, node->basename);
- delete_node(node);
- }
+ for (n = 0; n < json_array_size(root); n++) {
+ node = json2node(cache, json_array_get(root, n));
+ if (node != NULL)
+ add_node(cache, node);
}
-end:
+end: json_decref(root);
free(filename);
- json_decref(root);
+}
+
+/*
+ * Allocates and initializes the download cache for TAL @tal, then primes it
+ * from the on-disk metadata.json if one exists. (Failure to load the json is
+ * tolerated; it's just a cache of a cache.) Caller owns the result; release
+ * with cache_destroy().
+ */
+struct rpki_cache *
+cache_create(char const *tal)
+{
+	struct rpki_cache *cache;
+	cache = pzalloc(sizeof(struct rpki_cache));
+	cache->tal = pstrdup(tal);
+	/* Reference timestamp: nodes attempted at/after this are "recent". */
+	cache->startup_ts = time(NULL);
+	if (cache->startup_ts == (time_t) -1)
+		pr_crit("time(NULL) returned (time_t) -1.");
+	load_metadata_json(cache);
+	return cache;
+}
static json_t *
node2json(struct cache_node *node)
{
- json_t *json, *children, *jchild;
- struct cache_node *child, *tmp;
- int cnf;
+ json_t *json;
json = json_object();
if (json == NULL) {
return NULL;
}
- if (json_add_str(json, TAGNAME_BN, node->basename))
+ if (json_add_str(json, TAGNAME_URL, uri_get_global(node->url)))
goto cancel;
-
- cnf = node->flags & CNF_DIRECT;
- if (cnf) {
- if (json_add_bool(json, TAGNAME_DIRECT, cnf))
- goto cancel;
- if (json_add_int(json, TAGNAME_ERROR, node->error))
- goto cancel;
- if (json_add_date(json, TAGNAME_TSATTEMPT, node->ts_attempt))
- goto cancel;
- cnf = node->flags & CNF_SUCCESS;
- if (cnf) {
- if (json_add_bool(json, TAGNAME_SUCCESS, cnf))
- goto cancel;
- if (json_add_date(json, TAGNAME_TSSUCCESS, node->ts_success))
- goto cancel;
- }
- }
- cnf = node->flags & CNF_FILE;
- if (cnf && json_add_bool(json, TAGNAME_FILE, cnf))
+ if (json_add_date(json, TAGNAME_ATTEMPT_TS, node->attempt.ts))
goto cancel;
-
- if (node->children != NULL) {
- children = json_array();
- if (children == NULL)
- enomem_panic();
-
- if (json_object_set_new(json, TAGNAME_CHILDREN, children)) {
- pr_op_err("Cannot push children array into json node; unknown cause.");
+ if (json_add_int(json, TAGNAME_ATTEMPT_ERR, node->attempt.result))
+ goto cancel;
+ if (node->success.happened)
+ if (json_add_date(json, TAGNAME_SUCCESS_TS, node->success.ts))
goto cancel;
- }
-
- HASH_ITER(hh, node->children, child, tmp) {
- jchild = node2json(child);
- if (jchild == NULL)
- goto cancel; /* Error msg already printed */
- if (json_array_append_new(children, jchild)) {
- pr_op_err("Cannot push child into json node; unknown cause.");
- goto cancel;
- }
- }
- }
return json;
return NULL;
}
-static int
-append_node(json_t *root, struct cache_node *node, char const *name)
-{
- json_t *child;
-
- if (node == NULL)
- return 0;
- child = node2json(node);
- if (child == NULL)
- return -1;
- if (json_array_append_new(root, child)) {
- pr_op_err("Cannot push %s json node into json root; unknown cause.",
- name);
- return -1;
- }
-
- return 0;
-}
-
static json_t *
build_metadata_json(struct rpki_cache *cache)
{
- json_t *root;
+ struct cache_node *node, *tmp;
+ json_t *root, *child;
root = json_array();
- if (root == NULL) {
- pr_op_err("json root allocation failure.");
- return NULL;
- }
+ if (root == NULL)
+ enomem_panic();
- if (append_node(root, cache->rsync, "rsync")
- || append_node(root, cache->https, "https")) {
- json_decref(root);
- return NULL;
+ HASH_ITER(hh, cache->ht, node, tmp) {
+ child = node2json(node);
+ if (child == NULL)
+ continue;
+ if (json_array_append_new(root, child)) {
+ pr_op_err("Cannot push %s json node into json root; unknown cause.",
+ uri_op_get_printable(node->url));
+ continue;
+ }
}
return root;
static void
write_metadata_json(struct rpki_cache *cache)
{
- struct json_t *json;
char *filename;
+ struct json_t *json;
json = build_metadata_json(cache);
if (json == NULL)
return;
- if (get_metadata_json_filename(cache->tal, &filename) != 0)
+ filename = get_json_filename(cache);
+ if (filename == NULL)
goto end;
- if (json_dump_file(json, filename, JSON_COMPACT))
- pr_op_err("Unable to write metadata.json; unknown cause.");
+ if (json_dump_file(json, filename, JSON_INDENT(2)))
+ pr_op_err("Unable to write %s; unknown cause.", filename);
- free(filename);
end: json_decref(json);
+ free(filename);
}
-struct rpki_cache *
-cache_create(char const *tal)
+static void
+delete_node(struct rpki_cache *cache, struct cache_node *node)
{
- struct rpki_cache *cache;
-
- cache = pmalloc(sizeof(struct rpki_cache));
- cache->tal = pstrdup(tal);
- cache->rsync = NULL;
- cache->https = NULL;
- cache->startup_time = time(NULL);
- if (cache->startup_time == ((time_t) -1))
- pr_crit("time(NULL) returned -1");
-
- load_metadata_json(cache);
-
- return cache;
+ HASH_DEL(cache->ht, node);
+ uri_refput(node->url);
+ free(node);
}
void
cache_destroy(struct rpki_cache *cache)
{
+ struct cache_node *node, *tmp;
+
write_metadata_json(cache);
+
+ HASH_ITER(hh, cache->ht, node, tmp)
+ delete_node(cache, node);
free(cache->tal);
- delete_node(cache->rsync);
- delete_node(cache->https);
free(cache);
}
static int
-delete_node_file(struct rpki_cache *cache, struct cache_node *node,
- bool is_file)
+get_url(struct rpki_uri *uri, const char *tal, struct rpki_uri **url)
{
- struct path_builder pb;
- struct cache_node *cursor;
+ char const *guri, *c;
+ char *guri2;
+ unsigned int slashes;
int error;
- pb_init(&pb);
- for (cursor = node; cursor != NULL; cursor = cursor->parent) {
- error = pb_append(&pb, cursor->basename);
- if (error)
- goto cancel;
+ if (uri_get_type(uri) != UT_RSYNC) {
+ uri_refget(uri);
+ *url = uri;
+ return 0;
}
- error = pb_append(&pb, cache->tal);
- if (error)
- goto cancel;
- error = pb_append(&pb, config_get_local_repository());
- if (error)
- goto cancel;
- pb_reverse(&pb);
- if (is_file) {
- if (remove(pb.string) != 0) {
- error = errno;
- pr_val_err("Cannot override file '%s': %s",
- pb.string, strerror(error));
+ /*
+ * Careful with this code. rsync(1):
+ *
+ * > A trailing slash on the source changes this behavior to avoid
+ * > creating an additional directory level at the destination. You can
+ * > think of a trailing / on a source as meaning "copy the contents of
+ * > this directory" as opposed to "copy the directory by name", but in
+ * > both cases the attributes of the containing directory are
+ * > transferred to the containing directory on the destination. In
+ * > other words, each of the following commands copies the files in the
+ * > same way, including their setting of the attributes of /dest/foo:
+ * >
+ * > rsync -av /src/foo /dest
+ * > rsync -av /src/foo/ /dest/foo
+ *
+ * This quirk does not behave consistently. In practice, if you rsync
+ * at the module level, rsync servers behave as if the trailing slash
+ * always existed.
+ *
+ * ie. the two following rsyncs behave identically:
+ *
+ * rsync -rtz rsync://repository.lacnic.net/rpki potatoes
+ * (Copies the content of rpki to potatoes.)
+ * rsync -rtz rsync://repository.lacnic.net/rpki/ potatoes
+ * (Copies the content of rpki to potatoes.)
+ *
+ * Even though the following do not:
+ *
+ * rsync -rtz rsync://repository.lacnic.net/rpki/lacnic potatoes
+ * (Copies lacnic to potatoes.)
+ * rsync -rtz rsync://repository.lacnic.net/rpki/lacnic/ potatoes
+ * (Copies the content of lacnic to potatoes.)
+ *
+ * This is important to us, because an inconsistent missing directory
+ * component will screw our URLs-to-cache mappings.
+ *
+ * My solution is to add the slash myself. That's all I can do to force
+ * it to behave consistently, it seems.
+ *
+ * But note: This only works if we're synchronizing a directory.
+ * But this is fine, because this hack stacks with the minimum common
+ * path performance hack.
+ */
+
+ guri = uri_get_global(uri);
+ slashes = 0;
+ for (c = guri; *c != '\0'; c++) {
+ if (*c == '/') {
+ slashes++;
+ if (slashes == 4)
+ return __uri_create(url, tal, UT_RSYNC, NULL,
+ guri, c - guri + 1);
}
- } else {
- error = file_rm_rf(pb.string);
- pr_val_err("Cannot override directory '%s': %s",
- pb.string, strerror(error));
}
- pb_cleanup(&pb);
- return error;
+ if (slashes == 3 && *(c - 1) != '/') {
+ guri2 = pstrdup(guri); /* Remove const */
+ guri2[c - guri] = '/';
+ error = __uri_create(url, tal, UT_RSYNC, NULL, guri2,
+ c - guri + 1);
+ free(guri2);
+ return error;
+ }
-cancel:
- pb_cleanup(&pb);
- return error;
+ /*
+ * Minimum common path performance hack: rsync the rsync module root,
+ * not every RPP separately. The former is much faster.
+ */
+ return pr_val_err("Can't rsync URL '%s': The URL seems to be missing a domain or rsync module.",
+ guri);
}
static bool
was_recently_downloaded(struct rpki_cache *cache, struct cache_node *node)
{
- return (node->flags & CNF_DIRECT) &&
- (cache->startup_time <= node->ts_attempt);
+ return difftime(cache->startup_ts, node->attempt.ts) <= 0;
}
-static void
-drop_children(struct cache_node *node)
-{
- struct cache_node *child, *tmp;
-
- HASH_ITER(hh, node->children, child, tmp)
- delete_node(child);
-}
-
-static char *
-uri2luri(struct rpki_uri *uri)
-{
- char const *luri;
-
- luri = uri_get_local(uri) + strlen(config_get_local_repository());
- while (luri[0] == '/')
- luri++;
-
- return pstrdup(luri);
-}
-
-/* Returns 0 if the file exists, nonzero otherwise. */
static int
-cache_check(struct rpki_uri *uri)
+cache_check(struct rpki_uri *url)
{
- struct stat meta;
int error;
- if (stat(uri_get_local(uri), &meta) != 0) {
- error = errno;
+ error = file_exists(uri_get_local(url));
+ switch (error) {
+ case 0:
+ pr_val_debug("Offline mode, file is cached.");
+ break;
+ case ENOENT:
pr_val_debug("Offline mode, file is not cached.");
- return error;
+ break;
+ default:
+ pr_val_debug("Offline mode, unknown result %d (%s)",
+ error, strerror(error));
}
- pr_val_debug("Offline mode, file is cached.");
- return 0;
+ return error;
}
/**
int
cache_download(struct rpki_cache *cache, struct rpki_uri *uri, bool *changed)
{
- char *luri;
- char *token;
- char *saveptr;
- struct cache_node *node, *child;
- bool recursive;
+ struct rpki_uri *url;
+ struct cache_node *node;
int error;
if (changed != NULL)
*changed = false;
- luri = uri2luri(uri);
-
- token = strtok_r(luri, "/", &saveptr);
- if (strcmp(token, cache->tal) != 0)
- pr_crit("Expected TAL %s for path %s.", cache->tal, uri_get_local(uri));
-
- token = strtok_r(NULL, "/", &saveptr);
- switch (uri_get_type(uri)) {
- case UT_RSYNC:
- if (strcmp(token, "rsync") != 0)
- return pr_val_err("Path is not rsync: %s", uri_get_local(uri));
- if (!config_get_rsync_enabled()) {
- error = cache_check(uri);
- goto end;
- }
- node = cache->rsync = init_root(cache->rsync, "rsync");
- recursive = true;
- break;
- case UT_HTTPS:
- if (strcmp(token, "https") != 0)
- return pr_val_err("Path is not HTTPS: %s", uri_get_local(uri));
- if (!config_get_http_enabled()) {
- error = cache_check(uri);
- goto end;
- }
- node = cache->https = init_root(cache->https, "https");
- recursive = false;
- break;
- default:
- pr_crit("Unexpected URI type: %d", uri_get_type(uri));
- }
-
- while ((token = strtok_r(NULL, "/", &saveptr)) != NULL) {
- if (node->flags & CNF_FILE) {
- /* node used to be a file, now it's a dir. */
- delete_node_file(cache, node, true);
- node->flags = 0;
- }
-
- HASH_FIND_STR(node->children, token, child);
- if (child == NULL) {
- /* Create child */
- do {
- node = add_child(node, token);
- token = strtok_r(NULL, "/", &saveptr);
- } while (token != NULL);
- goto download;
- }
-
- if (recursive) {
- if (was_recently_downloaded(cache, child) &&
- !child->error) {
- error = 0;
- goto end;
- }
- }
-
- node = child;
- }
-
- if (was_recently_downloaded(cache, node)) {
- error = node->error;
- goto end;
- }
+ error = get_url(uri, cache->tal, &url);
+ if (error)
+ return error;
- if (!recursive && !(node->flags & CNF_FILE)) {
- /* node used to be a dir, now it's a file. */
- delete_node_file(cache, node, false);
+ node = find_node(cache, url);
+ if (node != NULL) {
+ uri_refput(url);
+ if (was_recently_downloaded(cache, node))
+ return node->attempt.result;
+ url = node->url;
+ } else {
+ node = pzalloc(sizeof(struct cache_node));
+ node->url = url;
+ add_node(cache, node);
}
-download:
- switch (uri_get_type(uri)) {
+ switch (uri_get_type(url)) {
case UT_RSYNC:
- error = rsync_download(uri);
+ error = config_get_rsync_enabled()
+ ? rsync_download(url)
+ : cache_check(url);
break;
case UT_HTTPS:
- error = http_download(uri, changed);
+ error = config_get_http_enabled()
+ ? http_download(url, changed)
+ : cache_check(url);
break;
default:
- pr_crit("Unexpected URI type: %d", uri_get_type(uri));
+ pr_crit("Unexpected URI type: %d", uri_get_type(url));
}
- node->error = error;
- node->flags = CNF_DIRECT;
- node->ts_attempt = time(NULL);
- if (node->ts_attempt == ((time_t) -1))
- pr_crit("time(NULL) returned -1");
+ node->attempt.ts = time(NULL);
+ if (node->attempt.ts == (time_t) -1)
+ pr_crit("time(NULL) returned (time_t) -1");
+ node->attempt.result = error;
if (!error) {
- node->flags |= CNF_SUCCESS | (recursive ? 0 : CNF_FILE);
- node->ts_success = node->ts_attempt;
+ node->success.happened = true;
+ node->success.ts = node->attempt.ts;
}
- drop_children(node);
-end:
- free(luri);
return error;
}
static struct cache_node *
choose_better(struct cache_node *old, struct cache_node *new)
{
- if (!(new->flags & CNF_SUCCESS))
+ if (!new->success.happened)
return old;
if (old == NULL)
return new;
* remnant cached ROAs that haven't expired yet.
*/
- if (old->error && !new->error)
+ if (old->attempt.result && !new->attempt.result)
return new;
- if (!old->error && new->error)
+ if (!old->attempt.result && new->attempt.result)
return old;
- return (difftime(old->ts_success, new->ts_success) < 0) ? new : old;
-}
-
-static struct cache_node *
-find_node(struct rpki_cache *cache, struct rpki_uri *uri)
-{
- char *luri, *token, *saveptr;
- struct cache_node *parent, *node;
- bool recursive;
- struct cache_node *result;
-
- luri = uri2luri(uri);
- node = NULL;
- result = NULL;
-
- token = strtok_r(luri, "/", &saveptr);
- if (strcmp(token, cache->tal) != 0)
- pr_crit("Expected TAL %s for path %s.", cache->tal, uri_get_local(uri));
-
- token = strtok_r(NULL, "/", &saveptr);
- switch (uri_get_type(uri)) {
- case UT_RSYNC:
- parent = cache->rsync;
- recursive = true;
- break;
- case UT_HTTPS:
- parent = cache->https;
- recursive = false;
- break;
- default:
- pr_crit("Unexpected URI type: %d", uri_get_type(uri));
- }
-
- if (parent == NULL)
- goto end;
-
- while ((token = strtok_r(NULL, "/", &saveptr)) != NULL) {
- HASH_FIND_STR(parent->children, token, node);
- if (node == NULL)
- goto end;
- if (recursive && (node->flags & CNF_DIRECT))
- result = choose_better(result, node);
- parent = node;
- }
-
- if (!recursive && (node != NULL) && (node->flags & CNF_DIRECT))
- result = choose_better(result, node);
-
-end:
- free(luri);
- return result;
+ return (difftime(old->success.ts, new->success.ts) < 0) ? new : old;
}
struct uri_and_node {
struct uri_and_node *best)
{
struct rpki_uri **uri;
+ struct rpki_uri *url;
struct uri_and_node cursor;
ARRAYLIST_FOREACH(uris, uri) {
cursor.uri = *uri;
- cursor.node = find_node(cache, cursor.uri);
+
+ if (get_url(cursor.uri, cache->tal, &url) != 0)
+ continue;
+ cursor.node = find_node(cache, url);
+ uri_refput(url);
if (cursor.node == NULL)
continue;
+
if (choose_better(best->node, cursor.node) == cursor.node)
*best = cursor;
}
return best.uri;
}
-static void
-__cache_print(struct cache_node *node, unsigned int tabs)
-{
- unsigned int i;
- struct cache_node *child, *tmp;
-
- if (node == NULL)
- return;
-
- for (i = 0; i < tabs; i++)
- printf("\t");
- printf("%s: %sdirect %ssuccess %sfile error:%d\n",
- node->basename,
- (node->flags & CNF_DIRECT) ? "" : "!",
- (node->flags & CNF_SUCCESS) ? "" : "!",
- (node->flags & CNF_FILE) ? "" : "!",
- node->error);
- HASH_ITER(hh, node->children, child, tmp)
- __cache_print(child, tabs + 1);
-}
-
void
cache_print(struct rpki_cache *cache)
{
- __cache_print(cache->rsync, 0);
- __cache_print(cache->https, 0);
-}
-
-/*
- * @force: ignore nonexistent files
- */
-static void
-pb_rm_r(struct path_builder *pb, char const *filename, bool force)
-{
- int error;
-
- error = file_rm_rf(pb->string);
- if (error && !force)
- pr_op_err("Cannot delete %s: %s", pb->string, strerror(error));
-}
-
-enum ctt_status {
- CTTS_STILL,
- CTTS_UP,
- CTTS_DOWN,
-};
-
-struct cache_tree_traverser {
- struct rpki_cache *cache;
- struct cache_node **root;
- struct cache_node *next;
- struct path_builder *pb;
- enum ctt_status status;
-};
-
-static void
-ctt_init(struct cache_tree_traverser *ctt, struct rpki_cache *cache,
- struct cache_node **root, struct path_builder *pb)
-{
- struct cache_node *node;
-
- node = *root;
- if (node != NULL && (pb_append(pb, "a") != 0))
- node = node->parent;
+ struct cache_node *node, *tmp;
- ctt->cache = cache;
- ctt->root = root;
- ctt->next = node;
- ctt->pb = pb;
- ctt->status = CTTS_DOWN;
+ HASH_ITER(hh, cache->ht, node, tmp)
+ printf("- %s (%s): %ssuccess error:%d\n",
+ uri_get_local(node->url),
+ uri_get_global(node->url),
+ node->success.happened ? "" : "!",
+ node->attempt.result);
}
static bool
-is_node_fresh(struct rpki_cache *cache, struct cache_node *node)
+is_node_fresh(struct cache_node *node, time_t epoch)
{
- return was_recently_downloaded(cache, node) && !node->error;
+ /* TODO This is a startup; probably complicate this. */
+ return difftime(epoch, node->attempt.ts) < 0;
}
-/*
- * Assumes @node has not been added to the pb.
- */
-static struct cache_node *
-ctt_delete(struct cache_tree_traverser *ctt, struct cache_node *node)
-{
- struct cache_node *parent, *sibling;
-
- sibling = node->hh.next;
- parent = node->parent;
-
- delete_node(node);
-
- if (sibling != NULL) {
- ctt->status = CTTS_DOWN;
- return sibling;
- }
-
- if (parent != NULL) {
- ctt->status = CTTS_UP;
- return parent;
- }
-
- *ctt->root = NULL;
- return NULL;
-}
-
-/*
- * Assumes @node is not NULL, has yet to be traversed, and is already included
- * in the pb.
- */
-static struct cache_node *
-go_up(struct cache_tree_traverser *ctt, struct cache_node *node)
-{
- if (node->children == NULL && !is_node_fresh(ctt->cache, node)) {
- pb_pop(ctt->pb, true);
- return ctt_delete(ctt, node);
- }
-
- ctt->status = CTTS_STILL;
- return node;
-}
-
-static struct cache_node *
-find_first_viable_child(struct cache_tree_traverser *ctt,
- struct cache_node *node)
+static time_t
+get_days_ago(int days)
{
- struct cache_node *child, *tmp;
-
- HASH_ITER(hh, node->children, child, tmp) {
- if (pb_append(ctt->pb, child->basename) == 0)
- return child;
- delete_node(child); /* Unviable */
- }
-
- return NULL;
-}
-
-/*
- * Assumes @node is not NULL, has yet to be traversed, and has not yet been
- * added to the pb.
- */
-static struct cache_node *
-go_down(struct cache_tree_traverser *ctt, struct cache_node *node)
-{
- struct cache_node *child;
-
- if (pb_append(ctt->pb, node->basename) != 0)
- return ctt_delete(ctt, node);
-
- do {
- if (is_node_fresh(ctt->cache, node)) {
- drop_children(node);
- ctt->status = CTTS_STILL;
- return node;
- }
-
- child = find_first_viable_child(ctt, node);
- if (child == NULL) {
- /* Welp; stale and no children. */
- ctt->status = CTTS_UP;
- return node;
- }
-
- node = child;
- } while (true);
-}
-
-/*
- * - Depth-first, post-order, non-recursive, safe [1] traversal.
- * - However, deletion is the only allowed modification during the traversal.
- * - If the node is fresh [2], it will have no children.
- * (Because they would be redundant.)
- * (Childless nodes do not imply corresponding childless directories.)
- * - If the node is not fresh, it WILL have children.
- * (Stale [3] nodes are always sustained by fresh descendant nodes.)
- * - The ctt will automatically clean up unviable [4] and unsustained stale
- * nodes during the traversal, caller doesn't have to worry about them.
- * - The ctt's pb will be updated at all times, caller should not modify the
- * string.
- *
- * [1] Safe = caller can delete the returned node via delete_node(), during
- * iteration.
- * [2] Fresh = Mapped to a file or directory that was downloaded/updated
- * successfully at some point since the beginning of the iteration.
- * [3] Stale = Not fresh
- * [4] Unviable = Node's path is too long, ie. cannot be mapped to a cache file.
- */
-static struct cache_node *
-ctt_next(struct cache_tree_traverser *ctt)
-{
- struct cache_node *next = ctt->next;
-
- if (next == NULL)
- return NULL;
-
- pb_pop(ctt->pb, true);
-
- do {
- if (ctt->status == CTTS_DOWN)
- next = go_down(ctt, next);
- else if (ctt->status == CTTS_UP)
- next = go_up(ctt, next);
-
- if (next == NULL) {
- ctt->next = NULL;
- return NULL;
- }
- } while (ctt->status != CTTS_STILL);
+ time_t tt_now, last_week;
+ struct tm tm;
+ int error;
- if (next->hh.next != NULL) {
- ctt->next = next->hh.next;
- ctt->status = CTTS_DOWN;
- } else {
- ctt->next = next->parent;
- ctt->status = CTTS_UP;
+ tt_now = time(NULL);
+ if (tt_now == (time_t) -1)
+ pr_crit("time(NULL) returned (time_t) -1.");
+ if (localtime_r(&tt_now, &tm) == NULL) {
+ error = errno;
+ pr_crit("localtime_r(tt, &tm) returned error: %s",
+ strerror(error));
}
+ tm.tm_mday -= days;
+ last_week = mktime(&tm);
+ if (last_week == (time_t) -1)
+ pr_crit("mktime(tm) returned (time_t) -1.");
- return next;
+ return last_week;
}
static void
-cleanup_tree(struct rpki_cache *cache, struct cache_node **root,
- char const *treename)
+cleanup_node(struct rpki_cache *cache, struct cache_node *node,
+ time_t last_week)
{
- struct cache_tree_traverser ctt;
- struct path_builder pb;
- struct stat meta;
- DIR *dir;
- struct dirent *file;
- struct cache_node *node, *child, *tmp;
int error;
- if (pb_init_cache(&pb, cache->tal, NULL) != 0)
+ error = file_exists(uri_get_local(node->url));
+ switch (error) {
+ case 0:
+ break;
+ case ENOENT:
+ /* Node exists but file doesn't: Delete node */
+ delete_node(cache, node);
return;
-
- ctt_init(&ctt, cache, root, &pb);
-
- while ((node = ctt_next(&ctt)) != NULL) {
- if (stat(pb.string, &meta) != 0) {
- error = errno;
- if (error == ENOENT) {
- /* Node exists but file doesn't: Delete node */
- delete_node(node);
- continue;
- }
-
- pr_op_err("Cannot clean up '%s'; stat() returned errno %d: %s",
- pb.string, error, strerror(error));
- continue;
- }
-
- if (!node->children)
- continue; /* Node represents file, file does exist. */
- /* Node represents directory. */
-
- if (!S_ISDIR(meta.st_mode)) {
- /* File is not a directory; welp. */
- remove(pb.string);
- delete_node(node);
- continue;
- }
-
- dir = opendir(pb.string);
- if (dir == NULL) {
- error = errno;
- pr_op_err("Cannot clean up '%s'; S_ISDIR() but !opendir(): %s",
- pb.string, strerror(error));
- continue; /* AAAAAAAAAAAAAAAAAH */
- }
-
- FOREACH_DIR_FILE(dir, file) {
- if (S_ISDOTS(file))
- continue;
-
- HASH_FIND_STR(node->children, file->d_name, child);
- if (child != NULL) {
- child->flags |= CNF_FOUND;
- } else {
- /* File child's node does not exist: Delete. */
- if (pb_append(&pb, file->d_name) == 0) {
- pb_rm_r(&pb, file->d_name, false);
- pb_pop(&pb, true);
- }
- }
- }
-
- error = errno;
- closedir(dir);
- if (error) {
- pr_op_err("Cannot clean up directory (basename is '%s'): %s",
- node->basename, strerror(error));
- HASH_ITER(hh, node->children, child, tmp)
- child->flags &= ~CNF_FOUND;
- continue; /* AAAAAAAAAAAAAAAAAH */
- }
-
- HASH_ITER(hh, node->children, child, tmp) {
- if (child->flags & CNF_FOUND) {
- /*
- * File child still exists, which means there's
- * at least one active descendant.
- * Clean the flag and keep the node.
- */
- child->flags &= ~CNF_FOUND;
- } else {
- /* Node child's file does not exist: Delete. */
- delete_node(child);
- }
- }
-
- if (node->children == NULL) {
- /* Node is inactive and we rm'd its children: Delete. */
- pb_rm_r(&pb, node->basename, false);
- delete_node(node);
- }
+ default:
+ pr_op_err("Trouble cleaning '%s'; stat() returned errno %d: %s",
+ uri_op_get_printable(node->url), error, strerror(error));
}
- if ((*root) == NULL && pb_append(&pb, treename) == 0)
- pb_rm_r(&pb, treename, true);
-
- pb_cleanup(&pb);
+ if (!is_node_fresh(node, last_week)) {
+ file_rm_rf(uri_get_local(node->url));
+ delete_node(cache, node);
+ }
}
void
-cache_cleanup(void)
+cache_cleanup(struct rpki_cache *cache)
{
- struct rpki_cache *cache = validation_cache(state_retrieve());
- cleanup_tree(cache, &cache->rsync, "rsync");
- cleanup_tree(cache, &cache->https, "https");
+ struct cache_node *node, *tmp;
+ time_t last_week;
+
+ last_week = get_days_ago(7);
+ HASH_ITER(hh, cache->ht, node, tmp)
+ cleanup_node(cache, node, last_week);
+
write_metadata_json(cache);
}
#include <check.h>
#include <stdarg.h>
+#include <sys/queue.h>
#include "alloc.c"
#include "common.c"
-#include "file.c"
#include "json_util.c"
#include "mock.c"
#include "data_structure/path_builder.c"
/* Mocks */
-struct rpki_cache *cache;
+#define TAL_FILE "test.tal"
-MOCK(state_retrieve, struct validation *, NULL, void)
-MOCK(validation_cache, struct rpki_cache *, cache, struct validation *state)
-MOCK(validation_tal, struct tal *, NULL, struct validation *state)
-MOCK(tal_get_file_name, char const *, "test.tal", struct tal *tal)
+struct rpki_cache *cache;
-static unsigned int dl_count; /* Times the download function was called */
static bool dl_error; /* Download should return error? */
-int
-rsync_download(struct rpki_uri *uri)
-{
- char *cmd;
- int printed;
+struct downloaded_path {
+ char *path;
+ bool visited;
+ SLIST_ENTRY(downloaded_path) hook;
+};
- dl_count++;
- if (dl_error)
- return -EINVAL;
-
- cmd = pmalloc(128);
- printed = snprintf(cmd, 128, "mkdir -p %s", uri_get_local(uri));
- ck_assert(printed < 128);
+/* Paths downloaded during the test */
+static SLIST_HEAD(downloaded_paths, downloaded_path) downloaded;
- ck_assert_int_eq(0, system(cmd));
+unsigned int rsync_counter; /* Times the rsync function was called */
+unsigned int https_counter; /* Times the https function was called */
- free(cmd);
- return 0;
+int
+file_exists(char const *file)
+{
+ struct downloaded_path *path;
+ SLIST_FOREACH(path, &downloaded, hook)
+ if (strcmp(file, path->path) == 0)
+ return 0;
+ return ENOENT;
}
int
-http_download(struct rpki_uri *uri, bool *changed)
+file_rm_rf(char const *file)
+{
+ struct downloaded_path *path;
+ SLIST_FOREACH(path, &downloaded, hook)
+ if (strcmp(file, path->path) == 0) {
+ SLIST_REMOVE(&downloaded, path, downloaded_path, hook);
+ free(path->path);
+ free(path);
+ return 0;
+ }
+ return ENOENT;
+}
+
+static int
+pretend_download(struct rpki_uri *uri)
{
- char *cmd;
- int printed;
- int error;
+ struct downloaded_path *dl;
- dl_count++;
if (dl_error)
return -EINVAL;
+ if (file_exists(uri_get_local(uri)) == 0)
+ return 0;
- cmd = pmalloc(128);
- printed = snprintf(cmd, 128,
- /* "create file, but only if it's not already a directory" */
- "test ! -d %s && install -D /dev/null %s",
- uri_get_local(uri), uri_get_local(uri));
- ck_assert(printed < 128);
+ dl = pmalloc(sizeof(struct downloaded_path));
+ dl->path = pstrdup(uri_get_local(uri));
+ dl->visited = false;
+ SLIST_INSERT_HEAD(&downloaded, dl, hook);
+ return 0;
+}
- error = system(cmd);
+int
+rsync_download(struct rpki_uri *uri)
+{
+ rsync_counter++;
+ return pretend_download(uri);
+}
- free(cmd);
+int
+http_download(struct rpki_uri *uri, bool *changed)
+{
+ int error;
+ https_counter++;
+ error = pretend_download(uri);
+ if (changed != NULL)
+ *changed = error ? false : true;
return error;
}
/* Helpers */
-static const int SUCCESS = CNF_DIRECT | CNF_SUCCESS;
-static const int HTTP_SUCCESS = SUCCESS | CNF_FILE;
-
static void
setup_test(void)
{
ck_assert_int_eq(0, system("rm -rf tmp/"));
- dl_error = false;
- cache = cache_create("test.tal");
+ dl_error = false;
+ cache = cache_create(TAL_FILE);
ck_assert_ptr_nonnull(cache);
-}
-
-static bool
-is_rsync(struct cache_node *node)
-{
- while (node->parent != NULL)
- node = node->parent;
- return strcmp(node->basename, "rsync") == 0;
-}
-
-static bool
-is_https(struct cache_node *node)
-{
- while (node->parent != NULL)
- node = node->parent;
- return strcmp(node->basename, "https") == 0;
+ SLIST_INIT(&downloaded);
}
static void
-__download(char const *url, enum uri_type uritype, int expected_error,
- unsigned int expected_cb_count)
+run_cache_download(char const *url, int expected_error,
+ unsigned int rsync_calls, unsigned int https_calls)
{
struct rpki_uri *uri;
+ enum uri_type type;
- ck_assert_int_eq(0, uri_create(&uri, "test.tal", uritype, NULL, url));
- dl_count = 0;
+ if (str_starts_with(url, "https://"))
+ type = UT_HTTPS;
+ else if (str_starts_with(url, "rsync://"))
+ type = UT_RSYNC;
+ else
+ ck_abort_msg("Bad protocol: %s", url);
+ rsync_counter = 0;
+ https_counter = 0;
+
+ ck_assert_int_eq(0, uri_create(&uri, TAL_FILE, type, NULL, url));
ck_assert_int_eq(expected_error, cache_download(cache, uri, NULL));
- ck_assert_uint_eq(expected_cb_count, dl_count);
+ ck_assert_uint_eq(rsync_calls, rsync_counter);
+ ck_assert_uint_eq(https_calls, https_counter);
uri_refput(uri);
}
-#define download_rsync(url, err, ecc) __download(url, UT_RSYNC, err, ecc)
-#define download_https(url, err, ecc) __download(url, UT_HTTPS, err, ecc)
-
static struct cache_node *
-__NODE(char const *basename, int flags, time_t success, time_t attempt,
- int error, ...)
+node(char const *url, time_t attempt, int err, bool succeeded, time_t success)
{
+ enum uri_type type;
struct cache_node *result;
- struct cache_node *child;
- va_list args;
+
+ if (str_starts_with(url, "https://"))
+ type = UT_HTTPS;
+ else if (str_starts_with(url, "rsync://"))
+ type = UT_RSYNC;
+ else
+ ck_abort_msg("Bad protocol: %s", url);
result = pzalloc(sizeof(struct cache_node));
- result->basename = pstrdup(basename);
- result->flags = flags;
- result->ts_success = success;
- result->ts_attempt = attempt;
- result->error = error;
-
- va_start(args, error);
- while ((child = va_arg(args, struct cache_node *)) != NULL) {
- HASH_ADD_KEYPTR(hh, result->children, child->basename,
- strlen(child->basename), child);
- child->parent = result;
- }
- va_end(args);
+ ck_assert_int_eq(0, uri_create(&result->url, TAL_FILE, type, NULL, url));
+ result->attempt.ts = attempt;
+ result->attempt.result = err;
+ result->success.happened = succeeded;
+ result->success.ts = success;
return result;
}
-#define NODE(bs, f, ...) __NODE(bs, f, 0, 0, __VA_ARGS__, NULL)
-/* "Timed" node */
-#define TNODE(bs, f, s, a, ...) __NODE(bs, f, s, a, __VA_ARGS__, NULL)
+#define NODE(url, err, succeeded, has_file) \
+ node(url, has_file, err, succeeded, 0)
static void
-actual_not_found(struct cache_node *expected, char *parent_basename)
+reset_visiteds(void)
{
- ck_abort_msg("Parent '%s' is missing child '%s'", parent_basename,
- expected->basename);
+ struct downloaded_path *path;
+ SLIST_FOREACH(path, &downloaded, hook)
+ path->visited = false;
}
-static void
-expected_not_found(struct cache_node *actual)
+static struct downloaded_path *
+find_downloaded_path(struct cache_node *node)
{
- ck_abort_msg("Parent '%s' has unexpected node '%s'",
- (actual->parent == NULL) ? "root" : actual->parent->basename,
- actual->basename);
+ struct downloaded_path *path;
+
+ SLIST_FOREACH(path, &downloaded, hook)
+ if (strcmp(uri_get_local(node->url), path->path) == 0) {
+ if (path->visited)
+ return NULL;
+ else {
+ path->visited = true;
+ return path;
+ }
+ }
+
+ return NULL;
}
static void
-print_tree(struct cache_node *root, unsigned int tabs)
+fail_if_nonvisited(void)
{
- struct cache_node *cursor, *tmp;
- unsigned int t;
-
- if (root == NULL)
- return;
-
- for (t = 0; t < tabs; t++)
- printf("\t");
- printf("%s\n", root->basename);
-
- HASH_ITER(hh, root->children, cursor, tmp)
- print_tree(cursor, tabs + 1);
+ struct downloaded_path *path;
+ SLIST_FOREACH(path, &downloaded, hook)
+ if (!path->visited)
+ ck_abort_msg("Unexpected cache file: %s", path->path);
}
static void
-validate_node(struct cache_node *expected, struct cache_node *expected_parent,
- struct cache_node *actual, struct path_builder *pb)
+validate_node(struct cache_node *expected, struct cache_node *actual)
{
- struct cache_node *expected_child, *actual_child, *tmp;
-
if (expected == NULL) {
ck_assert_ptr_eq(NULL, actual);
return;
}
- ck_assert_str_eq(expected->basename, actual->basename);
- ck_assert_int_eq(expected->flags, actual->flags);
- if (expected->flags & CNF_DIRECT) {
- /* ck_assert_int_ne(0, actual->ts_attempt); */
- /* ck_assert_int_eq(actual->ts_attempt, actual->ts_success); */
- if (expected->error)
- ck_assert_int_ne(0, actual->error);
- else
- ck_assert_int_eq(0, actual->error);
- } else {
- /* ck_assert_int_eq(0, actual->ts_attempt); */
- /* ck_assert_int_eq(0, actual->ts_success); */
- ck_assert_int_eq(0, actual->error);
- }
- ck_assert_ptr_eq(expected_parent, actual->parent);
-
- ck_assert_int_eq(0, pb_append(pb, expected->basename));
-
- HASH_ITER(hh, expected->children, expected_child, tmp) {
- HASH_FIND_STR(actual->children, expected_child->basename,
- actual_child);
- if (actual_child == NULL)
- actual_not_found(expected_child, actual->basename);
- validate_node(expected_child, actual, actual_child, pb);
- }
-
- HASH_ITER(hh, actual->children, actual_child, tmp) {
- HASH_FIND_STR(expected->children, actual_child->basename,
- expected_child);
- if (expected_child == NULL)
- expected_not_found(actual_child);
- }
-
- pb_pop(pb, true);
+ ck_assert_str_eq(uri_get_global(expected->url), uri_get_global(actual->url));
+ /* ck_assert_int_eq(expected->attempt.ts, actual->attempt.ts); */
+ ck_assert_int_eq(expected->attempt.result, actual->attempt.result);
+ ck_assert_int_eq(expected->success.happened, actual->success.happened);
+ /* ck_assert_int_eq(expected->success.ts, actual->success.ts); */
}
static void
-search_dir(DIR *parent, char const *path, char const *name)
+validate_cache(int trash, ...)
{
- struct dirent *file;
- int error;
-
- rewinddir(parent);
- FOREACH_DIR_FILE(parent, file) {
- if (S_ISDOTS(file))
- continue;
-
- if (strcmp(name, file->d_name) == 0)
- return;
- }
-
- error = errno;
- ck_assert_int_eq(0, error);
+ struct cache_node *expected = NULL;
+ struct cache_node *e, *a, *tmp;
+ struct downloaded_path *path;
+ char const *key;
+ va_list args;
- ck_abort_msg("File %s/%s doesn't exist", path, name);
-}
+ printf("------------------------------\n");
+ printf("Expected nodes:\n");
-static void
-validate_file(struct cache_node *expected, struct path_builder *pb,
- char const *tree)
-{
- struct stat meta;
- DIR *dir;
- struct dirent *file;
- struct cache_node *child, *tmp;
- int error;
+ va_start(args, trash);
+ while ((e = va_arg(args, struct cache_node *)) != NULL) {
+		printf("- %s %s attempt:%u success:%u\n",
+		    uri_get_global(e->url), uri_get_local(e->url),
+		    e->attempt.result, e->success.happened);
- if (expected == NULL) {
-// pb_append(pb, tree);
-// if (stat(pb->string, &meta) != 0) {
-// error = errno;
-// ck_assert_int_eq(ENOENT, error);
-// pb_pop(pb, true);
-// return;
-// }
-// ck_abort_msg("'%s' exists, but it shouldn't.", pb->string);
- return;
+ key = uri_get_global(e->url);
+ HASH_ADD_KEYPTR(hh, expected, key, strlen(key), e);
}
+ va_end(args);
+ printf("\n");
- ck_assert_int_eq(0, pb_append(pb, expected->basename));
-
- if (is_rsync(expected)) {
- /* Currently, the unit tests do not fake rsync files */
- goto must_be_dir;
-
- } else if (is_https(expected)) {
- if (expected->flags & CNF_DIRECT) {
- if (expected->error == 0)
- goto must_be_file; /* Because HTTP */
- else
- goto end;
+ printf("Actual nodes:\n");
+ HASH_ITER(hh, cache->ht, a, tmp)
+ printf("- %s %s attempt:%u success:%u\n",
+ uri_get_global(a->url), uri_get_local(a->url),
+ a->attempt.result, a->success.happened);
+ printf("\n");
+
+ printf("Files in cache:\n");
+ SLIST_FOREACH(path, &downloaded, hook)
+ printf("- %s\n", path->path);
+ printf("\n");
+
+ /* Compare expected and cache */
+ reset_visiteds();
+
+ HASH_ITER(hh, expected, e, tmp) {
+ path = find_downloaded_path(e);
+ if (e->attempt.ts) { /* "if should have cache file" */
+ if (path == NULL)
+ ck_abort_msg("Cached file is missing: %s",
+ uri_get_local(e->url));
+ path->visited = true;
} else {
- goto must_be_dir; /* Because HTTP */
+ if (path != NULL) {
+ ck_abort_msg("Cached file should not exist: %s",
+ path->path);
+ }
}
- } else {
- ck_abort_msg("Not rsync nor httpd");
}
-must_be_file:
- ck_assert_int_eq(0, stat(pb->string, &meta));
- ck_assert_int_eq(1, S_ISREG(meta.st_mode));
- goto end;
-
-must_be_dir:
- errno = 0;
- dir = opendir(pb->string);
- error = errno;
- ck_assert_int_eq(0, error);
- ck_assert_ptr_nonnull(dir);
-
- FOREACH_DIR_FILE(dir, file) {
- if (S_ISDOTS(file))
- continue;
-
- HASH_FIND_STR(expected->children, file->d_name, child);
- if (child == NULL) {
- ck_abort_msg("file %s/%s is not supposed to exist.",
- pb->string, file->d_name);
- }
-
- validate_file(child, pb, tree);
- }
- error = errno;
- ck_assert_int_eq(0, error);
+ fail_if_nonvisited();
- HASH_ITER(hh, expected->children, child, tmp)
- search_dir(dir, pb->string, child->basename);
+ /* Compare expected and actual */
+ HASH_ITER(hh, cache->ht, a, tmp) {
+ key = uri_get_global(a->url);
+ HASH_FIND_STR(expected, key, e);
+ if (e == NULL)
+ ck_abort_msg("Unexpected actual: %s", key);
- closedir(dir);
-end:
- pb_pop(pb, true);
-}
+ validate_node(e, a);
-static void
-validate_trees(struct cache_node *actual, struct cache_node *nodes,
- struct cache_node *files)
-{
- struct path_builder pb;
-
- printf("------------------------------\n");
- printf("Expected nodes:\n");
- print_tree(nodes, 1);
- printf("Actual nodes:\n");
- print_tree(actual, 1);
- if (nodes != files) {
- printf("Expected files:\n");
- print_tree(files, 0);
+ HASH_DEL(expected, e);
+ uri_refput(e->url);
+ free(e);
}
- printf("Actual files:\n");
- file_ls_R("tmp");
- pb_init(&pb);
- ck_assert_int_eq(0, pb_append(&pb, "tmp"));
- ck_assert_int_eq(0, pb_append(&pb, "test.tal"));
-
- validate_node(nodes, NULL, actual, &pb);
- validate_file(files, &pb, (actual != NULL) ? actual->basename : NULL);
-
- pb_cleanup(&pb);
-
- delete_node(nodes);
- if (nodes != files)
- delete_node(files);
+ if (HASH_COUNT(expected) != 0)
+		ck_abort_msg("Actual node is missing: %s",
+		    uri_get_global(expected->url));
}
static void
-validate_tree(struct cache_node *actual, struct cache_node *expected)
+new_iteration(bool outdate)
{
- validate_trees(actual, expected, expected);
+ struct cache_node *node, *tmp;
+ time_t epoch;
+
+ epoch = outdate ? get_days_ago(30) : get_days_ago(1);
+ HASH_ITER(hh, cache->ht, node, tmp)
+ node->attempt.ts = epoch;
}
-static void
-set_times(struct cache_node *node, time_t tm)
+void
+cache_reset(struct rpki_cache *cache)
{
- struct cache_node *child, *tmp;
-
- if (node == NULL)
- return;
-
- node->ts_success = tm;
- node->ts_attempt = tm;
- HASH_ITER(hh, node->children, child, tmp)
- set_times(child, tm);
+ struct cache_node *node, *tmp;
+ HASH_ITER(hh, cache->ht, node, tmp)
+ delete_node(cache, node);
}
static void
-new_iteration(struct rpki_cache *cache)
+cleanup_test(void)
{
- cache->startup_time = time(NULL);
- ck_assert_int_ne((time_t) -1, cache->startup_time);
+ struct downloaded_path *path;
- /* Ensure the old ts_successes and ts_attempts are outdated */
- set_times(cache->rsync, cache->startup_time - 100);
- set_times(cache->https, cache->startup_time - 100);
-}
+ dl_error = false;
+ cache_destroy(cache);
-static void
-cache_reset(struct rpki_cache *cache)
-{
- delete_node(cache->rsync);
- cache->rsync = NULL;
- delete_node(cache->https);
- cache->https = NULL;
+ while (!SLIST_EMPTY(&downloaded)) {
+ path = SLIST_FIRST(&downloaded);
+ SLIST_REMOVE_HEAD(&downloaded, hook);
+ free(path->path);
+ free(path);
+ }
}
/* Tests */
{
setup_test();
- download_rsync("rsync://a.b.c/d/e", 0, 1);
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", SUCCESS, 0)))));
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
+ validate_cache(0, NODE("rsync://a.b.c/d/", 0, 1, true), NULL);
/* Redownload same file, nothing should happen */
- download_rsync("rsync://a.b.c/d/e", 0, 0);
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", SUCCESS, 0)))));
+ run_cache_download("rsync://a.b.c/d", 0, 0, 0);
+ validate_cache(0, NODE("rsync://a.b.c/d/", 0, 1, true), NULL);
/*
- * For better *and* worse, rsyncs are recursive, which means if we've
- * been recently asked to download e, we needn't bother redownloading
- * e/f.
+ * rsyncs are recursive, which means if we've been recently asked to
+ * download d, we needn't bother redownloading d/e.
*/
- download_rsync("rsync://a.b.c/d/e/f", 0, 0);
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", SUCCESS, 0)))));
+ run_cache_download("rsync://a.b.c/d/e", 0, 0, 0);
+ validate_cache(0, NODE("rsync://a.b.c/d/", 0, 1, true), NULL);
/*
- * The trees will *look* different, because the tree will get trimmed,
- * while the filesystem will not.
+ * rsyncs get truncated, because it results in much faster
+ * synchronization in practice.
+ * This is not defined in any RFCs; it's an effective standard,
+ * and there would be consequences for violating it.
*/
- download_rsync("rsync://a.b.c/d", 0, 1);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", 0, 0))))
- );
-
- download_rsync("rsync://a.b.c/e", 0, 1);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", SUCCESS, 0))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", 0, 0)),
- NODE("e", 0, 0)))
- );
-
- download_rsync("rsync://x.y.z/e", 0, 1);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", SUCCESS, 0)),
- NODE("x.y.z", 0, 0,
- NODE("e", SUCCESS, 0))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", 0, 0)),
- NODE("e", 0, 0)),
- NODE("x.y.z", 0, 0,
- NODE("e", 0, 0))));
-
- cache_destroy(cache);
+ run_cache_download("rsync://x.y.z/m/n/o", 0, 1, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://x.y.z/m/", 0, 1, true),
+ NULL);
+
+ /* Sibling */
+ run_cache_download("rsync://a.b.c/e/f", 0, 1, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", 0, 1, true),
+ NODE("rsync://x.y.z/m/", 0, 1, true),
+ NULL);
+
+ cleanup_test();
}
END_TEST
setup_test();
dl_error = false;
- download_rsync("rsync://a.b.c/d", 0, 1);
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
dl_error = true;
- download_rsync("rsync://a.b.c/e", -EINVAL, 1);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
+ run_cache_download("rsync://a.b.c/e", -EINVAL, 1, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", -EINVAL, 0, false),
+ NULL);
/* Regardless of error, not reattempted because same iteration */
dl_error = true;
- download_rsync("rsync://a.b.c/e", -EINVAL, 0);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
+ run_cache_download("rsync://a.b.c/e", -EINVAL, 0, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", -EINVAL, 0, false),
+ NULL);
dl_error = false;
- download_rsync("rsync://a.b.c/e", -EINVAL, 0);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
+ run_cache_download("rsync://a.b.c/e", -EINVAL, 0, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", -EINVAL, 0, false),
+ NULL);
- cache_destroy(cache);
+ cleanup_test();
}
END_TEST
* First iteration: Tree is created. No prunes, because nothing's
* outdated.
*/
- new_iteration(cache);
- download_rsync("rsync://a.b.c/d", 0, 1);
- download_rsync("rsync://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
+ run_cache_download("rsync://a.b.c/e", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", 0, 1, true),
+ NULL);
/* One iteration with no changes, for paranoia */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/d", 0, 1);
- download_rsync("rsync://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
+ run_cache_download("rsync://a.b.c/e", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", 0, 1, true),
+ NULL);
/* Add one sibling */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/d", 0, 1);
- download_rsync("rsync://a.b.c/e", 0, 1);
- download_rsync("rsync://a.b.c/f", 0, 1);
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", SUCCESS, 0),
- NODE("f", SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
+ run_cache_download("rsync://a.b.c/e", 0, 1, 0);
+ run_cache_download("rsync://a.b.c/f", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", 0, 1, true),
+ NODE("rsync://a.b.c/f/", 0, 1, true),
+ NULL);
+
+ /* Nodes don't get updated, but they're still too young. */
+ new_iteration(false);
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", 0, 1, true),
+ NODE("rsync://a.b.c/f/", 0, 1, true),
+ NULL);
/* Remove some branches */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/d", 0, 1);
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("rsync://a.b.c/d/", 0, 1, true), NULL);
/* Remove old branch and add sibling at the same time */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0))));
-
- /* Add a child to the same branch, do not update the old one */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/e/f/g", 0, 1);
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0,
- NODE("f", 0, 0,
- NODE("g", SUCCESS, 0))))));
-
- /*
- * Download parent, do not update child.
- * Child's node should be deleted (because we don't need it anymore),
- * but its file should persist (because it should be retained as its
- * parent's descendant).
- */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/e/f", 0, 1);
- cache_cleanup();
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0,
- NODE("f", SUCCESS, 0)))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0,
- NODE("f", 0, 0,
- NODE("g", SUCCESS, 0))))));
-
- /* Do it again. Node should die, all descendant files should persist. */
- new_iteration(cache);
- download_rsync("rsync://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0,
- NODE("f", 0, 0,
- NODE("g", SUCCESS, 0))))));
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/e", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("rsync://a.b.c/e/", 0, 1, true), NULL);
+
+ /* Try child */
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/e/f/g", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("rsync://a.b.c/e/", 0, 1, true), NULL);
+
+ /* Parent again */
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/e", 0, 1, 0);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("rsync://a.b.c/e/", 0, 1, true), NULL);
/* Empty the tree */
- new_iteration(cache);
- cache_cleanup();
- validate_tree(cache->rsync, NULL);
+ new_iteration(true);
+ cache_cleanup(cache);
+ validate_cache(0, NULL);
/* Node exists, but file doesn't */
- printf("Tmp files:\n");
- file_ls_R("tmp");
- new_iteration(cache);
- download_rsync("rsync://a.b.c/e", 0, 1);
- download_rsync("rsync://a.b.c/f/g/h", 0, 1);
-
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0),
- NODE("f", 0, 0,
- NODE("g", 0, 0,
- NODE("h", SUCCESS, 0))))));
- ck_assert_int_eq(0, system("rm -rf tmp/test.tal/rsync/a.b.c/f/g"));
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("rsync://a.b.c/e", 0, 1, 0);
+ run_cache_download("rsync://a.b.c/f", 0, 1, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/e/", 0, 1, true),
+ NODE("rsync://a.b.c/f/", 0, 1, true),
+ NULL);
+ ck_assert_int_eq(0, file_rm_rf("tmp/" TAL_FILE "/rsync/a.b.c/f"));
+ cache_cleanup(cache);
+ validate_cache(0, NODE("rsync://a.b.c/e/", 0, 1, true), NULL);
- cache_destroy(cache);
+ cleanup_test();
}
END_TEST
/* Set up */
dl_error = false;
- download_rsync("rsync://a.b.c/d", 0, 1);
+ run_cache_download("rsync://a.b.c/d", 0, 1, 0);
dl_error = true;
- download_rsync("rsync://a.b.c/e", -EINVAL, 1);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
+ run_cache_download("rsync://a.b.c/e", -EINVAL, 1, 0);
+ validate_cache(0,
+ NODE("rsync://a.b.c/d/", 0, 1, true),
+ NODE("rsync://a.b.c/e/", -EINVAL, 0, false),
+ NULL);
+
+ /* Node gets deleted because cached file doesn't exist */
+ cache_cleanup(cache);
+ validate_cache(0, NODE("rsync://a.b.c/d/", 0, 1, true), NULL);
/*
- * I originally intended this test to delete e because of the error,
- * but it actually gets deleted because the file doesn't exist.
- * Which is fine; we should test that too. We'll try d next, which
- * does have a file.
+ * Node and file do not get deleted, because the failure is still not
+ * that old.
+ * Deletion does not depend on success or failure.
*/
- cache_cleanup();
- validate_tree(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
-
- /* Fail d */
- new_iteration(cache);
+ new_iteration(false);
dl_error = true;
- download_rsync("rsync://a.b.c/d", -EINVAL, 1);
- validate_trees(cache->rsync,
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", CNF_DIRECT, -EINVAL))),
- NODE("rsync", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", SUCCESS, 0))));
-
- /* Clean up d because of error */
- cache_cleanup();
- validate_tree(cache->rsync, NULL);
+ run_cache_download("rsync://a.b.c/d", -EINVAL, 1, 0);
+ validate_cache(0, NODE("rsync://a.b.c/d/", -EINVAL, 1, true), NULL);
- cache_destroy(cache);
+ /* Error is old; gets deleted */
+ new_iteration(true);
+ cache_cleanup(cache);
+ validate_cache(0, NULL);
+
+ cleanup_test();
}
END_TEST
setup_test();
/* Download *file* e. */
- download_https("https://a.b.c/d/e", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", HTTP_SUCCESS, 0)))));
-
- /* e is now a dir; need to replace it. */
- download_https("https://a.b.c/d/e/f", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", 0, 0,
- NODE("e", 0, 0,
- NODE("f", HTTP_SUCCESS, 0))))));
-
- /* d is now a file; need to replace it. */
- download_https("https://a.b.c/d", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
+ run_cache_download("https://a.b.c/d/e", 0, 0, 1);
+ validate_cache(0, NODE("https://a.b.c/d/e", 0, 1, 1), NULL);
/* Download something else 1 */
- download_https("https://a.b.c/e", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", HTTP_SUCCESS, 0))));
+ run_cache_download("https://a.b.c/e", 0, 0, 1);
+ validate_cache(0,
+ NODE("https://a.b.c/d/e", 0, 1, 1),
+ NODE("https://a.b.c/e", 0, 1, 1),
+ NULL);
/* Download something else 2 */
- download_https("https://x.y.z/e", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", HTTP_SUCCESS, 0)),
- NODE("x.y.z", 0, 0,
- NODE("e", HTTP_SUCCESS, 0))));
+ run_cache_download("https://x.y.z/e", 0, 0, 1);
+ validate_cache(0,
+ NODE("https://a.b.c/d/e", 0, 1, 1),
+ NODE("https://a.b.c/e", 0, 1, 1),
+ NODE("https://x.y.z/e", 0, 1, 1),
+ NULL);
- cache_destroy(cache);
+ cleanup_test();
}
END_TEST
setup_test();
dl_error = false;
- download_https("https://a.b.c/d", 0, 1);
+ run_cache_download("https://a.b.c/d", 0, 0, 1);
dl_error = true;
- download_https("https://a.b.c/e", -EINVAL, 1);
- validate_trees(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
+ run_cache_download("https://a.b.c/e", -EINVAL, 0, 1);
+ validate_cache(0,
+ NODE("https://a.b.c/d", 0, 1, 1),
+ NODE("https://a.b.c/e", -EINVAL, 0, 0),
+ NULL);
/* Regardless of error, not reattempted because same iteration */
dl_error = true;
- download_https("https://a.b.c/e", -EINVAL, 0);
- validate_trees(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
-
+ run_cache_download("https://a.b.c/d", 0, 0, 0);
dl_error = false;
- download_https("https://a.b.c/e", -EINVAL, 0);
- validate_trees(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
+ run_cache_download("https://a.b.c/e", -EINVAL, 0, 0);
+ validate_cache(0,
+ NODE("https://a.b.c/d", 0, 1, 1),
+ NODE("https://a.b.c/e", -EINVAL, 0, 0),
+ NULL);
- cache_destroy(cache);
+ cleanup_test();
}
END_TEST
setup_test();
/* First iteration; make a tree and clean it */
- new_iteration(cache);
- download_https("https://a.b.c/d", 0, 1);
- download_https("https://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", HTTP_SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("https://a.b.c/d", 0, 0, 1);
+ run_cache_download("https://a.b.c/e", 0, 0, 1);
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("https://a.b.c/d", 0, 1, 1),
+ NODE("https://a.b.c/e", 0, 1, 1),
+ NULL);
/* Remove one branch */
- new_iteration(cache);
- download_https("https://a.b.c/d", 0, 1);
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("https://a.b.c/d", 0, 0, 1);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("https://a.b.c/d", 0, 1, 1), NULL);
/* Change the one branch */
- new_iteration(cache);
- download_https("https://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", HTTP_SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("https://a.b.c/e", 0, 0, 1);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("https://a.b.c/e", 0, 1, 1), NULL);
- /*
- * Add a child to the same branch, do not update the old one
- */
- new_iteration(cache);
- download_https("https://a.b.c/e/f/g", 0, 1);
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", 0, 0,
- NODE("f", 0, 0,
- NODE("g", HTTP_SUCCESS, 0))))));
+ /* Add a child to the same branch, do not update the old one */
+ new_iteration(true);
+ run_cache_download("https://a.b.c/e/f/g", 0, 0, 1);
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("https://a.b.c/e/f/g", 0, 1, 1), NULL);
/*
* Download parent, do not update child.
* Children need to die, because parent is now a file.
*/
- new_iteration(cache);
- download_https("https://a.b.c/e/f", 0, 1);
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", 0, 0,
- NODE("f", HTTP_SUCCESS, 0)))));
+ new_iteration(true);
+ run_cache_download("https://a.b.c/e/f", 0, 0, 1);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("https://a.b.c/e/f", 0, 1, 1), NULL);
/* Do it again. */
- new_iteration(cache);
- download_https("https://a.b.c/e", 0, 1);
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", HTTP_SUCCESS, 0))));
+ new_iteration(true);
+ run_cache_download("https://a.b.c/e", 0, 0, 1);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("https://a.b.c/e", 0, 1, 1), NULL);
/* Empty the tree */
- new_iteration(cache);
- cache_cleanup();
- validate_tree(cache->https, NULL);
+ new_iteration(true);
+ cache_cleanup(cache);
+ validate_cache(0, NULL);
/* Node exists, but file doesn't */
- new_iteration(cache);
- download_https("https://a.b.c/e", 0, 1);
- download_https("https://a.b.c/f/g/h", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", HTTP_SUCCESS, 0),
- NODE("f", 0, 0,
- NODE("g", 0, 0,
- NODE("h", HTTP_SUCCESS, 0))))));
- ck_assert_int_eq(0, system("rm -rf tmp/test.tal/https/a.b.c/f/g"));
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", HTTP_SUCCESS, 0))));
-
- cache_destroy(cache);
+ new_iteration(true);
+ run_cache_download("https://a.b.c/e", 0, 0, 1);
+ run_cache_download("https://a.b.c/f/g/h", 0, 0, 1);
+ validate_cache(0,
+ NODE("https://a.b.c/e", 0, 1, 1),
+ NODE("https://a.b.c/f/g/h", 0, 1, 1),
+ NULL);
+ ck_assert_int_eq(0, file_rm_rf("tmp/" TAL_FILE "/https/a.b.c/f/g/h"));
+ cache_cleanup(cache);
+ validate_cache(0, NODE("https://a.b.c/e", 0, 1, 1), NULL);
+
+ cleanup_test();
}
END_TEST
/* Set up */
dl_error = false;
- download_https("https://a.b.c/d", 0, 1);
+ run_cache_download("https://a.b.c/d", 0, 0, 1);
dl_error = true;
- download_https("https://a.b.c/e", -EINVAL, 1);
- validate_trees(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0),
- NODE("e", CNF_DIRECT, -EINVAL))),
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
+ run_cache_download("https://a.b.c/e", -EINVAL, 0, 1);
+ validate_cache(0,
+ NODE("https://a.b.c/d", 0, 1, 1),
+ NODE("https://a.b.c/e", -EINVAL, 0, 0),
+ NULL);
/* Deleted because file ENOENT. */
- cache_cleanup();
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
+ cache_cleanup(cache);
+ validate_cache(0,
+ NODE("https://a.b.c/d", 0, 1, 1),
+ NULL);
/* Fail d */
- new_iteration(cache);
+ new_iteration(false);
dl_error = true;
- download_https("https://a.b.c/d", -EINVAL, 1);
- validate_trees(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", CNF_DIRECT, -EINVAL))),
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
-
- /* Clean up d because of error */
- cache_cleanup();
- validate_tree(cache->https, NULL);
+ run_cache_download("https://a.b.c/d", -EINVAL, 0, 1);
+ validate_cache(0, NODE("https://a.b.c/d", -EINVAL, 1, 1), NULL);
- cache_destroy(cache);
+ /* Not deleted, because not old */
+ new_iteration(false);
+ cache_cleanup(cache);
+ validate_cache(0, NODE("https://a.b.c/d", -EINVAL, 1, 1), NULL);
+
+ /* Become old */
+ new_iteration(true);
+ cache_cleanup(cache);
+ validate_cache(0, NULL);
+
+ cleanup_test();
}
END_TEST
{
setup_test();
- download_https("https://a.b.c/d", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
-
- download_https("https://a.b.c/d/.", 0, 0);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("d", HTTP_SUCCESS, 0))));
-
- download_https("https://a.b.c/d/..", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", HTTP_SUCCESS, 0)));
-
- download_https("https://a.b.c/./d/../e", 0, 1);
- validate_tree(cache->https,
- NODE("https", 0, 0,
- NODE("a.b.c", 0, 0,
- NODE("e", HTTP_SUCCESS, 0))));
+ run_cache_download("https://a.b.c/d", 0, 0, 1);
+ validate_cache(0, NODE("https://a.b.c/d", 0, 1, 1), NULL);
- cache_destroy(cache);
+ run_cache_download("https://a.b.c/d/.", 0, 0, 0);
+ validate_cache(0, NODE("https://a.b.c/d", 0, 1, 1), NULL);
+
+ run_cache_download("https://a.b.c/d/e/..", 0, 0, 0);
+ validate_cache(0, NODE("https://a.b.c/d", 0, 1, 1), NULL);
+
+ run_cache_download("https://a.b.c/./d/../e", 0, 0, 1);
+ validate_cache(0,
+ NODE("https://a.b.c/d", 0, 1, 1),
+ NODE("https://a.b.c/./d/../e", 0, 1, 1),
+ NULL);
+
+ cleanup_test();
}
END_TEST
START_TEST(test_metadata_json)
{
- const time_t NOW = 1693952610;
json_t *json;
char *str;
setup_test();
ck_assert_int_eq(0, system("rm -rf tmp/"));
- ck_assert_int_eq(0, system("mkdir -p tmp/test.tal"));
-
- cache->rsync = TNODE("rsync", 0, NOW + 0, NOW + 1, 0,
- TNODE("a.b.c", 0, NOW + 2, NOW + 3, 0,
- TNODE("d", SUCCESS, NOW + 4, NOW + 5, 0),
- TNODE("e", CNF_DIRECT, NOW + 6, NOW + 7, 1)),
- TNODE("x.y.z", 0, NOW + 8, NOW + 9, 0,
- TNODE("w", SUCCESS, NOW + 0, NOW + 1, 0)));
- cache->https = TNODE("https", 0, NOW + 2, NOW + 3, 0,
- TNODE("a", 0, NOW + 4, NOW + 5, 0,
- TNODE("b", HTTP_SUCCESS, NOW + 6, NOW + 7, 1),
- TNODE("c", HTTP_SUCCESS, NOW + 8, NOW + 9, 0)));
+ ck_assert_int_eq(0, system("mkdir -p tmp/" TAL_FILE));
+
+ add_node(cache, NODE("rsync://a.b.c/d", 0, 1, 0));
+ add_node(cache, NODE("rsync://a.b.c/e", 1, 0, 0));
+ add_node(cache, NODE("rsync://x.y.z/e", 0, 1, 0));
+ add_node(cache, NODE("https://a/b", 1, 1, 0));
+ add_node(cache, NODE("https://a/c", 0, 1, 0));
json = build_metadata_json(cache);
- ck_assert_int_eq(0, json_dump_file(json, "tmp/test.tal/metadata.json", JSON_COMPACT));
+ ck_assert_int_eq(0, json_dump_file(json, "tmp/" TAL_FILE "/metadata.json", JSON_COMPACT));
str = json_dumps(json, /* JSON_INDENT(4) */ JSON_COMPACT);
/* printf("%s\n", str); */
/* TODO (test) Time zones are hardcoded to CST */
ck_assert_str_eq(
- "[{\"basename\":\"rsync\",\"children\":["
- "{\"basename\":\"a.b.c\",\"children\":["
- "{\"basename\":\"d\",\"direct-download\":true,\"latest-result\":0,\"attempt-timestamp\":\"2023-09-05T16:23:35-0600\",\"successful-download\":true,\"success-timestamp\":\"2023-09-05T16:23:34-0600\"},"
- "{\"basename\":\"e\",\"direct-download\":true,\"latest-result\":1,\"attempt-timestamp\":\"2023-09-05T16:23:37-0600\"}]},"
- "{\"basename\":\"x.y.z\",\"children\":["
- "{\"basename\":\"w\",\"direct-download\":true,\"latest-result\":0,\"attempt-timestamp\":\"2023-09-05T16:23:31-0600\",\"successful-download\":true,\"success-timestamp\":\"2023-09-05T16:23:30-0600\"}]}]},"
- "{\"basename\":\"https\",\"children\":["
- "{\"basename\":\"a\",\"children\":["
- "{\"basename\":\"b\",\"direct-download\":true,\"latest-result\":1,\"attempt-timestamp\":\"2023-09-05T16:23:37-0600\",\"successful-download\":true,\"success-timestamp\":\"2023-09-05T16:23:36-0600\",\"is-file\":true},"
- "{\"basename\":\"c\",\"direct-download\":true,\"latest-result\":0,\"attempt-timestamp\":\"2023-09-05T16:23:39-0600\",\"successful-download\":true,\"success-timestamp\":\"2023-09-05T16:23:38-0600\",\"is-file\":true}]}]}]",
- str);
+ "[{\"url\":\"rsync://a.b.c/d\",\"attempt-timestamp\":\"1969-12-31T18:00:00-0600\",\"attempt-result\":0,\"success-timestamp\":\"1969-12-31T18:00:00-0600\"},"
+ "{\"url\":\"rsync://a.b.c/e\",\"attempt-timestamp\":\"1969-12-31T18:00:00-0600\",\"attempt-result\":1},"
+ "{\"url\":\"rsync://x.y.z/e\",\"attempt-timestamp\":\"1969-12-31T18:00:00-0600\",\"attempt-result\":0,\"success-timestamp\":\"1969-12-31T18:00:00-0600\"},"
+ "{\"url\":\"https://a/b\",\"attempt-timestamp\":\"1969-12-31T18:00:00-0600\",\"attempt-result\":1,\"success-timestamp\":\"1969-12-31T18:00:00-0600\"},"
+ "{\"url\":\"https://a/c\",\"attempt-timestamp\":\"1969-12-31T18:00:00-0600\",\"attempt-result\":0,\"success-timestamp\":\"1969-12-31T18:00:00-0600\"}]",
+ str);
+ printf("%s", str);
free(str);
cache_reset(cache);
load_metadata_json(cache);
- ck_assert_ptr_nonnull(cache->rsync);
- ck_assert_ptr_nonnull(cache->https);
-
- validate_trees(cache->rsync,
- TNODE("rsync", 0, 0, 0, 0,
- TNODE("a.b.c", 0, 0, 0, 0,
- TNODE("d", SUCCESS, NOW + 4, NOW + 5, 0),
- TNODE("e", CNF_DIRECT, NOW + 6, NOW + 7, 1)),
- TNODE("x.y.z", 0, 0, 0, 0,
- TNODE("w", SUCCESS, NOW + 0, NOW + 1, 0))),
- NULL);
- validate_trees(cache->https,
- TNODE("https", 0, 0, 0, 0,
- TNODE("a", 0, 0, 0, 0,
- TNODE("b", HTTP_SUCCESS, NOW + 6, NOW + 7, 1),
- TNODE("c", HTTP_SUCCESS, NOW + 8, NOW + 9, 0))),
- NULL);
-
- cache_destroy(cache);
-}
-END_TEST
+ ck_assert_ptr_nonnull(cache->ht);
-#define INIT(_root) \
- pb_init(&pb); \
- root = _root; \
- ctt_init(&ctt, cache, &root, &pb)
-#define DONE \
- delete_node(root); \
- pb_cleanup(&pb)
-
-#define ASSERT_NEXT_NODE(_basename, _path) \
- node = ctt_next(&ctt); \
- ck_assert_ptr_ne(NULL, node); \
- ck_assert_str_eq(_basename, node->basename); \
- ck_assert_str_eq(_path, pb.string)
-#define ASSERT_NEXT_NULL ck_assert_ptr_eq(NULL, ctt_next(&ctt))
-#define ASSERT_TREE(_root) validate_trees(root, _root, NULL)
-
-#define BRANCH(bn, ...) __NODE(bn, 0, 0, 0, 0, __VA_ARGS__, NULL)
-#define LEAF(bn) __NODE(bn, CNF_DIRECT, now, now, 0, NULL)
-
-START_TEST(test_ctt_traversal)
-{
- struct cache_tree_traverser ctt;
- struct path_builder pb;
- struct cache_node *root;
- struct cache_node *node;
- time_t now;
+ validate_cache(0,
+ NODE("rsync://a.b.c/d", 0, 1, 0),
+ NODE("rsync://a.b.c/e", 1, 0, 0),
+ NODE("rsync://x.y.z/e", 0, 1, 0),
+ NODE("https://a/b", 1, 1, 0),
+ NODE("https://a/c", 0, 1, 0),
+ NULL);
- setup_test();
-
- now = time(NULL);
- ck_assert_int_ne((time_t) -1, now);
-
- INIT(LEAF("a"));
- ASSERT_NEXT_NODE("a", "a");
- ASSERT_NEXT_NULL;
- ASSERT_TREE(LEAF("a"));
- DONE;
-
- INIT(BRANCH("a", LEAF("b")));
- ASSERT_NEXT_NODE("b", "a/b");
- ASSERT_NEXT_NODE("a", "a");
- ASSERT_NEXT_NULL;
- ASSERT_TREE(BRANCH("a", LEAF("b")));
- DONE;
-
- INIT(BRANCH("a",
- BRANCH("b",
- BRANCH("c",
- LEAF("d")))));
- ASSERT_NEXT_NODE("d", "a/b/c/d");
- ASSERT_NEXT_NODE("c", "a/b/c");
- ASSERT_NEXT_NODE("b", "a/b");
- ASSERT_NEXT_NODE("a", "a");
- ASSERT_NEXT_NULL;
- ASSERT_TREE(BRANCH("a",
- BRANCH("b",
- BRANCH("c",
- LEAF("d")))));
- DONE;
-
- INIT(BRANCH("a",
- LEAF("b"),
- BRANCH("c",
- LEAF("d")),
- LEAF("e")));
- ASSERT_NEXT_NODE("b", "a/b");
- ASSERT_NEXT_NODE("d", "a/c/d");
- ASSERT_NEXT_NODE("c", "a/c");
- ASSERT_NEXT_NODE("e", "a/e");
- ASSERT_NEXT_NODE("a", "a");
- ASSERT_NEXT_NULL;
- ASSERT_TREE(BRANCH("a",
- LEAF("b"),
- BRANCH("c",
- LEAF("d")),
- LEAF("e")));
- DONE;
-
- INIT(BRANCH("a",
- BRANCH("b",
- LEAF("c")),
- BRANCH("d",
- LEAF("e")),
- BRANCH("f",
- LEAF("g"))));
- ASSERT_NEXT_NODE("c", "a/b/c");
- ASSERT_NEXT_NODE("b", "a/b");
- ASSERT_NEXT_NODE("e", "a/d/e");
- ASSERT_NEXT_NODE("d", "a/d");
- ASSERT_NEXT_NODE("g", "a/f/g");
- ASSERT_NEXT_NODE("f", "a/f");
- ASSERT_NEXT_NODE("a", "a");
- ASSERT_NEXT_NULL;
- ASSERT_TREE(BRANCH("a",
- BRANCH("b",
- LEAF("c")),
- BRANCH("d",
- LEAF("e")),
- BRANCH("f",
- LEAF("g"))));
- DONE;
-
- INIT(BRANCH("a",
- BRANCH("b",
- LEAF("c")),
- BRANCH("d",
- LEAF("e")),
- BRANCH("f",
- BRANCH("g", NULL))));
- ASSERT_NEXT_NODE("c", "a/b/c");
- ASSERT_NEXT_NODE("b", "a/b");
- ASSERT_NEXT_NODE("e", "a/d/e");
- ASSERT_NEXT_NODE("d", "a/d");
- ASSERT_NEXT_NODE("a", "a");
- ASSERT_NEXT_NULL;
- ASSERT_TREE(BRANCH("a",
- BRANCH("b",
- LEAF("c")),
- BRANCH("d",
- LEAF("e"))));
- DONE;
-
- INIT(NULL);
- ASSERT_NEXT_NULL;
- ck_assert_ptr_eq(NULL, root);
- DONE;
-
- INIT(BRANCH("a", NULL));
- ASSERT_NEXT_NULL;
- ck_assert_ptr_eq(NULL, root);
- DONE;
-
- INIT(BRANCH("a",
- BRANCH("b", NULL),
- BRANCH("c", NULL),
- BRANCH("d", NULL)));
- ASSERT_NEXT_NULL;
- ck_assert_ptr_eq(NULL, root);
- DONE;
-
- cache_destroy(cache);
+ cleanup_test();
}
END_TEST
type = UT_RSYNC;
else
ck_abort_msg("Bad protocol: %s", str);
- ck_assert_int_eq(0, uri_create(&uri, "test.tal", type, NULL, str));
+ ck_assert_int_eq(0, uri_create(&uri, TAL_FILE, type, NULL, str));
uris_add(uris, uri);
}
va_end(args);
/* Only first URI is cached */
cache_reset(cache);
- download_rsync("rsync://a/b/c", 0, 1);
+ run_cache_download("rsync://a/b/c", 0, 1, 0);
PREPARE_URI_LIST(&uris, "rsync://a/b/c", "https://d/e", "https://f");
ck_assert_ptr_eq(uris.array[0], cache_recover(cache, &uris, false));
/* Only second URI is cached */
cache_reset(cache);
- download_https("https://d/e", 0, 1);
+ run_cache_download("https://d/e", 0, 0, 1);
PREPARE_URI_LIST(&uris, "rsync://a/b/c", "https://d/e", "https://f");
ck_assert_ptr_eq(uris.array[1], cache_recover(cache, &uris, false));
/* Only third URI is cached */
cache_reset(cache);
- download_https("https://f", 0, 1);
+ run_cache_download("https://f", 0, 0, 1);
PREPARE_URI_LIST(&uris, "rsync://a/b/c", "https://d/e", "https://f");
ck_assert_ptr_eq(uris.array[2], cache_recover(cache, &uris, false));
/* None was cached */
cache_reset(cache);
- download_rsync("rsync://d/e", 0, 1);
+ run_cache_download("rsync://d/e", 0, 1, 0);
PREPARE_URI_LIST(&uris, "rsync://a/b/c", "https://d/e", "https://f");
ck_assert_ptr_null(cache_recover(cache, &uris, false));
* or deltas.
*/
cache_reset(cache);
- cache->rsync = NODE("rsync", 0, 0,
- NODE("a", 0, 0,
- TNODE("1", SUCCESS, 100, 100, 0),
- TNODE("2", SUCCESS, 100, 100, 1),
- TNODE("3", SUCCESS, 100, 200, 0),
- TNODE("4", SUCCESS, 100, 200, 1),
- TNODE("5", SUCCESS, 200, 100, 0),
- TNODE("6", SUCCESS, 200, 100, 1)),
- NODE("b", 0, 0,
- TNODE("1", CNF_DIRECT, 100, 100, 0),
- TNODE("2", CNF_DIRECT, 100, 100, 1),
- TNODE("3", CNF_DIRECT, 100, 200, 0),
- TNODE("4", CNF_DIRECT, 100, 200, 1),
- TNODE("5", CNF_DIRECT, 200, 100, 0),
- TNODE("6", CNF_DIRECT, 200, 100, 1)),
- TNODE("c", SUCCESS, 300, 300, 0,
- TNODE("1", 0, 0, 0, 0)),
- TNODE("d", SUCCESS, 50, 50, 0,
- TNODE("1", 0, 0, 0, 0)));
+
+ add_node(cache, node("rsync://a/1", 100, 0, 1, 100));
+ add_node(cache, node("rsync://a/2", 100, 1, 1, 100));
+ add_node(cache, node("rsync://a/3", 200, 0, 1, 100));
+ add_node(cache, node("rsync://a/4", 200, 1, 1, 100));
+ add_node(cache, node("rsync://a/5", 100, 0, 1, 200));
+ add_node(cache, node("rsync://a/6", 100, 1, 1, 200));
+ add_node(cache, node("rsync://b/1", 100, 0, 0, 100));
+ add_node(cache, node("rsync://b/2", 100, 1, 0, 100));
+ add_node(cache, node("rsync://b/3", 200, 0, 0, 100));
+ add_node(cache, node("rsync://b/4", 200, 1, 0, 100));
+ add_node(cache, node("rsync://b/5", 100, 0, 0, 200));
+ add_node(cache, node("rsync://b/6", 100, 1, 0, 200));
/* Multiple successful caches: Prioritize the most recent one */
PREPARE_URI_LIST(&uris, "rsync://a/1", "rsync://a/3", "rsync://a/5");
ck_assert_ptr_null(cache_recover(cache, &uris, false));
uris_cleanup(&uris);
- /* Children of downloaded nodes */
- PREPARE_URI_LIST(&uris, "rsync://a/5", "rsync://c/1");
- ck_assert_ptr_eq(uris.array[1], cache_recover(cache, &uris, false));
- uris_cleanup(&uris);
-
- PREPARE_URI_LIST(&uris, "rsync://a/5", "rsync://c/2");
- ck_assert_ptr_eq(uris.array[1], cache_recover(cache, &uris, false));
- uris_cleanup(&uris);
-
- PREPARE_URI_LIST(&uris, "rsync://a/1", "rsync://d/1");
- ck_assert_ptr_eq(uris.array[0], cache_recover(cache, &uris, false));
- uris_cleanup(&uris);
-
- PREPARE_URI_LIST(&uris, "rsync://a/1", "rsync://d/2");
- ck_assert_ptr_eq(uris.array[0], cache_recover(cache, &uris, false));
- uris_cleanup(&uris);
-
/* Try them all at the same time */
PREPARE_URI_LIST(&uris,
"rsync://a", "rsync://a/1", "rsync://a/2", "rsync://a/3",
"rsync://a/4", "rsync://a/5", "rsync://a/6",
"rsync://b", "rsync://b/1", "rsync://b/2", "rsync://b/3",
"rsync://b/4", "rsync://b/5", "rsync://b/6",
- "rsync://c/2", "rsync://d/1", "rsync://e/1");
- ck_assert_ptr_eq(uris.array[14], cache_recover(cache, &uris, false));
+ "rsync://e/1");
+ ck_assert_ptr_eq(uris.array[5], cache_recover(cache, &uris, false));
uris_cleanup(&uris);
-
- struct uri_and_node un = { 0 };
-
- cache_reset(cache);
- cache->rsync = NODE("rsync", 0, 0,
- TNODE("1", CNF_SUCCESS, 200, 200, 0,
- TNODE("2", CNF_DIRECT, 200, 200, 1,
- TNODE("3", SUCCESS, 100, 100, 1,
- TNODE("4", SUCCESS, 200, 200, 1,
- TNODE("5", SUCCESS, 100, 100, 0,
- TNODE("6", SUCCESS, 200, 200, 0)))))));
-
- /* Try them all at the same time */
- PREPARE_URI_LIST(&uris, "rsync://1/2/3/4/5/6");
- __cache_recover(cache, &uris, false, &un);
- ck_assert_ptr_eq(uris.array[0], un.uri);
- ck_assert_str_eq("6", un.node->basename);
- uris_cleanup(&uris);
-
- /* TODO (test) HTTP (non-recursive) */
- /* TODO (test) more variations */
- /* TODO (test) node with DIRECT, then not direct, then DIRECT */
-
- cache_destroy(cache);
+ cleanup_test();
}
END_TEST
+/*
+ * NOTE(review): this function is named thread_pool_suite but builds the
+ * "local-cache" suite — looks like a copy-paste leftover. Renaming would
+ * change the external interface (callers in main), so flag for a follow-up.
+ */
Suite *thread_pool_suite(void)
{
	Suite *suite;
-	TCase *rsync , *https, *dot, *meta, *ctt;
+	TCase *rsync , *https, *dot, *meta, *recover;
	rsync = tcase_create("rsync");
	tcase_add_test(rsync, test_cache_download_rsync);
	meta = tcase_create("metadata.json");
	tcase_add_test(meta, test_metadata_json);
-	ctt = tcase_create("ctt");
-	tcase_add_test(ctt, test_ctt_traversal);
-
-	ctt = tcase_create("recover");
-	tcase_add_test(ctt, test_recover);
+	/* Dedicated "recover" TCase replaces the removed ctt traversal tests. */
+	recover = tcase_create("recover");
+	tcase_add_test(recover, test_recover);
	suite = suite_create("local-cache");
	suite_add_tcase(suite, rsync);
	suite_add_tcase(suite, https);
	suite_add_tcase(suite, dot);
	suite_add_tcase(suite, meta);
-	suite_add_tcase(suite, ctt);
+	suite_add_tcase(suite, recover);
	return suite;
}