#include "cache/local_cache.h"
#include <dirent.h> /* opendir(), readdir(), closedir() */
+#include <jansson.h>
#include <strings.h> /* strcasecmp */
#include <sys/types.h> /* opendir(), closedir(), stat() */
#include <sys/stat.h> /* stat() */
#include <sys/queue.h> /* STAILQ */
-#include <unistd.h> /* stat() */
#include <time.h>
-#include <jansson.h>
+#include <unistd.h> /* stat() */
#include "alloc.h"
#include "file.h"
#include "http/http.h"
#include "rsync/rsync.h"
+/*
+ * Please note: Some of the functions in this module (the ones that have to do
+ * with jansson, both inside and outside of it) are recursive.
+ *
+ * This is fine. Infinite recursion is prevented through path_builder's
+ * MAX_CAPACITY (which is currently defined as 4096), which has to be done
+ * anyway.
+ *
+ * And given that you need at least one character and one slash per directory
+ * level, the maximum allowed recursion level is 2048, which happens to align
+ * with jansson's JSON_PARSER_MAX_DEPTH. (Which is also something we can't
+ * change.)
+ *
+ * FIXME test max recursion
+ */
+
/* FIXME needs locking */
/*
static time_t startup_time; /* When we started the last validation */
-static bool
-is_root(struct cache_node *node)
-{
- return node->parent == NULL;
-}
-
/* Minimizes multiple evaluation */
static struct cache_node *
add_child(struct cache_node *parent, char const *basename)
return root;
}
-/* FIXME recursive */
static void
-delete_node(struct cache_node *node, bool force)
+__delete_node(struct cache_node *node)
{
- struct cache_node *child, *tmp;
+ if (node->parent != NULL)
+ HASH_DEL(node->parent->children, node);
+ free(node->basename);
+ free(node);
- HASH_ITER(hh, node->children, child, tmp)
- delete_node(child, force);
+ if (node == rsync)
+ rsync = NULL;
+ else if (node == https)
+ https = NULL;
+}
- if (force || !is_root(node)) {
- if (node->parent != NULL)
- HASH_DEL(node->parent->children, node);
- free(node->basename);
- free(node);
+static void
+delete_node(struct cache_node *node)
+{
+ struct cache_node *parent;
+
+ if (node == NULL)
+ return;
+
+ if (node->parent != NULL) {
+ HASH_DEL(node->parent->children, node);
+ node->parent = NULL;
}
+
+ do {
+ while (node->children != NULL)
+ node = node->children;
+ parent = node->parent;
+ __delete_node(node);
+ node = parent;
+ } while (node != NULL);
+}
+
+/*
+ * Computes the location of the cache's metadata file:
+ * "<local-repository>/metadata.json".
+ *
+ * On success, returns 0 and stores the heap-allocated path in @filename;
+ * the caller owns it and must free() it (see write_metadata_json()).
+ * On failure, logs the problem and returns path_compile()'s nonzero
+ * errno-style code; @filename is not set.
+ */
+static int
+get_metadata_json_filename(char **filename)
+{
+	struct path_builder pb;
+	int error;
+
+	path_init(&pb);
+	path_append(&pb, config_get_local_repository());
+	path_append(&pb, "metadata.json");
+
+	/* NOTE(review): assumes path_compile() disposes of @pb on both
+	 * success and failure -- TODO confirm, otherwise the error path
+	 * leaks the builder. */
+	error = path_compile(&pb, filename);
+	if (error) {
+		pr_op_err("Unable to build metadata.json's path: %s",
+		    strerror(error));
+	}
+
+	return error;
+}
static int
return node;
cancel:
- delete_node(node, true);
+ delete_node(node);
return NULL;
}
static void
load_metadata_json(void)
{
- /*
- * Note: Loading metadata.json is one of few things Fort can fail at
- * without killing itself. It's just a cache of a cache.
- */
-
- struct path_builder pb;
char *filename;
json_t *root;
json_error_t jerror;
-
struct cache_node *node;
size_t d;
- int error;
+ /*
+ * Note: Loading metadata.json is one of few things Fort can fail at
+ * without killing itself. It's just a cache of a cache.
+ */
- path_init(&pb);
- path_append(&pb, config_get_local_repository());
- path_append(&pb, "metadata.json");
- error = path_compile(&pb, &filename);
- if (error) {
- pr_op_err("Unable to build metadata.json's path: %s",
- strerror(error));
- goto end;
- }
+ if (get_metadata_json_filename(&filename) != 0)
+ return;
root = json_load_file(filename, 0, &jerror);
else {
pr_op_warn("Ignoring unrecognized json node '%s'.",
node->basename);
- delete_node(node, true);
+ delete_node(node);
}
}
end:
json_decref(root);
- if (rsync == NULL)
- rsync = init_root(rsync, "rsync");
- if (https == NULL)
- https = init_root(https, "https");
}
void
return (node->flags & CNF_DIRECT) && (startup_time <= node->ts_attempt);
}
-static void destroy_tree(struct cache_node *);
-
-/* FIXME recursive */
+/*
+ * Deletes all of @node's children (and their entire subtrees), but not
+ * @node itself. HASH_ITER caches the next sibling in @tmp before running
+ * the body, so deleting the current child mid-iteration is safe.
+ */
static void
drop_children(struct cache_node *node)
{
struct cache_node *child, *tmp;
HASH_ITER(hh, node->children, child, tmp)
-		destroy_tree(child);
+		delete_node(child);
}
/**
switch (uri_get_type(uri)) {
case UT_RSYNC:
- node = rsync;
+ node = rsync = init_root(rsync, "rsync");
recursive = true;
break;
case UT_HTTPS:
- node = https;
+ node = https = init_root(https, "https");
recursive = false;
break;
default:
return error;
}
+/*
+ * Computes the node the cleanup should visit after finishing a node whose
+ * next sibling was @sibling and whose parent is @parent (either or both
+ * can be NULL).
+ *
+ * Ancestors that are left without any children on the way up are deleted;
+ * they only existed to host descendants that are now all gone.
+ */
+static struct cache_node *
+find_next(struct cache_node *sibling, struct cache_node *parent)
+{
+	if (sibling != NULL)
+		return sibling;
+
+	while (parent != NULL) {
+		/* Prune now-childless ancestors. */
+		while (!parent->children) {
+			sibling = parent;
+			parent = sibling->parent;
+			delete_node(sibling);
+			if (parent == NULL)
+				return NULL;
+		}
+
+		/* Surviving ancestor: resume at its next sibling, if any. */
+		sibling = parent->hh.next;
+		if (sibling)
+			return sibling;
+
+		parent = parent->parent;
+	}
+
+	return NULL;
+}
+
+/*
+ * Prunes the tree rooted at @node: a node survives only if it was directly
+ * downloaded during the current run without error (see
+ * was_recently_downloaded()); everything else is deleted.
+ * Iterative tree walk; does not recurse.
+ */
+static void cleanup_nodes(struct cache_node *node)
+{
+	struct cache_node *parent, *next;
+
+	while (node != NULL) {
+		if (was_recently_downloaded(node) && !node->error) {
+			/* Fresh download target; its descendants are implied
+			 * by it and no longer need separate nodes. */
+			drop_children(node);
+			node = find_next(node->hh.next, node->parent);
+		} else if (node->children) {
+			/* Stale, but a descendant might be fresh: descend. */
+			node = node->children;
+		} else {
+			/* Stale leaf: capture the advance pointers before
+			 * the node is freed. */
+			parent = node->parent;
+			next = node->hh.next;
+			delete_node(node);
+			node = find_next(next, parent);
+		}
+	}
+}
+
+/*
+ * @force: ignore nonexistent files
+ */
static void
-path_rm_rf(struct path_builder *pb, char const *filename)
+path_rm_r(struct path_builder *pb, char const *filename, bool force)
{
char const *path;
int error;
}
error = file_rm_rf(path);
- if (error)
+ if (error && !force)
pr_op_err("Cannot delete %s: %s", path, strerror(error));
}
-/* FIXME recursive */
+/* Safe removal, Postorder */
+struct cache_tree_traverser {
+	/* Path builder kept in sync with the node currently being yielded. */
+	struct path_builder *pb;
+	/* Node ctt_next() will yield next; NULL once the walk is over. */
+	struct cache_node *next;
+	/*
+	 * true: @next is a sibling of the last yielded node (its basename
+	 * still needs to be appended to @pb).
+	 * false: @next is the last yielded node's parent (its basename is
+	 * already present in @pb).
+	 */
+	bool next_sibling;
+};
+
+/*
+ * Starts a postorder traversal over @node's tree, pre-descending to the
+ * deepest leftmost leaf. On return, @pb holds the path segments of that
+ * leaf's ancestors plus a one-character placeholder, which the first
+ * ctt_next() call pops before appending the leaf's real basename.
+ *
+ * If @node is NULL, ctt->pb is deliberately left unset; ctt_next() checks
+ * ctt->next first and never touches it.
+ */
static void
-cleanup_recursive(struct cache_node *node, struct path_builder *pb)
+ctt_init(struct cache_tree_traverser *ctt, struct cache_node *node,
+    struct path_builder *pb)
{
+	if (node == NULL) {
+		ctt->next = NULL;
+		return;
+	}
+
+	while (node->children != NULL) {
+		/* FIXME We need to recover from path too long... */
+		path_append(pb, node->basename);
+		node = node->children;
+	}
+	/* Placeholder segment; popped by the first ctt_next() call. */
+	path_append(pb, "a");
+	ctt->pb = pb;
+	ctt->next = node;
+	ctt->next_sibling = true;
+}
+
+/*
+ * Yields the next node of the postorder traversal (descendants before
+ * ancestors), leaving ctt->pb holding the yielded node's full path.
+ * Returns NULL once the traversal is done.
+ *
+ * The caller is allowed to delete the yielded node (and its subtree);
+ * the traverser will not touch it again.
+ */
+static struct cache_node *
+ctt_next(struct cache_tree_traverser *ctt)
+{
+	struct cache_node *next = ctt->next;
+
+	if (next == NULL)
+		return NULL;
+
+	/* Pop the previously yielded node (or ctt_init()'s placeholder). */
+	path_pop(ctt->pb, true);
+	if (ctt->next_sibling) {
+		path_append(ctt->pb, next->basename);
+		/*
+		 * Postorder: a sibling's descendants have not been yielded
+		 * yet, so dive to its deepest leftmost leaf first. (Without
+		 * this descent, subtrees of non-leftmost siblings were
+		 * skipped entirely, leaving stale grandchildren behind.)
+		 * FIXME We need to recover from path too long...
+		 */
+		while (next->children != NULL) {
+			next = next->children;
+			path_append(ctt->pb, next->basename);
+		}
+	}
+
+	if (next->hh.next != NULL) {
+		ctt->next = next->hh.next;
+		ctt->next_sibling = true;
+	} else {
+		/* No siblings left; the parent is yielded next, and its
+		 * basename is already in @pb. */
+		ctt->next = next->parent;
+		ctt->next_sibling = false;
+	}
+
+	return next;
+}
+
+/*
+ * Reconciles the tree rooted at @node with the filesystem: files with no
+ * surviving node are rm'd, nodes whose files are gone are deleted.
+ * @name is the root directory's name ("rsync" or "https"); it is used
+ * when @node is NULL and the whole directory must go.
+ *
+ * Walks the tree in postorder (children before parents), so emptied
+ * directories can be removed on the way up.
+ */
+static void cleanup_files(struct cache_node *node, char const *name)
+{
+	struct cache_tree_traverser ctt;
+	struct path_builder pb;
char const *path;
struct stat meta;
DIR *dir;
struct cache_node *child, *tmp;
int error;
-	/* FIXME We need to recover from path too long... */
-	path_append(pb, node->basename);
-	error = path_peek(pb, &path);
-	if (error) {
-		pr_op_err("Cannot clean up directory (basename is '%s'): %s",
-		    node->basename, strerror(error));
-		goto end;
+	path_init(&pb);
+	path_append(&pb, config_get_local_repository());
+
+	if (node == NULL) {
+		/* File might exist but node doesn't: Delete file */
+		path_append(&pb, name);
+		path_rm_r(&pb, name, true);
+		path_cancel(&pb);
+		return;
	}
-	if (stat(path, &meta) != 0) {
-		error = errno;
-		if (error == ENOENT) {
-			/* Node exists but file doesn't: Delete node */
-			delete_node(node, false);
-			goto end;
+	ctt_init(&ctt, node, &pb);
+
+	while ((node = ctt_next(&ctt)) != NULL) {
+		error = path_peek(&pb, &path);
+		if (error) {
+			pr_op_err("Cannot clean up directory (basename is '%s'): %s",
+			    node->basename, strerror(error));
+			break;
		}
-		pr_op_err("Cannot clean up '%s'; stat() returned errno %d: %s",
-		    path, error, strerror(error));
-		goto end;
-	}
+		if (stat(path, &meta) != 0) {
+			error = errno;
+			if (error == ENOENT) {
+				/* Node exists but file doesn't: Delete node */
+				delete_node(node);
+				continue;
+			}
-	if (was_recently_downloaded(node) && !node->error)
-		goto end; /* Node is active (ie. used recently): Keep it. */
+			pr_op_err("Cannot clean up '%s'; stat() returned errno %d: %s",
+			    path, error, strerror(error));
+			break;
+		}
-	/*
-	 * From now on, file exists but node is stale.
-	 * We'll aim to delete both.
-	 */
+		if (!node->children)
+			continue; /* Node represents file, file does exist. */
+		/* Node represents directory. */
-	if (S_ISREG(meta.st_mode)) {
-		/* Both node and file exist, but inactive: Delete */
-		remove(path);
-		delete_node(node, false);
+		if (!S_ISDIR(meta.st_mode)) {
+			/* File is not a directory; welp. */
+			remove(path);
+			delete_node(node);
+			/*
+			 * @node was just freed and @path's file removed;
+			 * falling through to opendir() would be a
+			 * use-after-free on a nonexistent path.
+			 */
+			continue;
+		}
-	} else if (S_ISDIR(meta.st_mode)) {
		dir = opendir(path);
		if (dir == NULL) {
			error = errno;
			pr_op_err("Cannot clean up '%s'; S_ISDIR() but !opendir(): %s",
			    path, strerror(error));
-			goto end;
+			continue; /* AAAAAAAAAAAAAAAAAH */
		}
-		/*
-		 * Directory exists but node is stale.
-		 * A child might be fresh, so recurse.
-		 */
-
+		/* NOTE(review): errno should probably be zeroed before this
+		 * loop; a stale nonzero errno would be misreported below.
+		 * TODO confirm FOREACH_DIR_FILE resets it. */
		FOREACH_DIR_FILE(dir, file) {
			if (S_ISDOTS(file))
				continue;
			HASH_FIND_STR(node->children, file->d_name, child);
			if (child != NULL) {
				child->flags |= CNF_FOUND;
-				/* File child's node does exist: Recurse. */
-				cleanup_recursive(child, pb);
			} else {
				/* File child's node does not exist: Delete. */
-				path_append(pb, file->d_name);
-				path_rm_rf(pb, file->d_name);
-				path_pop(pb, true);
+				path_append(&pb, file->d_name);
+				path_rm_r(&pb, file->d_name, false);
+				path_pop(&pb, true);
			}
-
		}
+
		error = errno;
		closedir(dir);
		if (error) {
			pr_op_err("Cannot clean up directory (basename is '%s'): %s",
			    node->basename, strerror(error));
-			goto end;
+			HASH_ITER(hh, node->children, child, tmp)
+				child->flags &= ~CNF_FOUND;
+			continue; /* AAAAAAAAAAAAAAAAAH */
		}
		HASH_ITER(hh, node->children, child, tmp) {
			child->flags &= ~CNF_FOUND;
			} else {
				/* Node child's file does not exist: Delete. */
-				delete_node(child, false);
+				delete_node(child);
			}
		}
-	if (node->children == NULL && !is_root(node)) {
+		if (node->children == NULL) {
			/* Node is inactive and we rm'd its children: Delete. */
-		path_rm_rf(pb, node->basename);
-		delete_node(node, false);
+			path_rm_r(&pb, node->basename, false);
+			delete_node(node);
		}
-
-	} else {
-		/* Outdated, not file nor directory: Delete. */
-		remove(path);
-		delete_node(node, false);
	}
-end:
-	path_pop(pb, true);
+	path_cancel(&pb);
}
static int
return 0;
}
-/* FIXME recursive */
static json_t *
node2json(struct cache_node *node)
{
{
json_t *child;
+ if (node == NULL)
+ return 0;
child = node2json(node);
if (child == NULL)
return -1;
}
static void
-write_metadata_json(char const *filename)
+write_metadata_json(void)
{
struct json_t *json;
+ char *filename;
json = build_metadata_json();
if (json == NULL)
return;
+ if (get_metadata_json_filename(&filename) != 0)
+ return;
+
if (json_dump_file(json, filename, JSON_COMPACT))
pr_op_err("Unable to write metadata.json; unknown cause.");
+ free(filename);
json_decref(json);
}
void
cache_cleanup(void)
{
- struct path_builder pb;
- char const *json_filename;
- int error;
-
- path_init(&pb);
- path_append(&pb, config_get_local_repository());
-
- cleanup_recursive(rsync, &pb);
- cleanup_recursive(https, &pb);
-
- path_append(&pb, "metadata.json");
- error = path_peek(&pb, &json_filename);
- if (error)
- pr_op_err("Cannot create metadata.json: %s", strerror(error));
- else
- write_metadata_json(json_filename);
-
- path_cancel(&pb);
-}
+ cleanup_nodes(rsync);
+ cleanup_files(rsync, "rsync");
-/* FIXME recursive */
-static void
-destroy_tree(struct cache_node *node)
-{
- if (node == NULL)
- return;
+ cleanup_nodes(https);
+ cleanup_files(https, "https");
- free(node->basename);
- drop_children(node);
- if (node->parent != NULL)
- HASH_DEL(node->parent->children, node);
- free(node);
+ write_metadata_json();
}
void
cache_teardown(void)
{
- destroy_tree(rsync);
- destroy_tree(https);
+ delete_node(rsync);
+ rsync = NULL;
+ delete_node(https);
+ https = NULL;
}
{
struct cache_node *expected_child, *actual_child, *tmp;
+ if (expected == NULL) {
+ ck_assert_ptr_eq(NULL, actual);
+ return;
+ }
+
ck_assert_str_eq(expected->basename, actual->basename);
ck_assert_int_eq(expected->flags, actual->flags);
if (expected->flags & CNF_DIRECT) {
}
static void
-validate_trees(struct cache_node *nodes, struct cache_node *files)
+validate_trees(struct cache_node *actual, struct cache_node *nodes,
+ struct cache_node *files)
{
- struct cache_node *actual;
struct path_builder pb;
- if (is_rsync(nodes))
- actual = rsync;
- else if (is_https(nodes))
- actual = https;
- else
- ck_abort_msg("unknown root node: %s", nodes->basename);
-
printf("------------------------------\n");
printf("Expected nodes:\n");
print_tree(nodes, 1);
path_cancel(&pb);
- destroy_tree(nodes);
+ delete_node(nodes);
if (nodes != files)
- destroy_tree(files);
+ delete_node(files);
}
static void
-validate_tree(struct cache_node *expected)
+validate_tree(struct cache_node *actual, struct cache_node *expected)
{
- validate_trees(expected, expected);
+ validate_trees(actual, expected, expected);
}
static void
backtrack_times(struct cache_node *node)
{
struct cache_node *child, *tmp;
+
+ if (node == NULL)
+ return;
+
node->ts_success -= 1000;
node->ts_attempt -= 1000;
HASH_ITER(hh, node->children, child, tmp)
cache_prepare();
download_rsync("rsync://a.b.c/d/e", 0, 1);
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", 0, 0,
/* Redownload same file, nothing should happen */
download_rsync("rsync://a.b.c/d/e", 0, 0);
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", 0, 0,
* e/f.
*/
download_rsync("rsync://a.b.c/d/e/f", 0, 0);
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", 0, 0,
* while the filesystem will not.
*/
download_rsync("rsync://a.b.c/d", 0, 1);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0))),
);
download_rsync("rsync://a.b.c/e", 0, 1);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
);
download_rsync("rsync://x.y.z/e", 0, 1);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
download_rsync("rsync://a.b.c/d", 0, 1);
dl_error = true;
download_rsync("rsync://a.b.c/e", -EINVAL, 1);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
/* Regardless of error, not reattempted because same iteration */
dl_error = true;
download_rsync("rsync://a.b.c/e", -EINVAL, 0);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
dl_error = false;
download_rsync("rsync://a.b.c/e", -EINVAL, 0);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
download_rsync("rsync://a.b.c/d", 0, 1);
download_rsync("rsync://a.b.c/e", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
download_rsync("rsync://a.b.c/d", 0, 1);
download_rsync("rsync://a.b.c/e", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
download_rsync("rsync://a.b.c/e", 0, 1);
download_rsync("rsync://a.b.c/f", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
__cache_prepare();
download_rsync("rsync://a.b.c/d", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0))));
__cache_prepare();
download_rsync("rsync://a.b.c/e", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", SUCCESS, 0))));
__cache_prepare();
download_rsync("rsync://a.b.c/e/f/g", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", SUCCESS, 0,
__cache_prepare();
download_rsync("rsync://a.b.c/e/f", 0, 1);
cache_cleanup();
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", SUCCESS, 0,
__cache_prepare();
download_rsync("rsync://a.b.c/e", 0, 1);
cache_cleanup();
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", SUCCESS, 0))),
/* Empty the tree */
__cache_prepare();
cache_cleanup();
- validate_tree(NODE("rsync", 0, 0));
+ validate_tree(rsync, NULL);
/* Node exists, but file doesn't */
__cache_prepare();
download_rsync("rsync://a.b.c/e", 0, 1);
download_rsync("rsync://a.b.c/f/g/h", 0, 1);
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", SUCCESS, 0),
NODE("h", SUCCESS, 0))))));
ck_assert_int_eq(0, system("rm -rf tmp/rsync/a.b.c/f/g"));
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", SUCCESS, 0))));
download_rsync("rsync://a.b.c/d", 0, 1);
dl_error = true;
download_rsync("rsync://a.b.c/e", -EINVAL, 1);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0),
* does have a file.
*/
cache_cleanup();
- validate_tree(
+ validate_tree(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", SUCCESS, 0))));
__cache_prepare();
dl_error = true;
download_rsync("rsync://a.b.c/d", -EINVAL, 1);
- validate_trees(
+ validate_trees(rsync,
NODE("rsync", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", CNF_DIRECT, -EINVAL))),
/* Clean up d because of error */
cache_cleanup();
- validate_tree(NODE("rsync", 0, 0));
+ validate_tree(rsync, NULL);
cache_teardown();
}
/* Download *file* e. */
download_https("https://a.b.c/d/e", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", 0, 0,
/* e is now a dir; need to replace it. */
download_https("https://a.b.c/d/e/f", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", 0, 0,
/* d is now a file; need to replace it. */
download_https("https://a.b.c/d", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0))));
/* Download something else 1 */
download_https("https://a.b.c/e", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
/* Download something else 2 */
download_https("https://x.y.z/e", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
download_https("https://a.b.c/d", 0, 1);
dl_error = true;
download_https("https://a.b.c/e", -EINVAL, 1);
- validate_trees(
+ validate_trees(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
/* Regardless of error, not reattempted because same iteration */
dl_error = true;
download_https("https://a.b.c/e", -EINVAL, 0);
- validate_trees(
+ validate_trees(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
dl_error = false;
download_https("https://a.b.c/e", -EINVAL, 0);
- validate_trees(
+ validate_trees(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
download_https("https://a.b.c/d", 0, 1);
download_https("https://a.b.c/e", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
__cache_prepare();
download_https("https://a.b.c/d", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0))));
__cache_prepare();
download_https("https://a.b.c/e", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", HTTP_SUCCESS, 0))));
__cache_prepare();
download_https("https://a.b.c/e/f/g", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", 0, 0,
__cache_prepare();
download_https("https://a.b.c/e/f", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", 0, 0,
__cache_prepare();
download_https("https://a.b.c/e", 0, 1);
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", HTTP_SUCCESS, 0))));
/* Empty the tree */
__cache_prepare();
cache_cleanup();
- validate_tree(NODE("https", 0, 0));
+ validate_tree(https, NULL);
/* Node exists, but file doesn't */
__cache_prepare();
download_https("https://a.b.c/e", 0, 1);
download_https("https://a.b.c/f/g/h", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", HTTP_SUCCESS, 0),
NODE("h", HTTP_SUCCESS, 0))))));
ck_assert_int_eq(0, system("rm -rf tmp/https/a.b.c/f/g"));
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", HTTP_SUCCESS, 0))));
download_https("https://a.b.c/d", 0, 1);
dl_error = true;
download_https("https://a.b.c/e", -EINVAL, 1);
- validate_trees(
+ validate_trees(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0),
/* Deleted because file ENOENT. */
cache_cleanup();
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0))));
__cache_prepare();
dl_error = true;
download_https("https://a.b.c/d", -EINVAL, 1);
- validate_trees(
+ validate_trees(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", CNF_DIRECT, -EINVAL))),
/* Clean up d because of error */
cache_cleanup();
- validate_tree(NODE("https", 0, 0));
+ validate_tree(https, NULL);
cache_teardown();
}
cache_prepare();
download_https("https://a.b.c/d", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0))));
download_https("https://a.b.c/d/.", 0, 0);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("d", HTTP_SUCCESS, 0))));
download_https("https://a.b.c/d/..", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", HTTP_SUCCESS, 0)));
download_https("https://a.b.c/./d/../e", 0, 1);
- validate_tree(
+ validate_tree(https,
NODE("https", 0, 0,
NODE("a.b.c", 0, 0,
NODE("e", HTTP_SUCCESS, 0))));
rsync = https = NULL;
load_metadata_json();
- validate_trees(
+ validate_trees(rsync,
TNODE("rsync", 0, NOW + 0, NOW + 1, 0,
TNODE("a.b.c", 0, NOW + 2, NOW + 3, 0,
TNODE("d", SUCCESS, NOW + 4, NOW + 5, 0),
TNODE("x.y.z", 0, NOW + 8, NOW + 9, 0,
TNODE("w", SUCCESS, NOW + 0, NOW + 1, 0))),
NULL);
- validate_trees(
+ validate_trees(https,
TNODE("https", 0, NOW + 2, NOW + 3, 0,
TNODE("a", 0, NOW + 4, NOW + 5, 0,
TNODE("b", HTTP_SUCCESS, NOW + 6, NOW + 7, 0),
tcase_add_test(https, test_cache_cleanup_https_error);
dot = tcase_create("dot");
- tcase_add_test(https, test_dots);
+ tcase_add_test(dot, test_dots);
meta = tcase_create("metadata.json");
- tcase_add_test(https, test_metadata_json);
+ tcase_add_test(meta, test_metadata_json);
suite = suite_create("local-cache");
suite_add_tcase(suite, rsync);