From: Alberto Leiva Popper
Date: Wed, 13 Sep 2023 18:13:15 +0000 (-0600)
Subject: Remove recursion from cache's non-jansson ops
X-Git-Tag: 1.6.0~69
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=cc2640d02c4f9b627e445a134bd7a49a25f08e81;p=thirdparty%2FFORT-validator.git

Remove recursion from cache's non-jansson ops
---

diff --git a/src/cache/local_cache.c b/src/cache/local_cache.c
index 80a03b85..406f3068 100644
--- a/src/cache/local_cache.c
+++ b/src/cache/local_cache.c
@@ -3,13 +3,13 @@
 #include "cache/local_cache.h"
 
 #include <dirent.h>	/* opendir(), readdir(), closedir() */
+#include 
 #include <strings.h>	/* strcasecmp */
 #include 	/* opendir(), closedir(), stat() */
 #include 	/* stat() */
 #include <sys/queue.h>	/* STAILQ */
-#include 	/* stat() */
 #include 
-#include 
+#include 	/* stat() */
 
 #include "alloc.h"
 #include "file.h"
@@ -19,6 +19,22 @@
 #include "http/http.h"
 #include "rsync/rsync.h"
 
+/*
+ * Please note: Some of the functions in this module (the ones that have to do
+ * with jansson, both inside and outside of it) are recursive.
+ *
+ * This is fine. Infinite recursion is prevented through path_builder's
+ * MAX_CAPACITY (which is currently defined as 4096), which has to be done
+ * anyway.
+ *
+ * And given that you need at least one character and one slash per directory
+ * level, the maximum allowed recursion level is 2048, which happens to align
+ * with jansson's JSON_PARSER_MAX_DEPTH. (Which is also something we can't
+ * change.)
+ *
+ * FIXME test max recursion
+ */
+
 /* FIXME needs locking */
 
 /*
@@ -71,12 +87,6 @@ static struct cache_node *https;
 
 static time_t startup_time;	/* When we started the last validation */
 
-static bool
-is_root(struct cache_node *node)
-{
-	return node->parent == NULL;
-}
-
 /* Minimizes multiple evaluation */
 static struct cache_node *
 add_child(struct cache_node *parent, char const *basename)
@@ -109,21 +119,59 @@ init_root(struct cache_node *root, char const *name)
 	return root;
 }
 
-/* FIXME recursive */
 static void
-delete_node(struct cache_node *node, bool force)
+__delete_node(struct cache_node *node)
 {
-	struct cache_node *child, *tmp;
+	if (node->parent != NULL)
+		HASH_DEL(node->parent->children, node);
+	free(node->basename);
+	free(node);
 
-	HASH_ITER(hh, node->children, child, tmp)
-		delete_node(child, force);
+	if (node == rsync)
+		rsync = NULL;
+	else if (node == https)
+		https = NULL;
+}
 
-	if (force || !is_root(node)) {
-		if (node->parent != NULL)
-			HASH_DEL(node->parent->children, node);
-		free(node->basename);
-		free(node);
+static void
+delete_node(struct cache_node *node)
+{
+	struct cache_node *parent;
+
+	if (node == NULL)
+		return;
+
+	if (node->parent != NULL) {
+		HASH_DEL(node->parent->children, node);
+		node->parent = NULL;
 	}
+
+	do {
+		while (node->children != NULL)
+			node = node->children;
+		parent = node->parent;
+		__delete_node(node);
+		node = parent;
+	} while (node != NULL);
+}
+
+static int
+get_metadata_json_filename(char **filename)
+{
+	struct path_builder pb;
+	int error;
+
+	path_init(&pb);
+	path_append(&pb, config_get_local_repository());
+	path_append(&pb, "metadata.json");
+
+	error = path_compile(&pb, filename);
+	if (error) {
+		pr_op_err("Unable to build metadata.json's path: %s",
+		    strerror(error));
+	}
+
+	return error;
 }
 
 static int
@@ -218,37 +266,26 @@ json2node(json_t *json, struct cache_node *parent)
 	return node;
 
 cancel:
-	delete_node(node, true);
+	delete_node(node);
 	return NULL;
 }
 
 static void
 load_metadata_json(void)
 {
-	/*
-	 * Note: Loading metadata.json is one of few things Fort can fail at
-	 * without killing itself. 
It's just a cache of a cache. - */ - - struct path_builder pb; char *filename; json_t *root; json_error_t jerror; - struct cache_node *node; size_t d; - int error; + /* + * Note: Loading metadata.json is one of few things Fort can fail at + * without killing itself. It's just a cache of a cache. + */ - path_init(&pb); - path_append(&pb, config_get_local_repository()); - path_append(&pb, "metadata.json"); - error = path_compile(&pb, &filename); - if (error) { - pr_op_err("Unable to build metadata.json's path: %s", - strerror(error)); - goto end; - } + if (get_metadata_json_filename(&filename) != 0) + return; root = json_load_file(filename, 0, &jerror); @@ -275,16 +312,12 @@ load_metadata_json(void) else { pr_op_warn("Ignoring unrecognized json node '%s'.", node->basename); - delete_node(node, true); + delete_node(node); } } end: json_decref(root); - if (rsync == NULL) - rsync = init_root(rsync, "rsync"); - if (https == NULL) - https = init_root(https, "https"); } void @@ -340,16 +373,13 @@ was_recently_downloaded(struct cache_node *node) return (node->flags & CNF_DIRECT) && (startup_time <= node->ts_attempt); } -static void destroy_tree(struct cache_node *); - -/* FIXME recursive */ static void drop_children(struct cache_node *node) { struct cache_node *child, *tmp; HASH_ITER(hh, node->children, child, tmp) - destroy_tree(child); + delete_node(child); } /** @@ -372,11 +402,11 @@ cache_download(struct rpki_uri *uri, bool *changed) switch (uri_get_type(uri)) { case UT_RSYNC: - node = rsync; + node = rsync = init_root(rsync, "rsync"); recursive = true; break; case UT_HTTPS: - node = https; + node = https = init_root(https, "https"); recursive = false; break; default: @@ -449,8 +479,55 @@ end: return error; } +static struct cache_node * +find_next(struct cache_node *sibling, struct cache_node *parent) +{ + if (sibling != NULL) + return sibling; + + while (parent != NULL) { + while (!parent->children) { + sibling = parent; + parent = sibling->parent; + delete_node(sibling); + if (parent == NULL) + return NULL; + } + + sibling = parent->hh.next; + if (sibling) + return sibling; + + parent = parent->parent; + } + + return NULL; +} + +static void cleanup_nodes(struct cache_node *node) +{ + struct cache_node *parent, *next; + + while (node != NULL) { + if (was_recently_downloaded(node) && !node->error) { + drop_children(node); + node = find_next(node->hh.next, node->parent); + } else if (node->children) { + node = node->children; + } else { + parent = node->parent; + next = node->hh.next; + delete_node(node); + node = find_next(next, parent); + } + } +} + +/* + * @force: ignore nonexistent files + */ static void -path_rm_rf(struct path_builder *pb, char const *filename) +path_rm_r(struct path_builder *pb, char const *filename, bool force) { char const *path; int error; @@ -463,14 +540,64 @@ path_rm_rf(struct path_builder *pb, char const *filename) } error = file_rm_rf(path); - if (error) + if (error && !force) pr_op_err("Cannot delete %s: %s", path, strerror(error)); } -/* FIXME recursive */ +/* Safe removal, Postorder */ +struct cache_tree_traverser { + struct path_builder *pb; + struct cache_node *next; + bool next_sibling; +}; + static void -cleanup_recursive(struct cache_node *node, struct path_builder *pb) +ctt_init(struct cache_tree_traverser *ctt, struct cache_node *node, + struct path_builder *pb) { + if (node == NULL) { + ctt->next = NULL; + return; + } + + while (node->children != NULL) { + /* FIXME We need to recover from path too long... 
*/ + path_append(pb, node->basename); + node = node->children; + } + path_append(pb, "a"); + ctt->pb = pb; + ctt->next = node; + ctt->next_sibling = true; +} + +static struct cache_node * +ctt_next(struct cache_tree_traverser *ctt) +{ + struct cache_node *next = ctt->next; + + if (next == NULL) + return NULL; + + path_pop(ctt->pb, true); + if (ctt->next_sibling) + path_append(ctt->pb, next->basename); + + if (next->hh.next != NULL) { + ctt->next = next->hh.next; + ctt->next_sibling = true; + } else { + ctt->next = next->parent; + ctt->next_sibling = false; + } + + return next; +} + +static void cleanup_files(struct cache_node *node, char const *name) +{ + struct cache_tree_traverser ctt; + struct path_builder pb; char const *path; struct stat meta; DIR *dir; @@ -478,55 +605,58 @@ cleanup_recursive(struct cache_node *node, struct path_builder *pb) struct cache_node *child, *tmp; int error; - /* FIXME We need to recover from path too long... */ - path_append(pb, node->basename); - error = path_peek(pb, &path); - if (error) { - pr_op_err("Cannot clean up directory (basename is '%s'): %s", - node->basename, strerror(error)); - goto end; + path_init(&pb); + path_append(&pb, config_get_local_repository()); + + if (node == NULL) { + /* File might exist but node doesn't: Delete file */ + path_append(&pb, name); + path_rm_r(&pb, name, true); + path_cancel(&pb); + return; } - if (stat(path, &meta) != 0) { - error = errno; - if (error == ENOENT) { - /* Node exists but file doesn't: Delete node */ - delete_node(node, false); - goto end; + ctt_init(&ctt, node, &pb); + + while ((node = ctt_next(&ctt)) != NULL) { + error = path_peek(&pb, &path); + if (error) { + pr_op_err("Cannot clean up directory (basename is '%s'): %s", + node->basename, strerror(error)); + break; } - pr_op_err("Cannot clean up '%s'; stat() returned errno %d: %s", - path, error, strerror(error)); - goto end; - } + if (stat(path, &meta) != 0) { + error = errno; + if (error == ENOENT) { + /* Node exists but file doesn't: Delete node */ + delete_node(node); + continue; + } - if (was_recently_downloaded(node) && !node->error) - goto end; /* Node is active (ie. used recently): Keep it. */ + pr_op_err("Cannot clean up '%s'; stat() returned errno %d: %s", + path, error, strerror(error)); + break; + } - /* - * From now on, file exists but node is stale. - * We'll aim to delete both. - */ + if (!node->children) + continue; /* Node represents file, file does exist. */ + /* Node represents directory. */ - if (S_ISREG(meta.st_mode)) { - /* Both node and file exist, but inactive: Delete */ - remove(path); - delete_node(node, false); + if (!S_ISDIR(meta.st_mode)) { + /* File is not a directory; welp. */ + remove(path); + delete_node(node); + } - } else if (S_ISDIR(meta.st_mode)) { dir = opendir(path); if (dir == NULL) { error = errno; pr_op_err("Cannot clean up '%s'; S_ISDIR() but !opendir(): %s", path, strerror(error)); - goto end; + continue; /* AAAAAAAAAAAAAAAAAH */ } - /* - * Directory exists but node is stale. - * A child might be fresh, so recurse. - */ - FOREACH_DIR_FILE(dir, file) { if (S_ISDOTS(file)) continue; @@ -534,22 +664,22 @@ cleanup_recursive(struct cache_node *node, struct path_builder *pb) HASH_FIND_STR(node->children, file->d_name, child); if (child != NULL) { child->flags |= CNF_FOUND; - /* File child's node does exist: Recurse. */ - cleanup_recursive(child, pb); } else { /* File child's node does not exist: Delete. 
*/ - path_append(pb, file->d_name); - path_rm_rf(pb, file->d_name); - path_pop(pb, true); + path_append(&pb, file->d_name); + path_rm_r(&pb, file->d_name, false); + path_pop(&pb, true); } - } + error = errno; closedir(dir); if (error) { pr_op_err("Cannot clean up directory (basename is '%s'): %s", node->basename, strerror(error)); - goto end; + HASH_ITER(hh, node->children, child, tmp) + child->flags &= ~CNF_FOUND; + continue; /* AAAAAAAAAAAAAAAAAH */ } HASH_ITER(hh, node->children, child, tmp) { @@ -562,24 +692,18 @@ cleanup_recursive(struct cache_node *node, struct path_builder *pb) child->flags &= ~CNF_FOUND; } else { /* Node child's file does not exist: Delete. */ - delete_node(child, false); + delete_node(child); } } - if (node->children == NULL && !is_root(node)) { + if (node->children == NULL) { /* Node is inactive and we rm'd its children: Delete. */ - path_rm_rf(pb, node->basename); - delete_node(node, false); + path_rm_r(&pb, node->basename, false); + delete_node(node); } - - } else { - /* Outdated, not file nor directory: Delete. */ - remove(path); - delete_node(node, false); } -end: - path_pop(pb, true); + path_cancel(&pb); } static int @@ -599,7 +723,6 @@ tt2json(time_t tt, json_t **result) return 0; } -/* FIXME recursive */ static json_t * node2json(struct cache_node *node) { @@ -690,6 +813,8 @@ append_node(json_t *root, struct cache_node *node, char const *name) { json_t *child; + if (node == NULL) + return 0; child = node2json(node); if (child == NULL) return -1; @@ -723,60 +848,42 @@ build_metadata_json(void) } static void -write_metadata_json(char const *filename) +write_metadata_json(void) { struct json_t *json; + char *filename; json = build_metadata_json(); if (json == NULL) return; + if (get_metadata_json_filename(&filename) != 0) + return; + if (json_dump_file(json, filename, JSON_COMPACT)) pr_op_err("Unable to write metadata.json; unknown cause."); + free(filename); json_decref(json); } void cache_cleanup(void) { - struct path_builder pb; - char const *json_filename; - int error; - - path_init(&pb); - path_append(&pb, config_get_local_repository()); - - cleanup_recursive(rsync, &pb); - cleanup_recursive(https, &pb); - - path_append(&pb, "metadata.json"); - error = path_peek(&pb, &json_filename); - if (error) - pr_op_err("Cannot create metadata.json: %s", strerror(error)); - else - write_metadata_json(json_filename); - - path_cancel(&pb); -} + cleanup_nodes(rsync); + cleanup_files(rsync, "rsync"); -/* FIXME recursive */ -static void -destroy_tree(struct cache_node *node) -{ - if (node == NULL) - return; + cleanup_nodes(https); + cleanup_files(https, "https"); - free(node->basename); - drop_children(node); - if (node->parent != NULL) - HASH_DEL(node->parent->children, node); - free(node); + write_metadata_json(); } void cache_teardown(void) { - destroy_tree(rsync); - destroy_tree(https); + delete_node(rsync); + rsync = NULL; + delete_node(https); + https = NULL; } diff --git a/src/cache/local_cache.h b/src/cache/local_cache.h index 9bf16ef4..bb041ca0 100644 --- a/src/cache/local_cache.h +++ b/src/cache/local_cache.h @@ -14,7 +14,6 @@ int cache_download(struct rpki_uri *uri, bool *); /* FIXME call this */ void cache_cleanup(void); -/* FIXME call this */ void cache_teardown(void); #endif /* SRC_CACHE_LOCAL_CACHE_H_ */ diff --git a/src/main.c b/src/main.c index f60cacb4..2b10c0c7 100644 --- a/src/main.c +++ b/src/main.c @@ -4,6 +4,7 @@ #include "nid.h" #include "thread_var.h" #include "validation_run.h" +#include "cache/local_cache.h" #include "http/http.h" #include 
"incidence/incidence.h" #include "rtr/rtr.h" @@ -121,5 +122,6 @@ revert_config: revert_log: log_teardown(); just_quit: + cache_teardown(); return convert_to_result(error); } diff --git a/test/cache/local_cache_test.c b/test/cache/local_cache_test.c index a2fa9ffe..c01a9267 100644 --- a/test/cache/local_cache_test.c +++ b/test/cache/local_cache_test.c @@ -170,6 +170,11 @@ validate_node(struct cache_node *expected, struct cache_node *expected_parent, { struct cache_node *expected_child, *actual_child, *tmp; + if (expected == NULL) { + ck_assert_ptr_eq(NULL, actual); + return; + } + ck_assert_str_eq(expected->basename, actual->basename); ck_assert_int_eq(expected->flags, actual->flags); if (expected->flags & CNF_DIRECT) { @@ -296,18 +301,11 @@ end: } static void -validate_trees(struct cache_node *nodes, struct cache_node *files) +validate_trees(struct cache_node *actual, struct cache_node *nodes, + struct cache_node *files) { - struct cache_node *actual; struct path_builder pb; - if (is_rsync(nodes)) - actual = rsync; - else if (is_https(nodes)) - actual = https; - else - ck_abort_msg("unknown root node: %s", nodes->basename); - printf("------------------------------\n"); printf("Expected nodes:\n"); print_tree(nodes, 1); @@ -330,21 +328,25 @@ validate_trees(struct cache_node *nodes, struct cache_node *files) path_cancel(&pb); - destroy_tree(nodes); + delete_node(nodes); if (nodes != files) - destroy_tree(files); + delete_node(files); } static void -validate_tree(struct cache_node *expected) +validate_tree(struct cache_node *actual, struct cache_node *expected) { - validate_trees(expected, expected); + validate_trees(actual, expected, expected); } static void backtrack_times(struct cache_node *node) { struct cache_node *child, *tmp; + + if (node == NULL) + return; + node->ts_success -= 1000; node->ts_attempt -= 1000; HASH_ITER(hh, node->children, child, tmp) @@ -370,7 +372,7 @@ START_TEST(test_cache_download_rsync) cache_prepare(); download_rsync("rsync://a.b.c/d/e", 0, 1); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", 0, 0, @@ -378,7 +380,7 @@ START_TEST(test_cache_download_rsync) /* Redownload same file, nothing should happen */ download_rsync("rsync://a.b.c/d/e", 0, 0); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", 0, 0, @@ -390,7 +392,7 @@ START_TEST(test_cache_download_rsync) * e/f. */ download_rsync("rsync://a.b.c/d/e/f", 0, 0); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", 0, 0, @@ -401,7 +403,7 @@ START_TEST(test_cache_download_rsync) * while the filesystem will not. 
*/ download_rsync("rsync://a.b.c/d", 0, 1); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0))), @@ -412,7 +414,7 @@ START_TEST(test_cache_download_rsync) ); download_rsync("rsync://a.b.c/e", 0, 1); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -425,7 +427,7 @@ START_TEST(test_cache_download_rsync) ); download_rsync("rsync://x.y.z/e", 0, 1); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -454,7 +456,7 @@ START_TEST(test_cache_download_rsync_error) download_rsync("rsync://a.b.c/d", 0, 1); dl_error = true; download_rsync("rsync://a.b.c/e", -EINVAL, 1); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -466,7 +468,7 @@ START_TEST(test_cache_download_rsync_error) /* Regardless of error, not reattempted because same iteration */ dl_error = true; download_rsync("rsync://a.b.c/e", -EINVAL, 0); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -477,7 +479,7 @@ START_TEST(test_cache_download_rsync_error) dl_error = false; download_rsync("rsync://a.b.c/e", -EINVAL, 0); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -503,7 +505,7 @@ START_TEST(test_cache_cleanup_rsync) download_rsync("rsync://a.b.c/d", 0, 1); download_rsync("rsync://a.b.c/e", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -514,7 +516,7 @@ START_TEST(test_cache_cleanup_rsync) download_rsync("rsync://a.b.c/d", 0, 1); download_rsync("rsync://a.b.c/e", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -526,7 +528,7 @@ START_TEST(test_cache_cleanup_rsync) download_rsync("rsync://a.b.c/e", 0, 1); download_rsync("rsync://a.b.c/f", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -537,7 +539,7 @@ START_TEST(test_cache_cleanup_rsync) __cache_prepare(); download_rsync("rsync://a.b.c/d", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0)))); @@ -546,7 +548,7 @@ START_TEST(test_cache_cleanup_rsync) __cache_prepare(); download_rsync("rsync://a.b.c/e", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("e", SUCCESS, 0)))); @@ -555,7 +557,7 @@ START_TEST(test_cache_cleanup_rsync) __cache_prepare(); download_rsync("rsync://a.b.c/e/f/g", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("e", SUCCESS, 0, @@ -571,7 +573,7 @@ START_TEST(test_cache_cleanup_rsync) __cache_prepare(); download_rsync("rsync://a.b.c/e/f", 0, 1); cache_cleanup(); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("e", SUCCESS, 0, @@ -586,7 +588,7 @@ START_TEST(test_cache_cleanup_rsync) __cache_prepare(); download_rsync("rsync://a.b.c/e", 0, 1); cache_cleanup(); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("e", SUCCESS, 0))), @@ -599,13 +601,13 @@ START_TEST(test_cache_cleanup_rsync) /* Empty the tree */ __cache_prepare(); cache_cleanup(); - validate_tree(NODE("rsync", 0, 0)); + validate_tree(rsync, NULL); /* Node exists, but 
file doesn't */ __cache_prepare(); download_rsync("rsync://a.b.c/e", 0, 1); download_rsync("rsync://a.b.c/f/g/h", 0, 1); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("e", SUCCESS, 0), @@ -614,7 +616,7 @@ START_TEST(test_cache_cleanup_rsync) NODE("h", SUCCESS, 0)))))); ck_assert_int_eq(0, system("rm -rf tmp/rsync/a.b.c/f/g")); cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("e", SUCCESS, 0)))); @@ -634,7 +636,7 @@ START_TEST(test_cache_cleanup_rsync_error) download_rsync("rsync://a.b.c/d", 0, 1); dl_error = true; download_rsync("rsync://a.b.c/e", -EINVAL, 1); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0), @@ -650,7 +652,7 @@ START_TEST(test_cache_cleanup_rsync_error) * does have a file. */ cache_cleanup(); - validate_tree( + validate_tree(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", SUCCESS, 0)))); @@ -659,7 +661,7 @@ START_TEST(test_cache_cleanup_rsync_error) __cache_prepare(); dl_error = true; download_rsync("rsync://a.b.c/d", -EINVAL, 1); - validate_trees( + validate_trees(rsync, NODE("rsync", 0, 0, NODE("a.b.c", 0, 0, NODE("d", CNF_DIRECT, -EINVAL))), @@ -669,7 +671,7 @@ START_TEST(test_cache_cleanup_rsync_error) /* Clean up d because of error */ cache_cleanup(); - validate_tree(NODE("rsync", 0, 0)); + validate_tree(rsync, NULL); cache_teardown(); } @@ -684,7 +686,7 @@ START_TEST(test_cache_download_https) /* Download *file* e. */ download_https("https://a.b.c/d/e", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", 0, 0, @@ -692,7 +694,7 @@ START_TEST(test_cache_download_https) /* e is now a dir; need to replace it. */ download_https("https://a.b.c/d/e/f", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", 0, 0, @@ -701,14 +703,14 @@ START_TEST(test_cache_download_https) /* d is now a file; need to replace it. 
*/ download_https("https://a.b.c/d", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0)))); /* Download something else 1 */ download_https("https://a.b.c/e", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -716,7 +718,7 @@ START_TEST(test_cache_download_https) /* Download something else 2 */ download_https("https://x.y.z/e", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -738,7 +740,7 @@ START_TEST(test_cache_download_https_error) download_https("https://a.b.c/d", 0, 1); dl_error = true; download_https("https://a.b.c/e", -EINVAL, 1); - validate_trees( + validate_trees(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -750,7 +752,7 @@ START_TEST(test_cache_download_https_error) /* Regardless of error, not reattempted because same iteration */ dl_error = true; download_https("https://a.b.c/e", -EINVAL, 0); - validate_trees( + validate_trees(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -761,7 +763,7 @@ START_TEST(test_cache_download_https_error) dl_error = false; download_https("https://a.b.c/e", -EINVAL, 0); - validate_trees( + validate_trees(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -784,7 +786,7 @@ START_TEST(test_cache_cleanup_https) download_https("https://a.b.c/d", 0, 1); download_https("https://a.b.c/e", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -794,7 +796,7 @@ START_TEST(test_cache_cleanup_https) __cache_prepare(); download_https("https://a.b.c/d", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0)))); @@ -803,7 +805,7 @@ START_TEST(test_cache_cleanup_https) __cache_prepare(); download_https("https://a.b.c/e", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", HTTP_SUCCESS, 0)))); @@ -814,7 +816,7 @@ START_TEST(test_cache_cleanup_https) __cache_prepare(); download_https("https://a.b.c/e/f/g", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", 0, 0, @@ -828,7 +830,7 @@ START_TEST(test_cache_cleanup_https) __cache_prepare(); download_https("https://a.b.c/e/f", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", 0, 0, @@ -838,7 +840,7 @@ START_TEST(test_cache_cleanup_https) __cache_prepare(); download_https("https://a.b.c/e", 0, 1); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", HTTP_SUCCESS, 0)))); @@ -846,13 +848,13 @@ START_TEST(test_cache_cleanup_https) /* Empty the tree */ __cache_prepare(); cache_cleanup(); - validate_tree(NODE("https", 0, 0)); + validate_tree(https, NULL); /* Node exists, but file doesn't */ __cache_prepare(); download_https("https://a.b.c/e", 0, 1); download_https("https://a.b.c/f/g/h", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", HTTP_SUCCESS, 0), @@ -861,7 +863,7 @@ START_TEST(test_cache_cleanup_https) NODE("h", HTTP_SUCCESS, 0)))))); ck_assert_int_eq(0, system("rm -rf tmp/https/a.b.c/f/g")); cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", 
HTTP_SUCCESS, 0)))); @@ -881,7 +883,7 @@ START_TEST(test_cache_cleanup_https_error) download_https("https://a.b.c/d", 0, 1); dl_error = true; download_https("https://a.b.c/e", -EINVAL, 1); - validate_trees( + validate_trees(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0), @@ -892,7 +894,7 @@ START_TEST(test_cache_cleanup_https_error) /* Deleted because file ENOENT. */ cache_cleanup(); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0)))); @@ -901,7 +903,7 @@ START_TEST(test_cache_cleanup_https_error) __cache_prepare(); dl_error = true; download_https("https://a.b.c/d", -EINVAL, 1); - validate_trees( + validate_trees(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", CNF_DIRECT, -EINVAL))), @@ -911,7 +913,7 @@ START_TEST(test_cache_cleanup_https_error) /* Clean up d because of error */ cache_cleanup(); - validate_tree(NODE("https", 0, 0)); + validate_tree(https, NULL); cache_teardown(); } @@ -925,24 +927,24 @@ START_TEST(test_dots) cache_prepare(); download_https("https://a.b.c/d", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0)))); download_https("https://a.b.c/d/.", 0, 0); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("d", HTTP_SUCCESS, 0)))); download_https("https://a.b.c/d/..", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", HTTP_SUCCESS, 0))); download_https("https://a.b.c/./d/../e", 0, 1); - validate_tree( + validate_tree(https, NODE("https", 0, 0, NODE("a.b.c", 0, 0, NODE("e", HTTP_SUCCESS, 0)))); @@ -993,7 +995,7 @@ START_TEST(test_metadata_json) rsync = https = NULL; load_metadata_json(); - validate_trees( + validate_trees(rsync, TNODE("rsync", 0, NOW + 0, NOW + 1, 0, TNODE("a.b.c", 0, NOW + 2, NOW + 3, 0, TNODE("d", SUCCESS, NOW + 4, NOW + 5, 0), @@ -1001,7 +1003,7 @@ START_TEST(test_metadata_json) TNODE("x.y.z", 0, NOW + 8, NOW + 9, 0, TNODE("w", SUCCESS, NOW + 0, NOW + 1, 0))), NULL); - validate_trees( + validate_trees(https, TNODE("https", 0, NOW + 2, NOW + 3, 0, TNODE("a", 0, NOW + 4, NOW + 5, 0, TNODE("b", HTTP_SUCCESS, NOW + 6, NOW + 7, 0), @@ -1030,10 +1032,10 @@ Suite *thread_pool_suite(void) tcase_add_test(https, test_cache_cleanup_https_error); dot = tcase_create("dot"); - tcase_add_test(https, test_dots); + tcase_add_test(dot, test_dots); meta = tcase_create("metadata.json"); - tcase_add_test(https, test_metadata_json); + tcase_add_test(meta, test_metadata_json); suite = suite_create("local-cache"); suite_add_tcase(suite, rsync);
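
The patch above replaces the recursive tree walks (the old delete_node(), destroy_tree() and cleanup_recursive()) with iterative, leaf-first loops: the new delete_node(), find_next()/cleanup_nodes(), and the postorder cache_tree_traverser (ctt_init()/ctt_next()), so stack usage no longer grows with the depth of the cache tree; only the jansson-related code stays recursive, bounded as described by the new comment. The sketch below is not part of the patch: it is a minimal, self-contained illustration of the same leaf-first deletion loop, using made-up toy_node/toy_add/toy_delete names and plain first-child/next-sibling pointers instead of the validator's uthash-based children table.

#include <stdio.h>
#include <stdlib.h>

struct toy_node {
	char const *name;
	struct toy_node *parent;
	struct toy_node *children;	/* First child */
	struct toy_node *next;		/* Next sibling */
};

static struct toy_node *
toy_add(struct toy_node *parent, char const *name)
{
	struct toy_node *node;

	node = calloc(1, sizeof(struct toy_node));
	if (node == NULL)
		abort();

	node->name = name;
	node->parent = parent;
	if (parent != NULL) {
		node->next = parent->children;
		parent->children = node;
	}

	return node;
}

/*
 * Postorder deletion without recursion: keep descending to the first leaf,
 * unlink and free it, then climb one level and try again. Stack usage stays
 * constant no matter how deep the tree is.
 */
static void
toy_delete(struct toy_node *node)
{
	struct toy_node **cursor;
	struct toy_node *parent;

	if (node == NULL)
		return;

	/* Detach node from its parent, so the climb below stops at node. */
	if (node->parent != NULL) {
		cursor = &node->parent->children;
		while (*cursor != node)
			cursor = &(*cursor)->next;
		*cursor = node->next;
		node->parent = NULL;
	}

	do {
		while (node->children != NULL)
			node = node->children;	/* Descend to a leaf */

		parent = node->parent;
		if (parent != NULL)
			parent->children = node->next;	/* Unlink the leaf */
		printf("freeing %s\n", node->name);
		free(node);

		node = parent;	/* Climb, then re-descend into remaining siblings */
	} while (node != NULL);
}

int
main(void)
{
	struct toy_node *root, *host;

	root = toy_add(NULL, "rsync");
	host = toy_add(root, "a.b.c");
	toy_add(host, "d");
	toy_add(host, "e");

	toy_delete(root);	/* Frees the whole tree; no recursion involved */
	return 0;
}

The new delete_node() in the patch has the same shape: descend through node->children to a leaf, let __delete_node() do the HASH_DEL() unlinking and free() (also clearing the rsync/https root pointers), then climb to the parent and repeat until the detached subtree is gone.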