Unstable.
fort_SOURCES += config/output_format.h config/output_format.c
fort_SOURCES += config/str.c config/str.h
fort_SOURCES += config/string_array.h config/string_array.c
+fort_SOURCES += config/time.h config/time.c
fort_SOURCES += config/types.h
fort_SOURCES += config/uint.c config/uint.h
fort_SOURCES += config/work_offline.c config/work_offline.h
fort_SOURCES += resource.h resource.c
fort_SOURCES += resource/ip4.h resource/ip4.c
fort_SOURCES += resource/ip6.h resource/ip6.c
-fort_SOURCES += rpp.h rpp.c
fort_SOURCES += rrdp.h rrdp.c
fort_SOURCES += rsync.h rsync.c
fort_SOURCES += rtr/db/db_table.c rtr/db/db_table.h
fort_SOURCES += types/name.h types/name.c
fort_SOURCES += types/path.h types/path.c
fort_SOURCES += types/router_key.c types/router_key.h
+fort_SOURCES += types/rpp.h types/rpp.c
fort_SOURCES += types/serial.h types/serial.c
fort_SOURCES += types/sorted_array.h types/sorted_array.c
fort_SOURCES += types/str.h types/str.c
dl: download
eof: end of file
err: error
+fb: (Cached) fallback
fd: File Descriptor (see `man 2 accept`)
hdr: header
hh: hash (table) hook
ht: hash table
id: identifier
len: length
+map: mapping (between a URL and a local path in the cache)
max: maximum
min: minimum
msg: message
sdata->certificates->list.count);
}
- error = handle_sdata_certificate(sdata->certificates->list.array[0],
+ return handle_sdata_certificate(sdata->certificates->list.array[0],
ee, sid, encoded, &sinfo->signature);
- if (error)
- return error;
-
- return 0;
}
/*
#include <ftw.h>
#include <stdbool.h>
+#include <sys/queue.h>
#include <sys/stat.h>
#include "alloc.h"
#include "configure_ac.h"
#include "file.h"
#include "http.h"
+#include "json_util.h"
#include "log.h"
-#include "rpp.h"
#include "rrdp.h"
#include "rsync.h"
+#include "types/array.h"
#include "types/path.h"
#include "types/url.h"
#include "types/uthash.h"
struct cache_node {
struct cache_mapping map;
+ /* XXX change to boolean? */
int fresh; /* Refresh already attempted? */
int dlerr; /* Result code of recent download attempt */
time_t mtim; /* Last successful download time, or zero */
struct cache_table {
char const *name;
bool enabled;
- unsigned int next_id;
- size_t pathlen;
+ struct cache_sequence seq;
struct cache_node *nodes; /* Hash Table */
dl_cb download;
};
struct cache_node *fallback;
};
-#define CACHE_METAFILE "cache.json"
-#define TAGNAME_VERSION "fort-version"
+struct cache_commit {
+ char *caRepository;
+ struct cache_mapping *files;
+ size_t nfiles;
+ STAILQ_ENTRY(cache_commit) lh;
+};
-#define CACHEDIR_TAG "CACHEDIR.TAG"
+static STAILQ_HEAD(cache_commits, cache_commit) commits =
+    STAILQ_HEAD_INITIALIZER(commits);
+#define CACHE_METAFILE "cache.json"
+#define TAGNAME_VERSION "fort-version"
#define TAL_METAFILE "tal.json"
-#define TAGNAME_TYPE "type"
-#define TAGNAME_URL "url"
-#define TAGNAME_ATTEMPT_TS "attempt-timestamp"
-#define TAGNAME_ATTEMPT_ERR "attempt-result"
-#define TAGNAME_SUCCESS_TS "success-timestamp"
-#define TAGNAME_NOTIF "notification"
-
-#define TYPEVALUE_TA_HTTP "TA (HTTP)"
-#define TYPEVALUE_RPP "RPP"
-#define TYPEVALUE_NOTIF "RRDP Notification"
#ifdef UNIT_TESTING
static void __delete_node_cb(struct cache_node const *);
HASH_DEL(tbl->nodes, node);
- free(node->map.url);
- free(node->map.path);
- if (node->rrdp)
- rrdp_state_cleanup(node->rrdp);
+ map_cleanup(&node->map);
+ rrdp_state_free(node->rrdp);
free(node);
}
return pb.string;
}
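+/*
+ * Returns a copy of @url truncated to its module component; eg.
+ * "rsync://a.b.c/mod/x/y.cer" -> "rsync://a.b.c/mod". Caller must free it.
+ * Returns NULL (and logs the problem) if @url does not contain a module.
+ */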
+char *
+get_rsync_module(char const *url)
+{
+ array_index u;
+ unsigned int slashes;
+
+ slashes = 0;
+ for (u = 0; url[u] != 0; u++)
+ if (url[u] == '/') {
+ slashes++;
+ if (slashes == 4)
+ return pstrndup(url, u);
+ }
+
+ if (slashes == 3 && url[u - 1] != '/')
+ return pstrdup(url);
+
+ pr_val_err("Url '%s' does not appear to have an rsync module.", url);
+ return NULL;
+}
+
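+/*
+ * Returns the portion of @url that follows the module; eg.
+ * "rsync://a.b.c/mod/x/y.cer" -> "x/y.cer". The result points into @url;
+ * do not free it. Returns NULL if @url does not go deeper than the module.
+ */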
+char const *
+strip_rsync_module(char const *url)
+{
+ array_index u;
+ unsigned int slashes;
+
+ slashes = 0;
+ for (u = 0; url[u] != 0; u++)
+ if (url[u] == '/') {
+ slashes++;
+ if (slashes == 4)
+ return url + u + 1;
+ }
+
+ return NULL;
+}
+
static int
write_simple_file(char const *filename, char const *content)
{
memset(tbl, 0, sizeof(*tbl));
tbl->name = name;
tbl->enabled = enabled;
- tbl->pathlen = strlen(config_get_local_repository()) + strlen(name) + 6;
+ cseq_init(&tbl->seq, path_join(config_get_local_repository(), name));
tbl->download = dl;
}
init_tables(void)
{
init_table(&cache.rsync, "rsync", config_get_rsync_enabled(), dl_rsync);
- init_table(&cache.rsync, "https", config_get_http_enabled(), dl_http);
- init_table(&cache.rsync, "rrdp", config_get_http_enabled(), dl_rrdp);
+ init_table(&cache.https, "https", config_get_http_enabled(), dl_http);
+ init_table(&cache.rrdp, "rrdp", config_get_http_enabled(), dl_rrdp);
init_table(&cache.fallback, "fallback", true, NULL);
}
static void
init_cache_metafile(void)
{
-// char *filename;
-// json_t *root;
-// json_error_t jerror;
-// char const *file_version;
-// int error;
-//
-// filename = get_cache_filename(CACHE_METAFILE, true);
-// root = json_load_file(filename, 0, &jerror);
-//
-// if (root == NULL) {
-// if (json_error_code(&jerror) == json_error_cannot_open_file)
-// pr_op_debug("%s does not exist.", filename);
-// else
-// pr_op_err("Json parsing failure at %s (%d:%d): %s",
-// filename, jerror.line, jerror.column, jerror.text);
-// goto invalid_cache;
-// }
-// if (json_typeof(root) != JSON_OBJECT) {
-// pr_op_err("The root tag of %s is not an object.", filename);
-// goto invalid_cache;
-// }
-//
-// error = json_get_str(root, TAGNAME_VERSION, &file_version);
-// if (error) {
-// if (error > 0)
-// pr_op_err("%s is missing the " TAGNAME_VERSION " tag.",
-// filename);
-// goto invalid_cache;
-// }
-//
-// if (strcmp(file_version, PACKAGE_VERSION) == 0)
-// goto end;
-//
-//invalid_cache:
-// pr_op_info("The cache appears to have been built by a different version of Fort. I'm going to clear it, just to be safe.");
-// file_rm_rf(config_get_local_repository());
-//
-//end: json_decref(root);
-// free(filename);
+ char *filename;
+ json_t *root;
+ json_error_t jerror;
+ char const *file_version;
+ int error;
+
+ filename = get_cache_filename(CACHE_METAFILE, true);
+ root = json_load_file(filename, 0, &jerror);
+
+ if (root == NULL) {
+ if (json_error_code(&jerror) == json_error_cannot_open_file)
+ pr_op_debug("%s does not exist.", filename);
+ else
+ pr_op_err("Json parsing failure at %s (%d:%d): %s",
+ filename, jerror.line, jerror.column, jerror.text);
+ goto invalid_cache;
+ }
+ if (json_typeof(root) != JSON_OBJECT) {
+ pr_op_err("The root tag of %s is not an object.", filename);
+ goto invalid_cache;
+ }
+
+ error = json_get_str(root, TAGNAME_VERSION, &file_version);
+ if (error) {
+ if (error > 0)
+ pr_op_err("%s is missing the " TAGNAME_VERSION " tag.",
+ filename);
+ goto invalid_cache;
+ }
+
+ if (strcmp(file_version, PACKAGE_VERSION) == 0)
+ goto end;
+
+invalid_cache:
+ pr_op_info("The cache appears to have been built by a different version of Fort. I'm going to clear it, just to be safe.");
+ file_rm_rf(config_get_local_repository());
+
+end: json_decref(root);
+ free(filename);
}
static void
{
char *filename;
- filename = get_cache_filename(CACHEDIR_TAG, false);
+ filename = get_cache_filename("CACHEDIR.TAG", false);
if (filename == NULL)
return;
int
cache_setup(void)
{
+ // XXX Lock the cache directory
init_tables();
init_cache_metafile();
init_tmp_dir();
free(filename);
}
-//static char *
-//get_tal_json_filename(void)
-//{
-// struct path_builder pb;
-// return pb_init_cache(&pb, TAL_METAFILE) ? NULL : pb.string;
-//}
-//
-//static struct cache_node *
-//json2node(json_t *json)
-//{
-// struct cache_node *node;
-// char const *type_str;
-// enum map_type type;
-// char const *url;
-// json_t *notif;
-// int error;
-//
-// node = pzalloc(sizeof(struct cache_node));
-//
-// error = json_get_str(json, TAGNAME_TYPE, &type_str);
-// if (error) {
-// if (error > 0)
-// pr_op_err("Node is missing the '" TAGNAME_TYPE "' tag.");
-// goto fail;
-// }
-//
-// if (strcmp(type_str, TYPEVALUE_TA_HTTP) == 0)
-// type = MAP_HTTP;
-// else if (strcmp(type_str, TYPEVALUE_RPP) == 0)
-// type = MAP_RSYNC;
-// else if (strcmp(type_str, TYPEVALUE_NOTIF) == 0)
-// type = MAP_NOTIF;
-// else {
-// pr_op_err("Unknown node type: %s", type_str);
-// goto fail;
-// }
-//
-// error = json_get_str(json, TAGNAME_URL, &url);
-// if (error) {
-// if (error > 0)
-// pr_op_err("Node is missing the '" TAGNAME_URL "' tag.");
-// goto fail;
-// }
-//
-// if (type == MAP_NOTIF) {
-//		error = json_get_object(json, TAGNAME_NOTIF, &notif);
-// switch (error) {
-// case 0:
-// error = rrdp_json2notif(notif, &node->notif);
-// if (error)
-// goto fail;
-// break;
-// case ENOENT:
-// node->notif = NULL;
-// break;
-// default:
-// goto fail;
-// }
-// }
-//
-// error = map_create(&node->map, type, url);
-// if (error) {
-// pr_op_err("Cannot parse '%s' into a URI.", url);
-// goto fail;
-// }
-//
-// error = json_get_ts(json, TAGNAME_ATTEMPT_TS, &node->attempt.ts);
-// if (error) {
-// if (error > 0)
-// pr_op_err("Node '%s' is missing the '"
-// TAGNAME_ATTEMPT_TS "' tag.", url);
-// goto fail;
-// }
-//
-// if (json_get_int(json, TAGNAME_ATTEMPT_ERR, &node->attempt.result) < 0)
-// goto fail;
-//
-// error = json_get_ts(json, TAGNAME_SUCCESS_TS, &node->success.ts);
-// if (error < 0)
-// goto fail;
-// node->success.happened = (error == 0);
-//
-// pr_op_debug("Node '%s' loaded successfully.", url);
-// return node;
-//
-//fail:
-// map_refput(node->map);
-// rrdp_notif_free(node->notif);
-// free(node);
-// return NULL;
-//}
+static char *
+get_tal_json_filename(void)
+{
+ struct path_builder pb;
+ return pb_init_cache(&pb, TAL_METAFILE) ? NULL : pb.string;
+}
+
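+/* Deserializes one cache node from the metafile. Returns NULL on error. */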
+static struct cache_node *
+json2node(json_t *json)
+{
+ struct cache_node *node;
+ char const *str;
+ json_t *rrdp;
+ int error;
+
+ node = pzalloc(sizeof(struct cache_node));
+
+ if (json_get_str(json, "url", &str))
+ goto fail;
+ node->map.url = pstrdup(str);
+ if (json_get_str(json, "path", &str))
+ goto fail;
+ node->map.path = pstrdup(str);
+ if (json_get_int(json, "dlerr", &node->dlerr))
+ goto fail;
+ if (json_get_ts(json, "mtim", &node->mtim))
+ goto fail;
+ error = json_get_object(json, "rrdp", &rrdp);
+ if (error < 0)
+ goto fail;
+ if (error == 0 && rrdp_json2state(rrdp, &node->rrdp))
+ goto fail;
+
+ return node;
+
+fail:	map_cleanup(&node->map);
+	free(node);
+	return NULL;
+}
+
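+/* Loads the table named @tbl->name from the metafile's root object. */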
+static void
+json2tbl(json_t *root, struct cache_table *tbl)
+{
+	json_t *tblobj, *array, *child;
+	int index;
+	struct cache_node *node;
+	size_t urlen;
+
+	// XXX load (and save) seqs
+	if (json_get_object(root, tbl->name, &tblobj))
+		return;
+	if (json_get_ulong(tblobj, "next", &tbl->seq.next_id))
+		return;
+	if (json_get_array(tblobj, "nodes", &array))
+ return;
+
+ json_array_foreach(array, index, child) {
+ node = json2node(child);
+ if (node == NULL)
+ continue;
+ urlen = strlen(node->map.url);
+ // XXX worry about dupes
+ HASH_ADD_KEYPTR(hh, tbl->nodes, node->map.url, urlen, node);
+ }
+}
static void
load_tal_json(void)
{
-// char *filename;
-// json_t *root;
-// json_error_t jerror;
-// size_t n;
-// struct cache_node *node;
-//
-// /*
-// * Note: Loading TAL_METAFILE is one of few things Fort can fail at
-// * without killing itself. It's just a cache of a cache.
-// */
-//
-// filename = get_tal_json_filename();
-// if (filename == NULL)
-// return;
-//
-// pr_op_debug("Loading %s.", filename);
-//
-// root = json_load_file(filename, 0, &jerror);
-//
-// if (root == NULL) {
-// if (json_error_code(&jerror) == json_error_cannot_open_file)
-// pr_op_debug("%s does not exist.", filename);
-// else
-// pr_op_err("Json parsing failure at %s (%d:%d): %s",
-// filename, jerror.line, jerror.column, jerror.text);
-// goto end;
-// }
-// if (json_typeof(root) != JSON_ARRAY) {
-// pr_op_err("The root tag of %s is not an array.", filename);
-// goto end;
-// }
-//
-// for (n = 0; n < json_array_size(root); n++) {
-// node = json2node(json_array_get(root, n));
-// if (node != NULL)
-// add_node(cache, node);
-// }
-//
-//end: json_decref(root);
-// free(filename);
+ char *filename;
+ json_t *root;
+ json_error_t jerror;
+
+ /*
+ * Note: Loading TAL_METAFILE is one of few things Fort can fail at
+ * without killing itself. It's just a cache of a cache.
+ */
+
+ filename = get_tal_json_filename();
+ if (filename == NULL)
+ return;
+
+ pr_op_debug("Loading %s.", filename);
+
+ root = json_load_file(filename, 0, &jerror);
+
+ if (root == NULL) {
+ if (json_error_code(&jerror) == json_error_cannot_open_file)
+ pr_op_debug("%s does not exist.", filename);
+ else
+ pr_op_err("Json parsing failure at %s (%d:%d): %s",
+ filename, jerror.line, jerror.column, jerror.text);
+ goto end;
+ }
+ if (json_typeof(root) != JSON_OBJECT) {
+ pr_op_err("The root tag of %s is not an object.", filename);
+ goto end;
+ }
+
+ json2tbl(root, &cache.rsync);
+ json2tbl(root, &cache.https);
+ json2tbl(root, &cache.rrdp);
+ json2tbl(root, &cache.fallback);
+
+end: json_decref(root);
+ free(filename);
}
void
cache_prepare(void)
{
- memset(&cache, 0, sizeof(cache));
load_tal_json();
}
-//static json_t *
-//node2json(struct cache_node *node)
-//{
-// json_t *json;
-// char const *type;
-// json_t *notification;
-//
-// json = json_obj_new();
-// if (json == NULL)
-// return NULL;
-//
-// switch (map_get_type(node->map)) {
-// case MAP_HTTP:
-// type = TYPEVALUE_TA_HTTP;
-// break;
-// case MAP_RSYNC:
-// type = TYPEVALUE_RPP;
-// break;
-// case MAP_NOTIF:
-// type = TYPEVALUE_NOTIF;
-// break;
-// default:
-// goto cancel;
-// }
-//
-// if (json_add_str(json, TAGNAME_TYPE, type))
-// goto cancel;
-// if (json_add_str(json, TAGNAME_URL, map_get_url(node->map)))
-// goto cancel;
-// if (node->notif != NULL) {
-// notification = rrdp_notif2json(node->notif);
-// if (json_object_add(json, TAGNAME_NOTIF, notification))
-// goto cancel;
-// }
-// if (json_add_ts(json, TAGNAME_ATTEMPT_TS, node->attempt.ts))
-// goto cancel;
-// if (json_add_int(json, TAGNAME_ATTEMPT_ERR, node->attempt.result))
-// goto cancel;
-// if (node->success.happened)
-// if (json_add_ts(json, TAGNAME_SUCCESS_TS, node->success.ts))
-// goto cancel;
-//
-// return json;
-//
-//cancel:
-// json_decref(json);
-// return NULL;
-//}
-//
-//static json_t *
-//build_tal_json(struct rpki_cache *cache)
-//{
-// struct cache_node *node, *tmp;
-// json_t *root, *child;
-//
-// root = json_array_new();
-// if (root == NULL)
-// return NULL;
-//
-// HASH_ITER(hh, cache->ht, node, tmp) {
-// child = node2json(node);
-// if (child != NULL && json_array_append_new(root, child)) {
-// pr_op_err("Cannot push %s json node into json root; unknown cause.",
-// map_op_get_printable(node->map));
-// continue;
-// }
-// }
-//
-// return root;
-//}
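+/* Serializes @node for the metafile. Returns NULL on error. */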
+static json_t *
+node2json(struct cache_node *node)
+{
+ json_t *json;
+
+ json = json_obj_new();
+ if (json == NULL)
+ return NULL;
+
+ if (json_add_str(json, "url", node->map.url))
+ goto fail;
+ if (json_add_str(json, "path", node->map.path))
+ goto fail;
+ if (json_add_int(json, "dlerr", node->dlerr)) // XXX relevant?
+ goto fail;
+ if (json_add_ts(json, "mtim", node->mtim))
+ goto fail;
+ if (node->rrdp)
+ if (json_object_add(json, "rrdp", rrdp_state2json(node->rrdp)))
+ goto fail;
+
+ return json;
+
+fail: json_decref(json);
+ return NULL;
+}
+
+static json_t *
+tbl2json(struct cache_table *tbl)
+{
+ struct json_t *json, *nodes;
+ struct cache_node *node, *tmp;
+
+ json = json_obj_new();
+ if (!json)
+ return NULL;
+
+ if (json_add_ulong(json, "next", tbl->seq.next_id))
+ goto fail;
+
+ nodes = json_array_new();
+ if (!nodes)
+ goto fail;
+ if (json_object_add(json, "nodes", nodes))
+ goto fail;
+
+ HASH_ITER(hh, tbl->nodes, node, tmp)
+ if (json_array_add(nodes, node2json(node)))
+ goto fail;
+
+ return json;
+
+fail: json_decref(json);
+ return NULL;
+}
+
+static json_t *
+build_tal_json(void)
+{
+ json_t *json;
+
+ json = json_obj_new();
+ if (json == NULL)
+ return NULL;
+
+ if (json_object_add(json, "rsync", tbl2json(&cache.rsync)))
+ goto fail;
+ if (json_object_add(json, "https", tbl2json(&cache.https)))
+ goto fail;
+ if (json_object_add(json, "rrdp", tbl2json(&cache.rrdp)))
+ goto fail;
+ if (json_object_add(json, "fallback", tbl2json(&cache.fallback)))
+ goto fail;
+
+ return json;
+
+fail: json_decref(json);
+ return NULL;
+}
static void
write_tal_json(void)
{
-// char *filename;
-// struct json_t *json;
-//
-// json = build_tal_json(cache);
-// if (json == NULL)
-// return;
-//
-// filename = get_tal_json_filename();
-// if (filename == NULL)
-// goto end;
-//
-// if (json_dump_file(json, filename, JSON_INDENT(2)))
-// pr_op_err("Unable to write %s; unknown cause.", filename);
-//
-//end: json_decref(json);
-// free(filename);
+ char *filename;
+ struct json_t *json;
+
+ json = build_tal_json();
+ if (json == NULL)
+ return;
+
+ filename = get_tal_json_filename();
+ if (filename == NULL)
+ goto end;
+
+ if (json_dump_file(json, filename, JSON_INDENT(2)))
+ pr_op_err("Unable to write %s; unknown cause.", filename);
+
+end: json_decref(json);
+ free(filename);
}
static int
{
int error;
- error = rsync_download(&module->map);
+ error = rsync_download(module->map.url, module->map.path);
if (error)
return error;
mtim = time_nonfatal();
-	error = rrdp_update(&notif->map, notif->mtim, &changed, &notif->rrdp);
+	error = rrdp_update(&notif->map, notif->mtim, &changed, &cache.rrdp.seq,
+	    &notif->rrdp);
if (error)
return error;
return node;
}
-static char *
-create_path(struct cache_table *tbl)
-{
- char *path;
- int len;
-
- do {
- path = pmalloc(tbl->pathlen);
-
- len = snprintf(path, tbl->pathlen, "%s/%s/%X",
- config_get_local_repository(), tbl->name, tbl->next_id);
- if (len < 0) {
- pr_val_err("Cannot compute new cache path: Unknown cause.");
- return NULL;
- }
- if (len < tbl->pathlen) {
- tbl->next_id++;
- return path; /* Happy path */
- }
-
- tbl->pathlen++;
- free(path);
- } while (true);
-}
-
static struct cache_node *
provide_node(struct cache_table *tbl, char const *url)
{
node = pzalloc(sizeof(struct cache_node));
node->map.url = pstrdup(url);
- node->map.path = create_path(tbl);
+ node->map.path = cseq_next(&tbl->seq);
if (!node->map.path) {
free(node->map.url);
free(node);
{
struct cache_node *node;
- if (!tbl->enabled)
- return NULL;
-
pr_val_debug("Trying %s (online)...", uri);
- node = provide_node(tbl, uri);
+ if (!tbl->enabled) {
+ pr_val_debug("Protocol disabled.");
+ return NULL;
+ }
+
+ if (tbl == &cache.rsync) {
+ char *module = get_rsync_module(uri);
+ if (module == NULL)
+ return NULL;
+ node = provide_node(tbl, module);
+ free(module);
+ } else {
+ node = provide_node(tbl, uri);
+ }
if (!node)
return NULL;
node = do_refresh(&cache.rsync, url);
// XXX Maybe strdup path so the caller can't corrupt our string
- return node ? node->map.path : NULL;
+ return (node && !node->dlerr) ? node->map.path : NULL;
}
/* Do not free nor modify the result. */
cache_fallback_url(char const *url)
{
struct cache_node *node;
+
+ pr_val_debug("Trying %s (offline)...", url);
+
node = find_node(&cache.fallback, url, strlen(url));
- return node ? node->map.path : NULL;
+ if (!node) {
+ pr_val_debug("Cache data unavailable.");
+ return NULL;
+ }
+
+ return node->map.path;
}
/*
* Attempts to refresh the RPP described by @sias, returns the resulting
- * repository's mapping.
+ * repository's mapper.
*
* XXX Need to normalize the sias.
* XXX Fallback only if parent is fallback
// XXX mutex
// XXX review result signs
// XXX normalize rpkiNotify & caRepository?
- // XXX do module if rsync
cage = pzalloc(sizeof(struct cache_cage));
cage->fallback = get_fallback(sias->caRepository);
{
if (node == NULL)
return NULL;
+ // XXX RRDP is const, rsync needs to be freed
return (node->rrdp)
? /* RRDP */ rrdp_file(node->rrdp, url)
- : /* rsync */ join_paths(node->map.path, url + RPKI_SCHEMA_LEN); // XXX wrong; need to get the module.
+ : /* rsync */ path_join(node->map.path, strip_rsync_module(url));
}
char const *
return file;
}
-/* Returns true if previously enabled */
+/* Returns true if fallback should be attempted */
bool
cage_disable_refresh(struct cache_cage *cage)
{
bool enabled = (cage->refresh != NULL);
cage->refresh = NULL;
- return enabled;
+
+ if (cage->fallback == NULL) {
+ pr_val_debug("There is no fallback.");
+ return false;
+ }
+ if (!enabled) {
+ pr_val_debug("Fallback exhausted.");
+ return false;
+ }
+
+ pr_val_debug("Attempting fallback.");
+ return true;
+}
+
+/*
+ * Steals ownership of @rpp->files and @rpp->nfiles, but they will not be
+ * modified or deleted until the cache cleanup.
+ */
+void
+cache_commit_rpp(char const *caRepository, struct rpp *rpp)
+{
+ struct cache_commit *commit;
+
+ commit = pmalloc(sizeof(struct cache_commit));
+ // XXX missing context
+ commit->caRepository = pstrdup(caRepository);
+ commit->files = rpp->files;
+ commit->nfiles = rpp->nfiles;
+ STAILQ_INSERT_TAIL(&commits, commit, lh);
+
+ rpp->files = NULL;
+ rpp->nfiles = 0;
+}
+
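+/*
+ * Queues a standalone file (ie. a TA) so the cache cleanup hard-links it
+ * into the fallback table.
+ */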
+void
+cache_commit_file(struct cache_mapping *map)
+{
+ struct cache_commit *commit;
+
+ commit = pmalloc(sizeof(struct cache_commit));
+ // XXX missing context
+ commit->caRepository = NULL;
+ commit->files = pmalloc(sizeof(*map));
+ commit->files[0].url = pstrdup(map->url);
+ commit->files[0].path = pstrdup(map->path);
+ commit->nfiles = 1;
+ STAILQ_INSERT_TAIL(&commits, commit, lh);
}
static void
{
struct cache_node *node, *tmp;
- printf("%s (%s):", tbl->name, tbl->enabled ? "enabled" : "disabled");
+ if (HASH_COUNT(tbl->nodes) == 0)
+ return;
+
+ printf(" %s (%s):\n", tbl->name, tbl->enabled ? "enabled" : "disabled");
HASH_ITER(hh, tbl->nodes, node, tmp)
cachent_print(node);
}
free(tmpdir);
}
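+/* Does @path already point inside the fallback directory? */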
+static bool
+is_fallback(char const *path)
+{
+ // XXX just cd to the freaking cache, ffs
+ path += strlen(config_get_local_repository());
+ return str_starts_with(path, "fallback/") ||
+ str_starts_with(path, "/fallback/");
+}
+
+/* Hard-links @rpp's approved files into the fallback directory. */
+static void
+commit_rpp(struct cache_commit *commit, struct cache_node *fb)
+{
+ struct cache_mapping *src;
+ char const *dst;
+ array_index i;
+
+ for (i = 0; i < commit->nfiles; i++) {
+ src = commit->files + i;
+
+ if (is_fallback(src->path))
+ continue;
+
+ /*
+ * (fine)
+ * Note, this is accidentally working perfectly for rsync too.
+ * Might want to rename some of this.
+ */
+		dst = rrdp_create_fallback(fb->map.path, &fb->rrdp, src->url);
+		if (!dst) {
+			free(src->path);
+			src->path = NULL; /* discard_trash() tolerates NULL */
+			continue;
+		}
+
+		pr_op_debug("Hard-linking: %s -> %s", src->path, dst);
+		if (link(src->path, dst) < 0)
+			pr_op_warn("Could not hard-link cache file: %s",
+			    strerror(errno));
+
+		free(src->path);
+		src->path = pstrdup(dst);
+ }
+}
+
+/* Deletes abandoned (ie. no longer ref'd by manifests) fallback hard links. */
+static void
+discard_trash(struct cache_commit *commit, struct cache_node *fallback)
+{
+ DIR *dir;
+ struct dirent *file;
+ char *file_path;
+ array_index i;
+
+ dir = opendir(fallback->map.path);
+ if (dir == NULL) {
+ pr_op_err("opendir() error: %s", strerror(errno));
+ return;
+ }
+
+ FOREACH_DIR_FILE(dir, file) {
+ if (S_ISDOTS(file))
+ continue;
+
+ /*
+ * TODO (fine) Bit slow; wants a hash table,
+ * and maybe skip @file_path's reallocation.
+ */
+
+ file_path = path_join(fallback->map.path, file->d_name);
+
+ for (i = 0; i < commit->nfiles; i++) {
+ if (commit->files[i].path == NULL)
+ continue;
+ if (strcmp(file_path, commit->files[i].path) == 0)
+ goto next;
+ }
+
+ /*
+ * Uh... maybe keep the file until an expiration threshold?
+ * None of the current requirements seem to mandate it.
+ * It sounds pretty unreasonable for a signed valid manifest to
+ * "forget" a file, then legitimately relist it without actually
+ * providing it.
+ */
+ pr_op_debug("Removing hard link: %s", file_path);
+ if (unlink(file_path) < 0)
+ pr_op_warn("Could not unlink %s: %s",
+ file_path, strerror(errno));
+
+next: free(file_path);
+ }
+
+ if (errno)
+ pr_op_err("Fallback directory traversal errored: %s",
+ strerror(errno));
+ closedir(dir);
+}
+
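+/*
+ * Consumes the commit queue: Hard-links every committed RPP and TA into the
+ * fallback table, then deletes fallback nodes that were not refreshed during
+ * this validation run.
+ */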
+static void
+commit_fallbacks(void)
+{
+ struct cache_commit *commit;
+ struct cache_node *fb, *tmp;
+ array_index i;
+
+ while (!STAILQ_EMPTY(&commits)) {
+ commit = STAILQ_FIRST(&commits);
+ STAILQ_REMOVE_HEAD(&commits, lh);
+
+ if (commit->caRepository) {
+ fb = provide_node(&cache.fallback, commit->caRepository);
+
+ if (mkdir(fb->map.path, CACHE_FILEMODE) < 0) {
+ if (errno != EEXIST) {
+ pr_op_warn("Failed to create %s: %s",
+ fb->map.path, strerror(errno));
+ goto skip;
+ }
+ }
+
+ commit_rpp(commit, fb);
+ discard_trash(commit, fb);
+
+ } else { /* TA */
+ struct cache_mapping *map = &commit->files[0];
+
+ fb = provide_node(&cache.fallback, map->url);
+ if (is_fallback(map->path))
+ goto freshen;
+
+ pr_op_debug("Hard-linking TA: %s -> %s",
+ map->path, fb->map.path);
+ if (link(map->path, fb->map.path) < 0)
+ pr_op_warn("Could not hard-link cache file: %s",
+ strerror(errno));
+ }
+
+freshen: fb->fresh = 1;
+skip: free(commit->caRepository);
+ for (i = 0; i < commit->nfiles; i++) {
+ free(commit->files[i].url);
+ free(commit->files[i].path);
+ }
+ free(commit->files);
+ free(commit);
+ }
+
+ HASH_ITER(hh, cache.fallback.nodes, fb, tmp) {
+ if (fb->fresh)
+ continue;
+
+ /*
+ * XXX This one, on the other hand, would definitely benefit
+ * from an expiration threshold.
+ */
+ pr_op_debug("Removing orphaned fallback: %s", fb->map.path);
+ if (file_rm_rf(fb->map.path) < 0)
+ pr_op_warn("Could not remove %s; unknown cause.",
+ fb->map.path);
+ delete_node(&cache.fallback, fb);
+ }
+}
+
static void
remove_abandoned(void)
{
cleanup_cache(void)
{
// XXX Review
-
pr_op_debug("Cleaning up temporal files.");
cleanup_tmp();
+ pr_op_debug("Creating fallbacks for valid RPPs.");
+ commit_fallbacks();
+
pr_op_debug("Cleaning up old abandoned and unknown cache files.");
remove_abandoned();
cleanup_cache();
write_tal_json();
cache_foreach(delete_node);
+ free(cache.rsync.seq.prefix);
+ free(cache.https.seq.prefix);
+ free(cache.rrdp.seq.prefix);
+ free(cache.fallback.seq.prefix);
}
void
#include <stdbool.h>
#include "types/map.h"
+#include "types/rpp.h"
int cache_setup(void); /* Init this module */
void cache_teardown(void); /* Destroy this module */
struct cache_cage *cache_refresh_sias(struct sia_uris *);
char const *cage_map_file(struct cache_cage *, char const *);
bool cage_disable_refresh(struct cache_cage *);
+void cache_commit_rpp(char const *, struct rpp *);
+void cache_commit_file(struct cache_mapping *);
void cache_print(void); /* Dump cache in stdout */
return result;
}
+
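+/*
+ * Formats @tt (UTC) into @str, which must be able to hold at least
+ * FORT_TS_LEN characters. Returns 0 on success.
+ */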
+int
+time2str(time_t tt, char *str)
+{
+ struct tm tmbuffer, *tm;
+
+ memset(&tmbuffer, 0, sizeof(tmbuffer));
+ tm = gmtime_r(&tt, &tmbuffer);
+ if (tm == NULL)
+ return errno;
+ if (strftime(str, FORT_TS_LEN, FORT_TS_FORMAT, tm) == 0)
+ return ENOSPC;
+
+ return 0;
+}
+
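+/* Parses @str (FORT_TS_FORMAT, UTC) into @tt. Returns 0 on success. */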
+int
+str2time(char const *str, time_t *tt)
+{
+ char const *consumed;
+ struct tm tm;
+ time_t time;
+ int error;
+
+ memset(&tm, 0, sizeof(tm));
+ consumed = strptime(str, FORT_TS_FORMAT, &tm);
+ if (consumed == NULL || (*consumed) != 0)
+ return pr_op_err("String '%s' does not appear to be a timestamp.",
+ str);
+ time = timegm(&tm);
+ if (time == ((time_t) -1)) {
+ error = errno;
+ return pr_op_err("String '%s' does not appear to be a timestamp: %s",
+ str, strerror(error));
+ }
+
+ *tt = time;
+ return 0;
+}
time_t time_nonfatal(void);
time_t time_fatal(void);
+/*
+ * Careful with this; several of the conversion specification characters
+ * documented in the Linux man page are not actually portable.
+ */
+#define FORT_TS_FORMAT "%Y-%m-%dT%H:%M:%SZ"
+#define FORT_TS_LEN 21 /* strlen("YYYY-mm-ddTHH:MM:SSZ") + 1 */
+int time2str(time_t, char *);
+int str2time(char const *, time_t *);
+
#endif /* SRC_RTR_COMMON_H_ */
#include "config/boolean.h"
#include "config/incidences.h"
#include "config/str.h"
+#include "config/time.h"
#include "config/uint.h"
#include "config/work_offline.h"
#include "configure_ac.h"
/**
* rfc6487#section-7.2, last paragraph.
* Prevents arbitrarily long paths and loops.
+ *
+ * XXX X509_VERIFY_MAX_CHAIN_CERTS
*/
unsigned int maximum_certificate_depth;
/** File or directory where the .slurm file(s) is(are) located */
enum file_type ft;
char *payload;
+
+ struct {
+ /*
+ * If nonzero, all RPKI object expiration dates are compared to
+ * this number instead of the current time.
+ * Meant for test repositories we don't want to have to keep
+ * regenerating.
+ */
+ time_t validation_time;
+ } debug;
};
static void print_usage(FILE *, bool);
.max = 100,
},
+ {
+ .id = 13000,
+ .name = "debug.validation-time",
+ .type = >_time,
+ .offset = offsetof(struct rpki_config, debug.validation_time),
+ },
+
{
.id = 13000,
.name = "file-type",
return rpki_config.payload;
}
+time_t
+config_get_validation_time(void)
+{
+ return rpki_config.debug.validation_time;
+}
+
void
config_set_rsync_enabled(bool value)
{
unsigned int config_get_thread_pool_server_max(void);
enum file_type config_get_file_type(void);
char const *config_get_payload(void);
+time_t config_get_validation_time(void);
/* Logging getters */
bool config_get_op_log_enabled(void);
--- /dev/null
+#include "config/time.h"
+
+#include <errno.h>
+#include <getopt.h>
+#include <time.h>
+
+#include "common.h"
+#include "log.h"
+
+static void
+print_time(struct option_field const *field, void *value)
+{
+ time_t tt;
+ char str[FORT_TS_LEN];
+ int error;
+
+ tt = *((time_t *)value);
+ if (tt == 0)
+ return;
+
+ error = time2str(tt, str);
+ if (error)
+ pr_crit("time2str: %d", error);
+
+ pr_op_info("%s: %s", field->name, str);
+}
+
+static int
+parse_argv_time(struct option_field const *field, char const *str,
+ void *result)
+{
+ if (str == NULL || strlen(str) == 0)
+ return pr_op_err("--%s needs an argument.", field->name);
+
+ return str2time(str, result);
+}
+
+static int
+parse_json_time(struct option_field const *opt, json_t *json, void *result)
+{
+ if (!json_is_string(json))
+ return pr_op_err("The '%s' element is not a JSON string.",
+ opt->name);
+
+ return str2time(json_string_value(json), result);
+}
+
+const struct global_type gt_time = {
+ .has_arg = required_argument,
+ .size = sizeof(time_t),
+ .print = print_time,
+ .parse.argv = parse_argv_time,
+ .parse.json = parse_json_time,
+ .arg_doc = FORT_TS_FORMAT,
+};
--- /dev/null
+#ifndef SRC_CONFIG_TIME_H_
+#define SRC_CONFIG_TIME_H_
+
+#include "config/types.h"
+
+extern const struct global_type gt_time;
+
+#endif /* SRC_CONFIG_TIME_H_ */
handle_aki(void *ext, void *arg)
{
AUTHORITY_KEYID *aki = ext;
-// X509 *parent;
+ X509 *parent = arg;
if (aki->keyid == NULL) {
return pr_val_err("The %s lacks a keyIdentifier.",
ext_aki()->name);
}
- /* XXX
- parent = x509stack_peek(validation_certstack(state_retrieve()));
- if (parent == NULL)
- return pr_val_err("Certificate has no parent.");
-
return validate_public_key_hash(parent, aki->keyid, "AKI");
- */
- return 0;
}
size_t written;
int error;
+ pr_val_debug("Writing file: %s", path);
+
error = mkdir_p(path, false);
if (error)
return error;
char *dst;
struct timespec times[2];
- dst = join_paths(merge_dst, &src[src_offset]);
+ dst = path_join(merge_dst, &src[src_offset]);
if (S_ISDIR(st->st_mode)) {
pr_op_debug("mkdir -p %s", dst);
/* TODO (performance) optimize that 32 */
return nftw(path, rm, 32, FTW_DEPTH | FTW_PHYS);
}
+
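+/* Initializes @seq. Steals ownership of @prefix. */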
+void
+cseq_init(struct cache_sequence *seq, char *prefix)
+{
+ seq->prefix = prefix;
+ seq->next_id = 0;
+ seq->pathlen = strlen(prefix) + 4;
+}
+
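+/*
+ * Reserves and returns the next path in the sequence ("<prefix>/0",
+ * "<prefix>/1", ..., in hexadecimal). The caller must free the result.
+ * Returns NULL on error.
+ */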
+char *
+cseq_next(struct cache_sequence *seq)
+{
+ char *path;
+ int len;
+
+ do {
+ path = pmalloc(seq->pathlen);
+
+ // XXX not generic enough
+ len = snprintf(path, seq->pathlen, "%s/%lX",
+ seq->prefix, seq->next_id);
+ if (len < 0) {
+ pr_val_err("Cannot compute new cache path: Unknown cause.");
+ return NULL;
+ }
+ if (len < seq->pathlen) {
+ seq->next_id++;
+ return path; /* Happy path */
+ }
+
+ seq->pathlen++;
+ free(path);
+ } while (true);
+}
int file_rm_f(char const *);
int file_rm_rf(char const *);
+struct cache_sequence {
+ char *prefix;
+ unsigned long next_id;
+ size_t pathlen;
+};
+
+void cseq_init(struct cache_sequence *, char *);
+char *cseq_next(struct cache_sequence *);
+
/*
* Remember that this API is awkward:
*
#include <limits.h>
#include <time.h>
+#include "common.h"
#include "log.h"
-/*
- * Careful with this; several of the conversion specification characters
- * documented in the Linux man page are not actually portable.
- */
-#define JSON_TS_FORMAT "%Y-%m-%dT%H:%M:%SZ"
-#define JSON_TS_LEN 21 /* strlen("YYYY-mm-ddTHH:MM:SSZ") + 1 */
-
int
json_get_str(json_t *parent, char const *name, char const **result)
{
return 0;
}
-static int
-str2tt(char const *str, time_t *tt)
+int
+json_get_ulong(json_t *parent, char const *name, unsigned long *result)
{
- char const *consumed;
- struct tm tm;
- time_t time;
+ json_int_t json_int;
int error;
- memset(&tm, 0, sizeof(tm));
- consumed = strptime(str, JSON_TS_FORMAT, &tm);
- if (consumed == NULL || (*consumed) != 0)
- return pr_op_err("String '%s' does not appear to be a timestamp.",
- str);
- time = timegm(&tm);
- if (time == ((time_t) -1)) {
- error = errno;
- return pr_op_err("String '%s' does not appear to be a timestamp: %s",
- str, strerror(error));
- }
+ *result = 0;
- *tt = time;
+ error = json_get_int_t(parent, name, &json_int);
+ if (error)
+ return error;
+ if (json_int < 0 || ULONG_MAX < json_int)
+ return pr_op_err("Tag '%s' (%" JSON_INTEGER_FORMAT
+ ") is out of range [0, %lu].",
+ name, json_int, ULONG_MAX);
+
+ *result = json_int;
return 0;
}
if (error)
return error;
- return str2tt(str, result);
+ return str2time(str, result);
}
int
}
int
-json_add_str(json_t *parent, char const *name, char const *value)
+json_add_ulong(json_t *parent, char const *name, unsigned long value)
{
- if (json_object_set_new(parent, name, json_string(value)))
+ if (json_object_set_new(parent, name, json_integer(value)))
return pr_op_err(
- "Cannot convert %s '%s' to json; unknown cause.",
+ "Cannot convert %s '%lu' to json; unknown cause.",
name, value
);
return 0;
}
-static int
-tt2str(time_t tt, char *str)
+int
+json_add_str(json_t *parent, char const *name, char const *value)
{
- struct tm tmbuffer, *tm;
-
- memset(&tmbuffer, 0, sizeof(tmbuffer));
- tm = gmtime_r(&tt, &tmbuffer);
- if (tm == NULL)
- return errno;
- if (strftime(str, JSON_TS_LEN, JSON_TS_FORMAT, tm) == 0)
- return ENOSPC;
+ if (json_object_set_new(parent, name, json_string(value)))
+ return pr_op_err(
+ "Cannot convert %s '%s' to json; unknown cause.",
+ name, value
+ );
return 0;
}
int
json_add_ts(json_t *parent, char const *name, time_t value)
{
- char str[JSON_TS_LEN];
+ char str[FORT_TS_LEN];
int error;
- error = tt2str(value, str);
+ error = time2str(value, str);
if (error) {
pr_op_err("Cannot convert timestamp '%s' to json: %s",
name, strerror(error));
int json_get_int(json_t *, char const *, int *);
int json_get_u32(json_t *, char const *, uint32_t *);
+int json_get_ulong(json_t *, char const *, unsigned long *);
int json_get_ts(json_t *, char const *, time_t *);
int json_get_str(json_t *, char const *, char const **);
int json_get_array(json_t *, char const *, json_t **);
bool json_valid_members_count(json_t *, size_t);
int json_add_int(json_t *, char const *, int);
+int json_add_ulong(json_t *, char const *, unsigned long);
int json_add_str(json_t *, char const *, char const *);
int json_add_ts(json_t *, char const *, time_t);
#define SRC_OBJECT_BGPSEC_H_
#include "resource.h"
-#include "rpp.h"
+#include "types/rpp.h"
int handle_bgpsec(X509 *, struct resources *, struct rpp *);
struct rpki_certificate *cert;
cert = pzalloc(sizeof(*cert));
- cert->refcount++;
-
map_copy(&cert->map, map);
-
cert->parent = parent;
- parent->refcount++;
-
- cert->rpp.ancestors = X509_chain_up_ref(parent->rpp.ancestors);
- if (!cert->rpp.ancestors)
- goto fail;
- if (sk_X509_push(cert->rpp.ancestors, parent->x509) <= 0)
- goto fail;
- if (!X509_up_ref(parent->x509))
- goto fail;
-
+ cert->refcount = 1;
SLIST_INSERT_HEAD(stack, cert, lh);
- return;
-fail: rpki_certificate_free(cert);
+ parent->refcount++;
}
void
ee->resources = resources_create(RPKI_POLICY_RFC6484, force_inherit);
ee->parent = parent;
ee->refcount = 1;
+
+ parent->refcount++;
}
void
resources_destroy(cert->resources);
sias_cleanup(&cert->sias);
// XXX Recursive. Try refcounting the resources.
- rpki_certificate_free(cert->parent);
+ if (cert->parent)
+ rpki_certificate_free(cert->parent);
rpp_cleanup(&cert->rpp);
}
}
}
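+/*
+ * Collects @cert's ancestors (parent first, root last) into a new stack,
+ * suitable for X509_STORE_CTX_trusted_stack(). The X509s are not upreffed;
+ * release the result with sk_X509_free().
+ */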
+static STACK_OF(X509) *
+build_trusted_stack(struct rpki_certificate *cert)
+{
+ STACK_OF(X509) *stack;
+ int ret;
+
+ stack = sk_X509_new_null();
+ if (!stack) {
+ val_crypto_err("sk_X509_new_null() returned NULL.");
+ return NULL;
+ }
+
+ for (cert = cert->parent; cert != NULL; cert = cert->parent) {
+ ret = sk_X509_push(stack, cert->x509);
+ if (ret <= 0) {
+ val_crypto_err("sk_X509_push returned %d.", ret);
+ sk_X509_pop_free(stack, X509_free);
+ return NULL;
+ }
+ }
+
+ return stack;
+}
+
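+/*
+ * Wraps the parent's CRL in a single-element stack, suitable for
+ * X509_STORE_CTX_set0_crls(). Release it with sk_X509_CRL_free().
+ */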
+static STACK_OF(X509_CRL) *
+build_crl_stack(struct rpki_certificate *cert)
+{
+ STACK_OF(X509_CRL) *stack;
+ int ok;
+
+ stack = sk_X509_CRL_new_null();
+ if (!stack) {
+ val_crypto_err("sk_X509_CRL_new_null() returned NULL.");
+ return NULL;
+ }
+ ok = sk_X509_CRL_push(stack, cert->parent->rpp.crl.obj);
+ if (ok != 1) {
+		val_crypto_err("sk_X509_CRL_push() returned %d.", ok);
+		sk_X509_CRL_free(stack);
+		return NULL;
+ }
+
+ return stack;
+}
+
static void
pr_debug_x509_dates(X509 *x509)
{
/* Reference: openbsd/src/usr.bin/openssl/verify.c */
X509_STORE_CTX *ctx;
+ STACK_OF(X509) *trusted;
STACK_OF(X509_CRL) *crls;
int ok;
int error;
ctx = X509_STORE_CTX_new();
if (ctx == NULL) {
val_crypto_err("X509_STORE_CTX_new() returned NULL");
- return -EINVAL;
+ return EINVAL;
}
/* Returns 0 or 1 , all callers test ! only. */
ok = X509_STORE_CTX_init(ctx, validation_store(state_retrieve()),
cert->x509, NULL);
if (!ok) {
- val_crypto_err("X509_STORE_CTX_init() returned %d", ok);
- goto abort;
+ error = val_crypto_err("X509_STORE_CTX_init() returned %d", ok);
+ goto end1;
}
- X509_STORE_CTX_trusted_stack(ctx, cert->rpp.ancestors);
+ trusted = build_trusted_stack(cert);
+ if (!trusted) {
+ error = EINVAL;
+ goto end1;
+ }
+ X509_STORE_CTX_trusted_stack(ctx, trusted);
- crls = sk_X509_CRL_new_null();
- if (!crls)
- enomem_panic();
- if (sk_X509_CRL_push(crls, cert->rpp.crl.obj) != 1) {
- // XXX
+ crls = build_crl_stack(cert);
+ if (!crls) {
+ error = EINVAL;
+ goto end2;
}
- // XXX These CRLs will only be used if CRL verification is enabled in
- // the associated X509_VERIFY_PARAM structure.
- X509_STORE_CTX_set0_crls(ctx, crls); // XXX needs free
- // sk_X509_CRL_pop_free(cert->crl.stack, X509_CRL_free);
+ X509_STORE_CTX_set0_crls(ctx, crls);
if (log_val_enabled(LOG_DEBUG))
pr_debug_x509_dates(cert->x509);
*/
error = X509_STORE_CTX_get_error(ctx);
if (error == X509_V_ERR_CRL_HAS_EXPIRED)
- complain_crl_stale(cert->rpp.crl.obj);
+ complain_crl_stale(cert->parent->rpp.crl.obj);
else if (error)
pr_val_err("Certificate validation failed: %s",
X509_verify_cert_error_string(error));
- else
+ else {
/*
* ...But don't trust X509_STORE_CTX_get_error() either.
* That said, there's not much to do about !error,
* so hope for the best.
*/
val_crypto_err("Certificate validation failed: %d", ok);
- goto abort;
+ error = EINVAL;
+ }
+ goto end3;
}
- X509_STORE_CTX_free(ctx);
- return 0;
+ error = 0;
-abort:
- X509_STORE_CTX_free(ctx);
- return -EINVAL;
+end3: sk_X509_CRL_free(crls);
+end2: sk_X509_free(trusted);
+end1: X509_STORE_CTX_free(ctx);
+ return error;
}
static int
for (i = 0; i < blocks->list.count && !error; i++)
error = resources_add_ip(cert->resources,
- cert->parent->resources,
+ cert->parent ? cert->parent->resources : NULL,
blocks->list.array[i]);
end:
if (error)
return error;
- error = resources_add_asn(cert->resources, cert->parent->resources,
+ error = resources_add_asn(cert->resources,
+ cert->parent ? cert->parent->resources : NULL,
ids, cert->type != CERTYPE_BGPSEC);
ASN_STRUCT_FREE(asn_DEF_ASIdentifiers, ids);
validate_ca_extensions(struct rpki_certificate *cert)
{
struct extension_handler handlers[] = {
- /* ext reqd handler arg */
- { ext_bc(), true, handle_bc, },
- { ext_ski(), true, handle_ski_ca, cert->x509 },
- { ext_aki(), true, handle_aki, },
- { ext_ku(), true, handle_ku_ca, },
- { ext_cdp(), true, handle_cdp, &cert->sias },
- { ext_aia(), true, handle_aia, &cert->sias },
- { ext_sia(), true, handle_sia_ca, &cert->sias },
- { ext_cp(), true, handle_cp, &cert->policy },
+ /* ext reqd handler arg */
+ { ext_bc(), true, handle_bc, },
+ { ext_ski(), true, handle_ski_ca, cert->x509 },
+ { ext_aki(), true, handle_aki, cert->parent->x509 },
+ { ext_ku(), true, handle_ku_ca, },
+ { ext_cdp(), true, handle_cdp, &cert->sias },
+ { ext_aia(), true, handle_aia, &cert->sias },
+ { ext_sia(), true, handle_sia_ca, &cert->sias },
+ { ext_cp(), true, handle_cp, &cert->policy },
/* These are handled by certificate_get_resources(). */
- { ext_ir(), false, },
- { ext_ar(), false, },
- { ext_ir2(), false, },
- { ext_ar2(), false, },
+ { ext_ir(), false, },
+ { ext_ar(), false, },
+ { ext_ir2(), false, },
+ { ext_ar2(), false, },
{ NULL },
};
int error;
error = certificate_validate_aia(cert);
if (error)
return error;
- return validate_cdp(&cert->sias, cert->rpp.crl.map->url);
+ return validate_cdp(&cert->sias, cert->parent->rpp.crl.map->url);
}
int
{
struct ski_arguments ski_args;
struct extension_handler handlers[] = {
- /* ext reqd handler arg */
- { ext_ski(), true, handle_ski_ee, &ski_args },
- { ext_aki(), true, handle_aki, },
- { ext_ku(), true, handle_ku_ee, },
- { ext_cdp(), true, handle_cdp, &cert->sias },
- { ext_aia(), true, handle_aia, &cert->sias },
- { ext_sia(), true, handle_sia_ee, &cert->sias },
- { ext_cp(), true, handle_cp, &cert->policy },
- { ext_ir(), false, },
- { ext_ar(), false, },
- { ext_ir2(), false, },
- { ext_ar2(), false, },
+ /* ext reqd handler arg */
+ { ext_ski(), true, handle_ski_ee, &ski_args },
+ { ext_aki(), true, handle_aki, cert->parent->x509 },
+ { ext_ku(), true, handle_ku_ee, },
+ { ext_cdp(), true, handle_cdp, &cert->sias },
+ { ext_aia(), true, handle_aia, &cert->sias },
+ { ext_sia(), true, handle_sia_ee, &cert->sias },
+ { ext_cp(), true, handle_cp, &cert->policy },
+ { ext_ir(), false, },
+ { ext_ar(), false, },
+ { ext_ir2(), false, },
+ { ext_ar2(), false, },
{ NULL },
};
static enum cert_type
get_certificate_type(struct rpki_certificate *cert)
{
- if (cert->rpp.ancestors == NULL)
+ if (cert->parent == NULL)
return CERTYPE_TA;
if (X509_check_purpose(cert->x509, -1, -1) <= 0)
return 0;
}
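+/* Number of certificates from @cert up to (and including) the root. */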
+static unsigned int
+chain_length(struct rpki_certificate *cert)
+{
+ unsigned int a;
+ for (a = 0; cert != NULL; a++)
+ cert = cert->parent;
+ return a;
+}
+
static int
init_resources(struct rpki_certificate *cert)
{
{
int error;
- if (sk_X509_num(cert->rpp.ancestors) >= config_get_max_cert_depth())
+ if (chain_length(cert) >= config_get_max_cert_depth())
return pr_val_err("Certificate chain maximum depth exceeded.");
fnstack_push_map(&cert->map);
switch (cert->type) {
case CERTYPE_TA:
+ pr_val_debug("Type: TA");
break;
case CERTYPE_CA:
pr_val_debug("Type: CA");
goto end;
}
- error = manifest_validate(ca->sias.rpkiManifest, mft, cage, ca);
+ error = manifest_traverse(ca->sias.rpkiManifest, mft, cage, ca);
if (error) {
if (cage_disable_refresh(cage))
goto retry;
ghostbusters_traverse(map, ca);
}
+ cache_commit_rpp(ca->sias.caRepository, &ca->rpp);
+
end: free(cage);
return error;
}
traverse_tree(struct cache_mapping const *ta_map, struct validation *state)
{
struct cert_stack stack;
- struct rpki_certificate ta = { .map = *ta_map };
+ struct rpki_certificate *ta;
struct rpki_certificate *ca;
int error;
SLIST_INIT(&stack);
/* == Root certificate == */
- error = certificate_traverse(&ta, &stack); // XXX clean up TA
+ ta = pzalloc(sizeof(struct rpki_certificate));
+ map_copy(&ta->map, ta_map);
+ ta->refcount = 1;
+
+ error = certificate_traverse(ta, &stack);
if (error)
- return error;
+ goto end;
/*
* From now on, the tree should be considered valid, even if subsequent
rpki_certificate_free(ca);
}
- return 0;
+end: rpki_certificate_free(ta);
+ return error;
}
#include "cache.h"
#include "certificate_refs.h"
#include "resource.h"
-#include "rpp.h"
#include "state.h"
+#include "types/rpp.h"
/* Certificate types in the RPKI */
enum cert_type {
}
static int
-validate_extensions(X509_CRL *crl)
+validate_extensions(X509_CRL *crl, X509 *parent)
{
struct extension_handler handlers[] = {
/* ext reqd handler arg */
- { ext_aki(), true, handle_aki, },
+ { ext_aki(), true, handle_aki, parent },
{ ext_cn(), true, handle_crlnum, },
{ NULL },
};
if (error)
return error;
- return validate_extensions(crl);
+ return validate_extensions(crl, parent);
}
int
/* Validate everything */
error = signed_object_validate(&sobj, &arcs, &ee);
if (error)
- goto end2;
+ goto end3;
error = handle_vcard(&sobj);
if (error)
- goto end2;
+ goto end3;
error = refs_validate_ee(&ee.sias, parent->rpp.crl.map->url, map->url);
-end2: rpki_certificate_cleanup(&ee);
+end3: rpki_certificate_cleanup(&ee);
signed_object_cleanup(&sobj);
end1: fnstack_pop();
return error;
#include "asn1/asn1c/Manifest.h"
#include "asn1/decode.h"
#include "common.h"
+#include "config.h"
#include "hash.h"
#include "log.h"
#include "object/crl.h"
TM_ARGS(nextUpdate));
}
- now_tt = time_fatal();
+ now_tt = config_get_validation_time();
+ if (now_tt == 0)
+ now_tt = time_fatal();
+
if (gmtime_r(&now_tt, &now) == NULL) {
error = errno;
return pr_val_err("gmtime_r(now) error %d: %s", error,
}
int
-manifest_validate(char const *url, char const *path, struct cache_cage *cage,
+manifest_traverse(char const *url, char const *path, struct cache_cage *cage,
struct rpki_certificate *parent)
{
static OID oid = OID_MANIFEST;
/* Validate everything */
error = signed_object_validate(&sobj, &arcs, &ee);
if (error)
- goto end4;
+ goto end5;
error = validate_manifest(mft);
if (error)
- goto end4;
+ goto end5;
error = refs_validate_ee(&ee.sias, parent->rpp.crl.map->url, url);
-end4: rpki_certificate_cleanup(&ee);
+end5: rpki_certificate_cleanup(&ee);
+ if (error)
+ rpp_cleanup(&parent->rpp);
end3: ASN_STRUCT_FREE(asn_DEF_Manifest, mft);
end2: signed_object_cleanup(&sobj);
end1: fnstack_pop();
#include "cache.h"
#include "object/certificate.h"
-int manifest_validate(char const *url, char const *path,
+int manifest_traverse(char const *url, char const *path,
struct cache_cage *cage, struct rpki_certificate *parent);
#endif /* SRC_OBJECT_MANIFEST_H_ */
/* Validate and handle everything */
error = signed_object_validate(&sobj, &arcs, &ee);
if (error)
- goto end3;
+ goto end4;
error = __handle_roa(roa, ee.resources);
if (error)
- goto end3;
+ goto end4;
error = refs_validate_ee(&ee.sias, parent->rpp.crl.map->url, map->url);
-end3: rpki_certificate_cleanup(&ee);
+end4: rpki_certificate_cleanup(&ee);
ASN_STRUCT_FREE(asn_DEF_RouteOriginAttestation, roa);
end2: signed_object_cleanup(&sobj);
end1: fnstack_pop();
thread->error = validation_prepare(&state, &tal, &collector);
if (thread->error) {
db_table_destroy(db);
- goto end2;
+ goto end1;
}
ARRAYLIST_FOREACH(&tal.urls, url) {
continue;
if (traverse_tree(&map, state) != 0)
continue;
- goto end1; /* Happy path */
+ goto end2; /* Happy path */
}
ARRAYLIST_FOREACH(&tal.urls, url) {
continue;
if (traverse_tree(&map, state) != 0)
continue;
- goto end1; /* Happy path */
+ goto end2; /* Happy path */
}
pr_op_err("None of the TAL URIs yielded a successful traversal.");
db_table_destroy(db);
db = NULL;
-end1: thread->db = db;
-end2: tal_cleanup(&tal);
+end2: thread->db = db;
+ validation_destroy(state);
+end1: tal_cleanup(&tal);
}
static void *
static BIO *
__rsync2bio(char const *src, char const *dst)
{
- struct cache_mapping map;
int error;
// XXX use the cache
- map.url = (char *)src;
- map.path = (char *)dst;
-
- error = rsync_download(&map);
+ error = rsync_download(src, dst);
if (error) {
pr_op_err("rysnc download failed: %s", strerror(abs(error)));
return NULL;
static int
inherit_aors(struct resources *resources, struct resources *parent, int family)
{
- // XXX is this really crit worthy?
if (parent == NULL)
- pr_crit("Parent has no resources.");
+ return pr_val_err("Root certificate is trying to inherit IP resources from a parent.");
switch (family) {
case AF_INET:
inherit_asiors(struct resources *resources, struct resources *parent)
{
if (parent == NULL)
- pr_crit("Parent has no resources.");
+ return pr_val_err("Root certificate is trying to inherit AS resources from a parent.");
if (resources->asns != NULL)
return pr_val_err("Certificate inherits ASN resources while also defining others of its own.");
#include <sys/queue.h>
#include "base64.h"
+#include "cache.h"
#include "cachetmp.h"
#include "common.h"
#include "config.h"
/* Subset of the notification that is relevant to the TAL's cachefile */
struct rrdp_state {
- char const *repo; /* Points to cache_node's map.path */
-
struct rrdp_session session;
struct cache_file *files; /* Hash table */
- unsigned int next_id;
- size_t pathlen;
+ struct cache_sequence seq;
/*
* The 1st one contains the hash of the session.serial delta.
return file;
}
-static void
-state_flush_files(struct rrdp_state *state)
-{
- struct cache_file *file, *tmp;
-
- HASH_ITER(hh, state->files, file, tmp) {
- HASH_DEL(state->files, file);
- free(file->map.url);
- free(file->map.path);
- free(file);
- }
-}
-
static void
metadata_cleanup(struct file_metadata *meta)
{
return 0;
}
-static char *
-create_path(struct rrdp_state *state)
-{
- char *path;
- int len;
-
- do {
- path = pmalloc(state->pathlen);
-
- len = snprintf(path, state->pathlen, "%s/%X",
- state->repo, state->next_id);
- if (len < 0) {
- pr_val_err("Cannot compute new cache path: Unknown cause.");
- return NULL;
- }
- if (len < state->pathlen) {
- state->next_id++;
- return path; /* Happy path */
- }
-
- state->pathlen++;
- free(path);
- } while (true);
-}
-
static int
handle_publish(xmlTextReaderPtr reader, struct parser_args *args)
{
file = pzalloc(sizeof(struct cache_file));
file->map.url = pstrdup(tag.meta.uri);
- file->map.path = create_path(args->state);
+ file->map.path = cseq_next(&args->state->seq);
if (!file->map.path) {
free(file->map.url);
free(file);
}
HASH_DEL(args->state->files, file);
- free(file->map.url);
- free(file->map.path);
+ map_cleanup(&file->map);
free(file);
end: metadata_cleanup(&tag.meta);
}
static int
-dl_notif(struct cache_mapping *map, time_t mtim, bool *changed,
+dl_notif(struct cache_mapping const *map, time_t mtim, bool *changed,
struct update_notification *new)
{
char *tmppath;
* snapshot, and explodes them into @notif->path.
*/
int
-rrdp_update(struct cache_mapping *notif, time_t mtim, bool *changed,
- struct rrdp_state **state)
+rrdp_update(struct cache_mapping const *notif, time_t mtim, bool *changed,
+ struct cache_sequence *rrdp_seq, struct rrdp_state **state)
{
struct rrdp_state *old;
struct update_notification new;
new.session.serial.str);
if ((*state) == NULL) {
+ char *cage;
+
pr_val_debug("This is a new Notification.");
+ cage = cseq_next(rrdp_seq);
+ if (!cage)
+ goto clean_notif;
+
old = pzalloc(sizeof(struct rrdp_state));
- old->repo = notif->path;
/* session postponed! */
- old->pathlen = strlen(old->repo) + 5;
+ cseq_init(&old->seq, cage);
STAILQ_INIT(&old->delta_hashes);
error = handle_snapshot(&new, old);
if (error) {
- state_flush_files(old);
- free(old);
+ rrdp_state_free(old);
goto clean_notif;
}
} else {
pr_val_debug("The Notification changed, but the session ID and serial didn't, and no session desync was detected.");
+ *changed = false;
goto clean_notif;
}
return file ? file->map.path : NULL;
}
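+/*
+ * Reserves a path under @cage for @url's fallback copy, and records the
+ * mapping in *@_state (allocating it if needed). Returns the path (owned by
+ * the state), or NULL on error.
+ */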
+char const *
+rrdp_create_fallback(char const *cage, struct rrdp_state **_state,
+ char const *url)
+{
+ struct rrdp_state *state;
+ struct cache_file *file;
+ size_t len;
+
+ state = *_state;
+ if (state == NULL) {
+ *_state = state = pzalloc(sizeof(struct rrdp_state));
+ cseq_init(&state->seq, pstrdup(cage));
+ }
+
+ file = pzalloc(sizeof(struct cache_file));
+ file->map.url = pstrdup(url);
+ file->map.path = cseq_next(&state->seq);
+ if (!file->map.path) {
+ free(file->map.url);
+ free(file);
+ return NULL;
+ }
+
+ len = strlen(file->map.url);
+ HASH_ADD_KEYPTR(hh, state->files, file->map.url, len, file);
+
+ return file->map.path;
+}
+
#define TAGNAME_SESSION "session_id"
#define TAGNAME_SERIAL "serial"
#define TAGNAME_DELTAS "deltas"
return (bin < 10) ? (bin + '0') : (bin + 'a' - 10);
}
-json_t *
-rrdp_state2json(struct rrdp_state *state)
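+/* Serializes @state's URL -> path mappings into a JSON object. */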
+static json_t *
+files2json(struct rrdp_state *state)
{
json_t *json;
- json_t *deltas;
- char hash_str[2 * RRDP_HASH_LEN + 1];
- struct rrdp_hash *hash;
- size_t i;
+ struct cache_file *file, *tmp;
- if (state == NULL)
+ json = json_obj_new();
+ if (json == NULL)
return NULL;
- json = json_object();
- if (json == NULL)
- enomem_panic();
+ HASH_ITER(hh, state->files, file, tmp)
+ if (json_add_str(json, file->map.url, file->map.path))
+ goto fail;
- if (json_add_str(json, TAGNAME_SESSION, state->session.session_id))
- goto fail;
- if (json_add_str(json, TAGNAME_SERIAL, state->session.serial.str))
- goto fail;
+ return json;
- if (STAILQ_EMPTY(&state->delta_hashes))
- return json; /* Happy path, but unlikely. */
+fail: json_decref(json);
+ return NULL;
+}
- deltas = json_array();
- if (deltas == NULL)
- enomem_panic();
- if (json_object_add(json, TAGNAME_DELTAS, deltas))
- goto fail;
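+/* Serializes the delta hash window into a JSON array of hex strings. */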
+static json_t *
+dh2json(struct rrdp_state *state)
+{
+ json_t *json;
+ char hash_str[2 * RRDP_HASH_LEN + 1];
+ struct rrdp_hash *hash;
+ array_index i;
+
+ json = json_array_new();
+ if (json == NULL)
+ return NULL;
hash_str[2 * RRDP_HASH_LEN] = '\0';
STAILQ_FOREACH(hash, &state->delta_hashes, hook) {
hash_str[2 * i ] = hash_b2c(hash->bytes[i] >> 4);
hash_str[2 * i + 1] = hash_b2c(hash->bytes[i] );
}
- if (json_array_append(deltas, json_string(hash_str)))
+ if (json_array_add(json, json_string(hash_str)))
goto fail;
}
return json;
-fail:
- json_decref(json);
+fail: json_decref(json);
+ return NULL;
+}
+
+json_t *
+rrdp_state2json(struct rrdp_state *state)
+{
+ json_t *json;
+
+ json = json_object();
+ if (json == NULL)
+ enomem_panic();
+
+ if (json_add_str(json, TAGNAME_SESSION, state->session.session_id))
+ goto fail;
+ if (json_add_str(json, TAGNAME_SERIAL, state->session.serial.str))
+ goto fail;
+ if (state->files)
+ if (json_object_add(json, "files", files2json(state)))
+ goto fail;
+ if (json_add_ulong(json, "next", state->seq.next_id))
+ goto fail;
+ if (!STAILQ_EMPTY(&state->delta_hashes))
+ if (json_object_add(json, TAGNAME_DELTAS, dh2json(state)))
+ goto fail;
+
+ return json;
+
+fail: json_decref(json);
return NULL;
}
return error;
}
-void
-rrdp_state_cleanup(struct rrdp_state *state)
-{
- session_cleanup(&state->session);
- state_flush_files(state);
- clear_delta_hashes(state);
-}
-
void
rrdp_state_free(struct rrdp_state *state)
{
- if (state != NULL) {
- rrdp_state_cleanup(state);
- free(state);
+ struct cache_file *file, *tmp;
+
+ if (state == NULL)
+ return;
+
+ session_cleanup(&state->session);
+ HASH_ITER(hh, state->files, file, tmp) {
+ HASH_DEL(state->files, file);
+ map_cleanup(&file->map);
+ free(file);
}
+ free(state->seq.prefix);
+ clear_delta_hashes(state);
+ free(state);
}
#include <stdbool.h>
#include <time.h>
+#include "file.h"
#include "types/map.h"
struct rrdp_state;
-int rrdp_update(struct cache_mapping *, time_t, bool *, struct rrdp_state **);
+int rrdp_update(struct cache_mapping const *, time_t, bool *,
+ struct cache_sequence *, struct rrdp_state **);
char const *rrdp_file(struct rrdp_state *, char const *);
+char const *rrdp_create_fallback(char const *, struct rrdp_state **,
+ char const *);
+
json_t *rrdp_state2json(struct rrdp_state *);
int rrdp_json2state(json_t *, struct rrdp_state **);
-void rrdp_state_cleanup(struct rrdp_state *);
void rrdp_state_free(struct rrdp_state *);
#endif /* SRC_RRDP_H_ */
}
static void
-prepare_rsync(char **args, struct cache_mapping *map)
+prepare_rsync(char **args, char const *url, char const *path)
{
size_t i = 0;
args[i++] = "--include=*.roa";
args[i++] = "--exclude=*";
#endif
- args[i++] = map->url;
- args[i++] = map->path;
+ args[i++] = (char *)url;
+ args[i++] = (char *)path;
args[i++] = NULL;
}
return exhaust_read_fds(STDERR_READ(fds), STDOUT_READ(fds));
}
-/* rsync @src @dst */
+/* rsync @url @path */
int
-rsync_download(struct cache_mapping *map)
+rsync_download(char const *url, char const *path)
{
char *args[32];
/* Descriptors to pipe stderr (first element) and stdout (second) */
int error;
/* Prepare everything for the child exec */
- prepare_rsync(args, map);
+ prepare_rsync(args, url, path);
- pr_val_info("rsync: %s -> %s", map->url, map->path);
+ pr_val_info("rsync: %s -> %s", url, path);
if (log_val_enabled(LOG_DEBUG)) {
pr_val_debug("Executing rsync:");
for (i = 0; args[i] != NULL; i++)
pr_val_debug(" %s", args[i]);
}
- error = mkdir_p(map->path, true);
+ error = mkdir_p(path, true);
if (error)
return error;
if (retries == config_get_rsync_retry_count()) {
if (retries > 0)
pr_val_warn("Max RSYNC retries (%u) reached on '%s', won't retry again.",
- retries, map->url);
+ retries, url);
return EIO;
}
pr_val_warn("Retrying RSYNC '%s' in %u seconds, %u attempts remaining.",
- map->url,
+ url,
config_get_rsync_retry_interval(),
config_get_rsync_retry_count() - retries);
retries++;
#ifndef SRC_RSYNC_RSYNC_H_
#define SRC_RSYNC_RSYNC_H_
-#include "types/map.h"
-
-int rsync_download(struct cache_mapping *);
+int rsync_download(char const *, char const *);
#endif /* SRC_RSYNC_RSYNC_H_ */
#include "state.h"
#include "alloc.h"
+#include "config.h"
#include "log.h"
#include "thread_var.h"
enomem_panic();
X509_VERIFY_PARAM_set_flags(params, X509_V_FLAG_CRL_CHECK);
+ if (config_get_validation_time() != 0)
+ X509_VERIFY_PARAM_set_time(params, config_get_validation_time());
X509_STORE_set1_param(result->x509_data.store, params);
X509_STORE_set_verify_cb(result->x509_data.store, cb);
* XXX I'm starting to use this more. Probably clean the slashes.
*/
char *
-join_paths(char const *path1, char const *path2)
+path_join(char const *path1, char const *path2)
{
size_t n;
char *result;
// XXX needed?
if (path1[0] == 0)
return pstrdup(path2);
- if (path2[0] == 0)
+ if (path2 == NULL || path2[0] == 0)
return pstrdup(path1);
n = strlen(path1) + strlen(path2) + 2;
char *path_parent(char const *);
char *path_childn(char const *, char const *, size_t);
char const *path_filename(char const *);
-char *join_paths(char const *, char const *);
+char *path_join(char const *, char const *);
#endif /* SRC_TYPES_PATH_H_ */
-#include "rpp.h"
+#include "types/rpp.h"
#include "types/array.h"
{
array_index i;
- sk_X509_pop_free(rpp->ancestors, X509_free);
-
for (i = 0; i < rpp->nfiles; i++)
map_cleanup(&rpp->files[i]);
free(rpp->files);
+ rpp->files = NULL;
+ rpp->nfiles = 0;
- if (rpp->crl.obj != NULL)
+ rpp->crl.map = NULL;
+ if (rpp->crl.obj != NULL) {
X509_CRL_free(rpp->crl.obj);
+ rpp->crl.obj = NULL;
+ }
}
#ifndef SRC_RPP_H_
#define SRC_RPP_H_
-// XXX move to types?
-
#include <openssl/x509.h>
#include "types/map.h"
/* Repository Publication Point */
struct rpp {
- STACK_OF(X509) *ancestors; /* 1st = root, last = parent */
-
struct cache_mapping *files;
size_t nfiles; /* Number of maps in @files */
check_PROGRAMS += base64.test
check_PROGRAMS += cachent.test
check_PROGRAMS += cache.test
+check_PROGRAMS += common.test
check_PROGRAMS += db_table.test
check_PROGRAMS += deltas_array.test
check_PROGRAMS += hash.test
-check_PROGRAMS += json_util.test
check_PROGRAMS += mft.test
check_PROGRAMS += path.test
check_PROGRAMS += pdu_handler.test
cache_test_SOURCES = cache_test.c
cache_test_LDADD = ${MY_LDADD} ${JANSSON_LIBS} ${XML2_LIBS}
+common_test_SOURCES = common_test.c
+common_test_LDADD = ${MY_LDADD}
+
db_table_test_SOURCES = rtr/db/db_table_test.c
db_table_test_LDADD = ${MY_LDADD}
hash_test_SOURCES = hash_test.c
hash_test_LDADD = ${MY_LDADD}
-json_util_test_SOURCES = json_util_test.c
-json_util_test_LDADD = ${MY_LDADD}
-
mft_test_SOURCES = object/manifest_test.c
mft_test_LDADD = ${MY_LDADD}
#include "base64.c"
#include "common.c"
#include "cache.c"
-#include "cachent.c"
#include "cachetmp.c"
-#include "cache_util.c"
#include "file.c"
#include "hash.c"
#include "json_util.c"
#include "rrdp_util.h"
#include "relax_ng.c"
#include "rrdp.c"
+#include "types/map.c"
#include "types/path.c"
#include "types/str.c"
#include "types/url.c"
}
int
-rsync_download(char const *src, char const *dst, char const *cmpdir)
+rsync_download(char const *url, char const *path)
{
rsync_counter++;
if (dl_error)
return dl_error;
- ck_assert_int_eq(0, mkdir_p(dst, true));
- touch_file(dst);
+ ck_assert_int_eq(0, mkdir_p(path, true));
+ touch_file(path);
return 0;
}
setup_test(void)
{
dl_error = 0;
- ck_assert_int_eq(0, system("rm -rf tmp/"));
- cache_prepare();
- ck_assert_int_eq(0, system("mkdir -p tmp/rsync tmp/https tmp/tmp"));
+ ck_assert_int_eq(0, system("rm -rf tmp"));
+ init_tables();
+ ck_assert_int_eq(0, system("mkdir -p tmp/rsync tmp/https tmp/rrdp tmp/fallback"));
}
-static int
-okay(struct cache_mapping const *map, void *arg)
-{
- return 0;
-}
-
-static void
-run_dl_rsync(char const *caRepository, int expected_error,
- unsigned int expected_calls)
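+/*
+ * Refreshes @caRepository through the cache and asserts the refresh performed
+ * exactly @expected_calls rsync invocations (and zero HTTPS ones). Returns
+ * the resulting cage (NULL if the refresh failed); the caller frees it.
+ */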
+static struct cache_cage *
+run_dl_rsync(char *caRepository, unsigned int expected_calls)
{
- struct sia_uris sias;
-
- sias.caRepository = pstrdup(caRepository);
- sias.rpkiNotify = NULL;
- sias.rpkiManifest = NULL;
+ struct sia_uris sias = { .caRepository = caRepository };
+ struct cache_cage *cage;
rsync_counter = 0;
https_counter = 0;
printf("---- Downloading... ----\n");
- ck_assert_int_eq(expected_error, cache_download_alt(&sias, okay, NULL));
+ cage = cache_refresh_sias(&sias);
printf("---- Downloaded. ----\n");
ck_assert_uint_eq(expected_calls, rsync_counter);
ck_assert_uint_eq(0, https_counter);
- sias_cleanup(&sias);
+ return cage;
}
static void
-run_dl_https(char const *url, int expected_error, unsigned int expected_calls)
+run_dl_https(char const *url, unsigned int expected_calls,
+ char const *expected_result)
{
- struct strlist uris;
-
- strlist_init(&uris);
- strlist_add(&uris, pstrdup(url));
+ char const *result;
rsync_counter = 0;
https_counter = 0;
printf("---- Downloading... ----\n");
- ck_assert_int_eq(expected_error, cache_download_uri(&uris, okay, NULL));
+ result = cache_refresh_url(url);
printf("---- Downloaded. ----\n");
ck_assert_uint_eq(0, rsync_counter);
ck_assert_uint_eq(expected_calls, https_counter);
- strlist_cleanup(&uris);
+ ck_assert_str(expected_result, result);
+ ck_assert_str(NULL, cache_fallback_url(url));
+}
+
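+/*
+ * Asserts cage_map_file() resolves @url to @refresh while the cage's refresh
+ * node is present, and to @fallback once the refresh is disabled.
+ */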
+static void
+ck_cage(struct cache_cage *cage, char const *url,
+ char const *refresh, char const *fallback)
+{
+ struct cache_node *bkp;
+
+ ck_assert_str(refresh, cage_map_file(cage, url));
+
+ bkp = cage->refresh;
+ cage_disable_refresh(cage);
+
+ ck_assert_str(fallback, cage_map_file(cage, url));
+
+ cage->refresh = bkp;
}
static int
}
static void
-run_cleanup(void)
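+/*
+ * Queues a fake two-file RPP (a manifest and a certificate under
+ * @caRepository) whose payloads currently live at @path1 and @path2, so the
+ * next commit_fallbacks() is expected to mirror them into the fallback cache.
+ */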
+queue_commit(char const *caRepository, char const *path1, char const *path2)
{
- print_tree();
+ struct rpp rpp = { 0 };
+
+ rpp.nfiles = 2;
+ rpp.files = pzalloc(rpp.nfiles * sizeof(struct cache_mapping));
+ rpp.files[0].url = path_join(caRepository, "manifest.mft");
+ rpp.files[0].path = pstrdup(path1);
+ rpp.files[1].url = path_join(caRepository, "cert.cer");
+ rpp.files[1].path = pstrdup(path2);
- pr_op_debug("---- Cleaning up... ----");
- cleanup_cache();
- pr_op_debug("---- Cleant. ----");
+ cache_commit_rpp(caRepository, &rpp);
}
-static bool
-ck_path(struct cache_node *node)
+/* Only validates the first character of the file. */
+static void
+ck_file(char const *path, char const *expected)
{
- int error;
+ FILE *file;
+ char actual[2];
- if (!node->tmppath)
- return true;
+ file = fopen(path, "rb");
+ if (!file)
+ ck_abort_msg("fopen(%s): %s", path, strerror(errno));
+ ck_assert_int_eq(1, fread(actual, 1, 1, file));
+ fclose(file);
+ actual[1] = 0;
- error = file_exists(node->tmppath);
- if (error)
- ck_abort_msg("Missing file in cache: %s (%s)", node->tmppath,
- strerror(error));
+ ck_assert_str_eq(expected, actual);
+}
+
+static va_list fs_valist;
- return true;
+static int
+ck_filesystem_file(const char *fpath, const struct stat *sb, int typeflag,
+ struct FTW *ftwbuf)
+{
+ static va_list args;
+ char const *path;
+ bool found = false;
+
+ if ((sb->st_mode & S_IFMT) != S_IFREG)
+ return 0;
+
+ va_copy(args, fs_valist);
+ while ((path = va_arg(args, char const *)) != NULL)
+ if (strcmp(fpath, path) == 0) {
+ found = true;
+ break;
+ }
+ va_end(args);
+
+ if (!found)
+ ck_abort_msg("Unexpected file: %s", fpath);
+ return 0;
}
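+/*
+ * Asserts @root contains exactly the regular files listed in the trailing,
+ * NULL-terminated (path, expected first byte) pairs: every listed file must
+ * exist and start with its expected byte, and nftw() must not find any
+ * unlisted regular file under @root.
+ */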
static void
-ck_cache(struct cache_node *rsync, struct cache_node *https)
+ck_filesystem(char const *root, ...)
{
- printf("---- Validating tree... ----\n");
+ char const *path;
+ int error;
- printf("Expected nodes:\n");
- cachent_print(rsync);
- cachent_print(https);
- printf("\n");
+ va_start(fs_valist, root);
+ while ((path = va_arg(fs_valist, char const *)) != NULL)
+ ck_file(path, va_arg(fs_valist, char const *));
+ va_end(fs_valist);
- print_tree();
+ va_start(fs_valist, root);
+ errno = 0;
+ error = nftw(root, ck_filesystem_file, 32, FTW_PHYS);
+ if (error)
+ ck_abort_msg("nftw: %d %d", error, errno);
+ va_end(fs_valist);
+}
- /* Compare expected and cache */
- // XXX fix
- PR_DEBUG_MSG("%s", ">> Comparing expected and cache...");
- cachent_traverse(rsync, ck_path);
- cachent_traverse(https, ck_path);
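+/* Populates an expected rsync cache_node, for later ck_cache() comparison. */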
+static void
+init_node_rsync(struct cache_node *node, char *url, char *path,
+ int fresh, int dlerr)
+{
+ node->map.url = url;
+ node->map.path = path;
+ node->fresh = fresh;
+ node->dlerr = dlerr;
+ node->rrdp = NULL;
+}
- /* Compare expected and actual */
- PR_DEBUG_MSG("%s", ">> Comparing expected and actual...");
- ck_assert_cachent_eq(rsync, cache.rsync);
- ck_assert_cachent_eq(https, cache.https);
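+/* Same as init_node_rsync(), but for expected nodes of the HTTPS table. */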
+static void
+init_node_https(struct cache_node *node, char *url, char *path,
+ int fresh, int dlerr)
+{
+ node->map.url = url;
+ node->map.path = path;
+ node->fresh = fresh;
+ node->dlerr = dlerr;
+ node->rrdp = NULL;
+}
- cachent_delete(rsync);
- cachent_delete(https);
+static void
+ck_cache_node_eq(struct cache_node *expected, struct cache_node *actual)
+{
+ ck_assert_str_eq(expected->map.url, actual->map.url);
+ ck_assert_str_eq(expected->map.path, actual->map.path);
+ ck_assert_int_eq(expected->fresh, actual->fresh);
+ ck_assert_int_eq(expected->dlerr, actual->dlerr);
+ if (expected->rrdp == NULL)
+ ck_assert_ptr_eq(expected->rrdp, actual->rrdp);
+ // XXX else
+}
- printf("---- Validated. ----\n");
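+/*
+ * Note: relies on uthash's default insertion-order iteration, so @expecteds
+ * must be listed in the order the nodes were added to @tbl.
+ */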
+static void
+ck_cache(struct cache_node *expecteds, struct cache_table *tbl)
+{
+ struct cache_node *actual, *tmp;
+ unsigned int n;
+
+ for (n = 0; expecteds[n].map.url != NULL; n++)
+ ;
+ ck_assert_uint_eq(n, HASH_COUNT(tbl->nodes));
+
+ n = 0;
+ HASH_ITER(hh, tbl->nodes, actual, tmp) {
+ ck_cache_node_eq(&expecteds[n], actual);
+ n++;
+ }
}
static void
-ck_cache_rsync(struct cache_node *rsync)
+ck_cache_rsync(struct cache_node *expected)
{
- ck_cache(rsync, hnode(HE2UP, NULL));
+ ck_cache(expected, &cache.rsync);
}
static void
-ck_cache_https(struct cache_node *https)
+ck_cache_https(struct cache_node *expected)
{
- ck_cache(rnode(RE2UP, NULL), https);
+ ck_cache(expected, &cache.https);
}
static time_t
static time_t epoch;
-static bool
-unfreshen(struct cache_node *node)
+static void
+unfreshen(struct cache_table *tbl, struct cache_node *node)
{
- PR_DEBUG_MSG("Unfreshening %s.", node->url);
- node->flags &= ~(CNF_FRESH | CNF_VALID);
- node->mtim = epoch;
- return true;
+ node->fresh = 0;
}
static int
times[0].tv_nsec = 0;
times[1].tv_sec = epoch;
times[1].tv_nsec = 0;
- PR_DEBUG_MSG("changing times of %s", fpath);
ck_assert_int_eq(0, utimensat(AT_FDCWD, fpath, times, AT_SYMLINK_NOFOLLOW));
static void
new_iteration(bool outdate)
{
- pr_op_debug("--- Unfreshening... ---");
epoch = outdate ? get_days_ago(30) : get_days_ago(1);
- cachent_traverse(cache.rsync, unfreshen);
- cachent_traverse(cache.https, unfreshen);
+
+ pr_op_debug("--- Unfreshening... ---");
+ cache_foreach(unfreshen);
ck_assert_int_eq(0, nftw("tmp/rsync", nftw_unfreshen, 32, FTW_PHYS));
- ck_assert_int_eq(0, nftw("tmp/https", nftw_unfreshen, 32, FTW_PHYS));
pr_op_debug("---- Tree now stale. ----");
cache_print();
{
dl_error = 0;
cache_commit();
-// ck_assert_int_eq(0, system("rm -rf tmp/"));
}
/* Tests */
-static const int DOWNLOADED = CNF_RSYNC | CNF_CACHED | CNF_FRESH;
-static const int VALIDATED = RSYNC_INHERIT | CNF_VALID;
-static const int FULL = DOWNLOADED | VALIDATED;
-static const int STALE = CNF_RSYNC | CNF_CACHED;
-/* Intermediary between a downloaded and a validated node */
-static const int BRANCH = RSYNC_INHERIT;
-static const int FAILED = CNF_FRESH;
-
START_TEST(test_cache_download_rsync)
{
+ struct cache_node nodes[4] = { 0 };
+ struct cache_cage *cage;
+
setup_test();
printf("==== Startup ====\n");
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0", NULL), NULL), NULL));
+ cage = run_dl_rsync("rsync://a.b.c/d", 1);
+ ck_assert_ptr_ne(NULL, cage);
+ ck_cage(cage, "rsync://a.b.c/d", "tmp/rsync/0", NULL);
+ ck_cage(cage, "rsync://a.b.c/d/e/f.cer", "tmp/rsync/0/e/f.cer", NULL);
+ init_node_rsync(&nodes[0], "rsync://a.b.c/d", "tmp/rsync/0", 1, 0);
+ ck_cache_rsync(nodes);
+ free(cage);
printf("==== Redownload same file, nothing should happen ====\n");
- run_dl_rsync("rsync://a.b.c/d", 0, 0);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0", NULL), NULL), NULL));
+ cage = run_dl_rsync("rsync://a.b.c/d", 0);
+ ck_assert_ptr_ne(NULL, cage);
+ ck_cage(cage, "rsync://a.b.c/d", "tmp/rsync/0", NULL);
+ ck_cage(cage, "rsync://a.b.c/d/e/f.cer", "tmp/rsync/0/e/f.cer", NULL);
+ ck_cache_rsync(nodes);
+ free(cage);
/*
* rsyncs are recursive, which means if we've been recently asked to
* download d, we needn't bother redownloading d/e.
*/
printf("==== Don't redownload child ====\n");
- run_dl_rsync("rsync://a.b.c/d/e", 0, 0);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0",
- rfnode(RO2UP("a.b.c/d/e"), VALIDATED, NULL), NULL), NULL), NULL));
+ cage = run_dl_rsync("rsync://a.b.c/d/e", 0);
+ ck_assert_ptr_ne(NULL, cage);
+ ck_cage(cage, "rsync://a.b.c/d", "tmp/rsync/0", NULL);
+ ck_cage(cage, "rsync://a.b.c/d/e/f.cer", "tmp/rsync/0/e/f.cer", NULL);
+ ck_cache_rsync(nodes);
+ free(cage);
/*
	 * rsync URLs get truncated (to the module), because it results in much
	 * faster downloads, and there would be consequences for violating that
	 * convention.
*/
printf("==== rsync truncated ====\n");
- run_dl_rsync("rsync://x.y.z/m/n/o", 0, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0",
- rfnode(RO2UP("a.b.c/d/e"), VALIDATED, NULL), NULL), NULL),
- rnode(RO2UP("x.y.z"),
- rftnode(RO2UP("x.y.z/m"), DOWNLOADED, "tmp/tmp/1",
- rfnode(RO2UP("x.y.z/m/n"), BRANCH,
- rfnode(RO2UP("x.y.z/m/n/o"), VALIDATED, NULL), NULL), NULL), NULL), NULL));
+ cage = run_dl_rsync("rsync://x.y.z/m/n/o", 1);
+ ck_assert_ptr_ne(NULL, cage);
+ ck_cage(cage, "rsync://x.y.z/m", "tmp/rsync/1", NULL);
+ ck_cage(cage, "rsync://x.y.z/m/n/o", "tmp/rsync/1/n/o", NULL);
+ init_node_rsync(&nodes[1], "rsync://x.y.z/m", "tmp/rsync/1", 1, 0);
+ ck_cache_rsync(nodes);
+ free(cage);
printf("==== Sibling ====\n");
- run_dl_rsync("rsync://a.b.c/e/f", 0, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0",
- rfnode(RO2UP("a.b.c/d/e"), VALIDATED, NULL), NULL),
- rftnode(RO2UP("a.b.c/e"), DOWNLOADED, "tmp/tmp/2",
- rfnode(RO2UP("a.b.c/e/f"), VALIDATED, NULL), NULL), NULL),
- rnode(RO2UP("x.y.z"),
- rftnode(RO2UP("x.y.z/m"), DOWNLOADED, "tmp/tmp/1",
- rfnode(RO2UP("x.y.z/m/n"), BRANCH,
- rfnode(RO2UP("x.y.z/m/n/o"), VALIDATED, NULL), NULL), NULL), NULL), NULL));
+ cage = run_dl_rsync("rsync://a.b.c/e/f", 1);
+ ck_assert_ptr_ne(NULL, cage);
+ ck_cage(cage, "rsync://a.b.c/e", "tmp/rsync/2", NULL);
+ ck_cage(cage, "rsync://a.b.c/e/f/x/y/z", "tmp/rsync/2/f/x/y/z", NULL);
+ init_node_rsync(&nodes[2], "rsync://a.b.c/e", "tmp/rsync/2", 1, 0);
+ ck_cache_rsync(nodes);
+ free(cage);
cleanup_test();
}
START_TEST(test_cache_download_rsync_error)
{
+ struct cache_node nodes[3] = { 0 };
+
setup_test();
+ init_node_rsync(&nodes[0], "rsync://a.b.c/d", "tmp/rsync/0", 1, 0);
+ init_node_rsync(&nodes[1], "rsync://a.b.c/e", "tmp/rsync/1", 1, EINVAL);
+
printf("==== Startup ====\n");
dl_error = 0;
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- dl_error = -EINVAL;
- run_dl_rsync("rsync://a.b.c/e", -EINVAL, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0", NULL),
- rfnode(RO2UP("a.b.c/e"), FAILED, NULL), NULL), NULL));
+ free(run_dl_rsync("rsync://a.b.c/d", 1));
+ dl_error = EINVAL;
+ ck_assert_ptr_eq(NULL, run_dl_rsync("rsync://a.b.c/e", 1));
+ ck_cache_rsync(nodes);
printf("==== Regardless of error, not reattempted because same iteration ====\n");
dl_error = EINVAL;
- run_dl_rsync("rsync://a.b.c/e", -EINVAL, 0);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0", NULL),
- rfnode(RO2UP("a.b.c/e"), FAILED, NULL), NULL), NULL));
-
+ ck_assert_ptr_eq(NULL, run_dl_rsync("rsync://a.b.c/e", 0));
+ ck_cache_rsync(nodes);
dl_error = 0;
- run_dl_rsync("rsync://a.b.c/e", -EINVAL, 0);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0", NULL),
- rfnode(RO2UP("a.b.c/e"), FAILED, NULL), NULL), NULL));
+ ck_assert_ptr_eq(NULL, run_dl_rsync("rsync://a.b.c/e", 0));
+ ck_cache_rsync(nodes);
cleanup_test();
}
END_TEST
-START_TEST(test_cache_cleanup_rsync)
+START_TEST(test_rsync_commit)
{
+ unsigned int i;
+
setup_test();
- printf("==== First iteration: Tree is created. No prunes, because nothing's outdated ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- run_dl_rsync("rsync://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), FULL, NULL),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL), NULL), NULL));
-
- printf("==== One iteration with no changes, for paranoia ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- run_dl_rsync("rsync://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), FULL, NULL),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL), NULL), NULL));
-
- printf("==== Add one sibling ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- run_dl_rsync("rsync://a.b.c/e", 0, 1);
- run_dl_rsync("rsync://a.b.c/f", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), FULL, NULL),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL),
- rfnode(RO2UP("a.b.c/f"), FULL, NULL), NULL), NULL));
-
- printf("==== Nodes don't get updated, but they're still too young ====\n");
- new_iteration(false);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), STALE, NULL),
- rfnode(RO2UP("a.b.c/e"), STALE, NULL),
- rfnode(RO2UP("a.b.c/f"), STALE, NULL), NULL), NULL));
-
- printf("==== Remove some branches ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), FULL, NULL), NULL), NULL));
-
- printf("==== Remove old branch and add sibling at the same time ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL), NULL), NULL));
-
- printf("==== Try child ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/e/f/g", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL), NULL), NULL));
-
- printf("==== Parent again ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL), NULL), NULL));
-
- printf("==== Empty the tree ====\n");
- new_iteration(true);
- run_cleanup();
- ck_cache_rsync(rnode(RE2UP, NULL));
-
-
- printf("==== Node exists, but file doesn't ====\n");
- new_iteration(true);
- run_dl_rsync("rsync://a.b.c/e", 0, 1);
- run_dl_rsync("rsync://a.b.c/f", 0, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/e"), FULL, "tmp/tmp/B", NULL),
- rftnode(RO2UP("a.b.c/f"), FULL, "tmp/tmp/C", NULL), NULL), NULL));
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL),
- rfnode(RO2UP("a.b.c/f"), FULL, NULL), NULL), NULL));
- ck_assert_int_eq(0, file_rm_rf("tmp/rsync/a.b.c/f"));
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/e"), FULL, NULL), NULL), NULL));
+ ck_assert_int_eq(0, system("mkdir -p tmp/rsync/0 tmp/rsync/1 tmp/rsync/2 tmp/rsync/3"));
+
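+	/*
+	 * tmp/rsync/N stands in for freshly downloaded content of RPP N; each
+	 * commit_fallbacks() below is expected to mirror the queued RPPs into
+	 * tmp/fallback, which ck_filesystem() then verifies.
+	 */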
+ /* RPP0: Will remain constant */
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/0/0", "A"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/0/1", "B"));
+ /* RPP1: Will be added in its second cycle */
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/1/0", "C"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/1/1", "D"));
+ /* RPP2: Will be removed in its second cycle */
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/2/0", "E"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/2/1", "F"));
+ /* RPP3: Will be updated in its second cycle */
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/3/0", "G")); /* Keeper */
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/3/1", "H")); /* Added */
+ ck_assert_int_eq(0, write_simple_file("tmp/rsync/3/2", "I")); /* Removed */
+
+ /* Commit 1: Empty -> Empty */
+ /* Commit 2: Empty -> Empty (just free noise) */
+ for (i = 0; i < 2; i++) {
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback", NULL);
+
+ new_iteration(false);
+ }
- cleanup_test();
-}
-END_TEST
+ /* Commit 3: Empty -> Populated */
+ queue_commit("rsync://domain/mod/rpp0", "tmp/rsync/0/0", "tmp/rsync/0/1");
+ queue_commit("rsync://domain/mod/rpp2", "tmp/rsync/2/0", "tmp/rsync/2/1");
+ queue_commit("rsync://domain/mod/rpp3", "tmp/rsync/3/0", "tmp/rsync/3/2");
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback",
+ /* RPP0 */ "tmp/fallback/0/0", "A", "tmp/fallback/0/1", "B",
+ /* RPP2 */ "tmp/fallback/1/0", "E", "tmp/fallback/1/1", "F",
+ /* RPP3 */ "tmp/fallback/2/0", "G", "tmp/fallback/2/1", "I",
+ NULL);
-START_TEST(test_cache_cleanup_rsync_error)
-{
- setup_test();
+ new_iteration(false);
+
+ /* Commit 4: Populated -> Populated */
+ /* XXX check the refresh does, in fact, only return fallbacks when the RPP doesn't change */
+ queue_commit("rsync://domain/mod/rpp0", "tmp/fallback/0/0", "tmp/fallback/0/1");
+ queue_commit("rsync://domain/mod/rpp1", "tmp/rsync/1/0", "tmp/rsync/1/1");
+ queue_commit("rsync://domain/mod/rpp3", "tmp/fallback/2/0", "tmp/rsync/3/1");
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback",
+ /* RPP0 */ "tmp/fallback/0/0", "A", "tmp/fallback/0/1", "B",
+ /* RPP3 */ "tmp/fallback/2/0", "G", "tmp/fallback/2/2", "H",
+ /* RPP1 */ "tmp/fallback/3/0", "C", "tmp/fallback/3/1", "D",
+ NULL);
- printf("==== Set up ====\n");
- dl_error = 0;
- run_dl_rsync("rsync://a.b.c/d", 0, 1);
- dl_error = -EINVAL;
- run_dl_rsync("rsync://a.b.c/e", -EINVAL, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rftnode(RO2UP("a.b.c/d"), FULL, "tmp/tmp/0", NULL),
- rfnode(RO2UP("a.b.c/e"), FAILED, NULL), NULL), NULL));
-
- printf("==== Node deleted because file doesn't exist ====\n");
- run_cleanup();
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), FULL, NULL), NULL), NULL));
-
- printf("==== Node and file preserved because young ====\n");
- /* (Deletion does not depend on success or failure.) */
new_iteration(false);
- dl_error = -EINVAL;
- run_dl_rsync("rsync://a.b.c/d", -EINVAL, 1);
- ck_cache_rsync(
- rnode(RE2UP,
- rnode(RO2UP("a.b.c"),
- rfnode(RO2UP("a.b.c/d"), DOWNLOADED, NULL), NULL), NULL));
- printf("==== Error node deleted because old ====\n");
- new_iteration(true);
- run_cleanup();
- ck_cache_rsync(rnode(RE2UP, NULL));
+ /* Commit 5: Populated -> Empty */
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback", NULL);
- cleanup_test();
+ cache_foreach(delete_node);
}
END_TEST
-/* XXX ================================================================ */
-
-static const int HDOWNLOADED = CNF_CACHED | CNF_FRESH;
-static const int HVALIDATED = CNF_CACHED | CNF_VALID;
-static const int HFULL = HDOWNLOADED | HVALIDATED;
-static const int HFAILED = CNF_FRESH;
-
START_TEST(test_cache_download_https)
{
+ struct cache_node nodes[4] = { 0 };
+
setup_test();
- printf("==== Download *file* e ====\n");
- run_dl_https("https://a.b.c/d/e", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hnode(HO2UP("a.b.c/d"),
- hftnode(HO2UP("a.b.c/d/e"), HFULL, "tmp/tmp/0", NULL), NULL), NULL), NULL));
+ printf("==== Download file ====\n");
+ run_dl_https("https://a.b.c/d/e", 1, "tmp/https/0");
+ init_node_https(&nodes[0], "https://a.b.c/d/e", "tmp/https/0", 1, 0);
+ ck_cache_https(nodes);
+
+ printf("==== Download same file ====\n");
+ run_dl_https("https://a.b.c/d/e", 0, "tmp/https/0");
+ ck_cache_https(nodes);
printf("==== Download something else 1 ====\n");
- run_dl_https("https://a.b.c/e", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hnode(HO2UP("a.b.c/d"),
- hftnode(HO2UP("a.b.c/d/e"), HFULL, "tmp/tmp/0", NULL), NULL),
- hftnode(HO2UP("a.b.c/e"), HFULL, "tmp/tmp/1", NULL), NULL), NULL));
+ run_dl_https("https://a.b.c/e", 1, "tmp/https/1");
+ init_node_https(&nodes[1], "https://a.b.c/e", "tmp/https/1", 1, 0);
+ ck_cache_https(nodes);
printf("==== Download something else 2 ====\n");
- run_dl_https("https://x.y.z/e", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hnode(HO2UP("a.b.c/d"),
- hftnode(HO2UP("a.b.c/d/e"), HFULL, "tmp/tmp/0", NULL), NULL),
- hftnode(HO2UP("a.b.c/e"), HFULL, "tmp/tmp/1", NULL), NULL),
- hnode(HO2UP("x.y.z"),
- hftnode(HO2UP("x.y.z/e"), HFULL, "tmp/tmp/2", NULL), NULL), NULL));
+ run_dl_https("https://x.y.z/e", 1, "tmp/https/2");
+ init_node_https(&nodes[2], "https://x.y.z/e", "tmp/https/2", 1, 0);
+ ck_cache_https(nodes);
cleanup_test();
}
START_TEST(test_cache_download_https_error)
{
+ struct cache_node nodes[3] = { 0 };
+
setup_test();
+ init_node_https(&nodes[0], "https://a.b.c/d", "tmp/https/0", 1, 0);
+ init_node_https(&nodes[1], "https://a.b.c/e", "tmp/https/1", 1, EINVAL);
+
printf("==== Startup ====\n");
dl_error = 0;
- run_dl_https("https://a.b.c/d", 0, 1);
- dl_error = -EINVAL;
- run_dl_https("https://a.b.c/e", -EINVAL, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/d"), HFULL, "tmp/tmp/0", NULL),
- hftnode(HO2UP("a.b.c/e"), HFAILED, NULL, NULL), NULL), NULL));
+ run_dl_https("https://a.b.c/d", 1, "tmp/https/0");
+ dl_error = EINVAL;
+ run_dl_https("https://a.b.c/e", 1, NULL);
+ ck_cache_https(nodes);
printf("==== Regardless of error, not reattempted because same iteration ====\n");
dl_error = -EINVAL;
- run_dl_https("https://a.b.c/d", 0, 0);
+ run_dl_https("https://a.b.c/d", 0, "tmp/https/0");
+ run_dl_https("https://a.b.c/e", 0, NULL);
dl_error = 0;
- run_dl_https("https://a.b.c/e", -EINVAL, 0);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/d"), HFULL, "tmp/tmp/0", NULL),
- hftnode(HO2UP("a.b.c/e"), HFAILED, NULL, NULL), NULL), NULL));
+ run_dl_https("https://a.b.c/d", 0, "tmp/https/0");
+ run_dl_https("https://a.b.c/e", 0, NULL);
+ ck_cache_https(nodes);
cleanup_test();
}
END_TEST
-// XXX not testing alts so far
-
-START_TEST(test_cache_cleanup_https)
+/* See comments at test_rsync_commit(). */
+START_TEST(test_https_commit)
{
- setup_test();
+ struct cache_mapping map;
+ unsigned int i;
- printf("==== First iteration; make a tree and clean it ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/d", 0, 1);
- run_dl_https("https://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/d"), HFULL, NULL),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL), NULL), NULL));
-
- printf("==== Remove one branch ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/d", 0, 1);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/d"), HFULL, NULL), NULL), NULL));
+ setup_test();
- printf("==== Change the one branch ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL), NULL), NULL));
+ ck_assert_int_eq(0, write_simple_file("tmp/https/50", "A")); /* Keeper */
+ ck_assert_int_eq(0, write_simple_file("tmp/https/51", "B")); /* Added */
+ ck_assert_int_eq(0, write_simple_file("tmp/https/52", "C")); /* Removed */
- printf("==== Add a child to the same branch, do not update the old one ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/e/f/g", 0, 1);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hnode(HO2UP("a.b.c/e"),
- hnode(HO2UP("a.b.c/e/f"),
- hfnode(HO2UP("a.b.c/e/f/g"), HFULL, NULL), NULL), NULL), NULL), NULL));
-
- printf("==== Download parent, do not update child ====\n");
- /* (Children need to die, because parent is now a file) */
- new_iteration(true);
- run_dl_https("https://a.b.c/e/f", 0, 1);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hnode(HO2UP("a.b.c/e"),
- hfnode(HO2UP("a.b.c/e/f"), HFULL, NULL), NULL), NULL), NULL));
+ /* 1, 2 */
+ for (i = 0; i < 2; i++) {
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback", NULL);
- printf("==== Do it again ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/e", 0, 1);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL), NULL), NULL));
+ new_iteration(false);
+ }
+ /* 3 */
+ map.url = "https://domain/rpki/ta50.cer";
+ map.path = "tmp/https/50";
+ cache_commit_file(&map);
+ map.url = "https://domain/rpki/ta52.cer";
+ map.path = "tmp/https/52";
+ cache_commit_file(&map);
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback",
+ "tmp/fallback/0", "A",
+ "tmp/fallback/1", "C",
+ NULL);
- printf("==== Empty the tree ====\n");
- new_iteration(true);
- run_cleanup();
- ck_cache_https(hnode(HE2UP, NULL));
+ new_iteration(false);
- printf("==== Node exists, but file doesn't ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/e", 0, 1);
- run_dl_https("https://a.b.c/f/g/h", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/e"), HFULL, "tmp/tmp/7", NULL),
- hnode(HO2UP("a.b.c/f"),
- hnode(HO2UP("a.b.c/f/g"),
- hftnode(HO2UP("a.b.c/f/g/h"), HFULL, "tmp/tmp/8", NULL), NULL), NULL), NULL), NULL));
- run_cleanup(); /* Move from tmp/tmp to tmp/https */
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL),
- hnode(HO2UP("a.b.c/f"),
- hnode(HO2UP("a.b.c/f/g"),
- hfnode(HO2UP("a.b.c/f/g/h"), HFULL, NULL), NULL), NULL), NULL), NULL));
- ck_assert_int_eq(0, file_rm_rf("tmp/https/a.b.c/f/g/h"));
- run_cleanup(); /* Actual test */
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL), NULL), NULL));
+ /* 4 */
+ map.url = "https://domain/rpki/ta50.cer";
+ map.path = "tmp/fallback/0";
+ cache_commit_file(&map);
+ map.url = "https://domain/rpki/ta51.cer";
+ map.path = "tmp/https/51";
+ cache_commit_file(&map);
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback",
+ "tmp/fallback/0", "A",
+ "tmp/fallback/2", "B",
+ NULL);
- printf("==== Temporal version disappears before we get a commit ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/e", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/e"), HFULL, "tmp/tmp/9", NULL), NULL), NULL));
- ck_assert_int_eq(0, file_rm_rf("tmp/tmp/9"));
- run_cleanup();
- ck_cache_https(hnode(HE2UP, NULL));
-
- printf("==== Temporal version disappears after we get a commit ====\n");
- new_iteration(true);
- run_dl_https("https://a.b.c/e", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/e"), HFULL, "tmp/tmp/A", NULL), NULL), NULL));
- run_cleanup(); /* Commit */
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL, NULL), NULL), NULL));
new_iteration(false);
- run_dl_https("https://a.b.c/e", 0, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/e"), HFULL, "tmp/tmp/B", NULL), NULL), NULL));
- ck_assert_int_eq(0, file_rm_rf("tmp/tmp/B"));
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/e"), HFULL, NULL), NULL), NULL));
- cleanup_test();
+ /* 5 */
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback", NULL);
+
+ cache_foreach(delete_node);
}
END_TEST
-START_TEST(test_cache_cleanup_https_error)
+/* See comments at test_rsync_commit(). */
+START_TEST(test_rrdp_commit)
{
+ unsigned int i;
+
setup_test();
- printf("==== Set up ====\n");
- dl_error = 0;
- run_dl_https("https://a.b.c/d", 0, 1);
- dl_error = -EINVAL;
- run_dl_https("https://a.b.c/e", -EINVAL, 1);
- PR_DEBUG;
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hftnode(HO2UP("a.b.c/d"), HFULL, "tmp/tmp/0", NULL),
- hfnode(HO2UP("a.b.c/e"), HFAILED, NULL), NULL), NULL));
+ ck_assert_int_eq(0, system("mkdir -p tmp/rrdp/0 tmp/rrdp/1 tmp/rrdp/2 tmp/rrdp/3"));
- printf("==== Deleted because file ENOENT ====\n");
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/d"), HFULL, NULL), NULL), NULL));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/0/0", "A"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/0/1", "B"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/1/0", "C"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/1/1", "D"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/2/0", "E"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/2/1", "F"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/3/0", "G"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/3/1", "H"));
+ ck_assert_int_eq(0, write_simple_file("tmp/rrdp/3/2", "I"));
+
+ /* 1, 2 */
+ for (i = 0; i < 2; i++) {
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback", NULL);
+
+ new_iteration(false);
+ }
+
+ /* 3 */
+ queue_commit("rsync://domain/mod/rpp0", "tmp/rrdp/0/0", "tmp/rrdp/0/1");
+ queue_commit("rsync://domain/mod/rpp2", "tmp/rrdp/2/0", "tmp/rrdp/2/1");
+ queue_commit("rsync://domain/mod/rpp3", "tmp/rrdp/3/0", "tmp/rrdp/3/2");
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback",
+ "tmp/fallback/0/0", "A", "tmp/fallback/0/1", "B",
+ "tmp/fallback/1/0", "E", "tmp/fallback/1/1", "F",
+ "tmp/fallback/2/0", "G", "tmp/fallback/2/1", "I",
+ NULL);
- printf("==== Fail d ====\n");
new_iteration(false);
- dl_error = -EINVAL;
- run_dl_https("https://a.b.c/d", -EINVAL, 1);
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/d"), CNF_CACHED | CNF_FRESH, NULL), NULL), NULL));
- printf("==== Not deleted, because not old ====\n");
+ /* 4 */
+ queue_commit("rsync://domain/mod/rpp0", "tmp/fallback/0/0", "tmp/fallback/0/1");
+ queue_commit("rsync://domain/mod/rpp1", "tmp/rrdp/1/0", "tmp/rrdp/1/1");
+ queue_commit("rsync://domain/mod/rpp3", "tmp/fallback/2/0", "tmp/rrdp/3/1");
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback",
+ "tmp/fallback/0/0", "A", "tmp/fallback/0/1", "B",
+ "tmp/fallback/2/0", "G", "tmp/fallback/2/2", "H",
+ "tmp/fallback/3/0", "C", "tmp/fallback/3/1", "D",
+ NULL);
+
new_iteration(false);
- run_cleanup();
- ck_cache_https(
- hnode(HE2UP,
- hnode(HO2UP("a.b.c"),
- hfnode(HO2UP("a.b.c/d"), CNF_CACHED, NULL), NULL), NULL));
- printf("==== Become old ====\n");
- new_iteration(true);
- run_cleanup();
- ck_cache_https(hnode(HE2UP, NULL));
+ /* 5 */
+ commit_fallbacks();
+ ck_filesystem("tmp/fallback", NULL);
- cleanup_test();
+ cache_foreach(delete_node);
}
END_TEST
cleanup_test();
}
END_TEST
-//
-//START_TEST(test_tal_json)
-//{
-// json_t *json;
-// char *str;
-//
-// setup_test();
-//
-// ck_assert_int_eq(0, system("rm -rf tmp/"));
-// ck_assert_int_eq(0, system("mkdir -p tmp"));
-//
-// add_node(cache, NODE("rsync://a.b.c/d", 0, 1, 0));
-// add_node(cache, NODE("rsync://a.b.c/e", 1, 0, 0));
-// add_node(cache, NODE("rsync://x.y.z/e", 0, 1, 0));
-// add_node(cache, NODE("https://a/b", 1, 1, 0));
-// add_node(cache, node("https://a/c", 0, 0, 1, 0, 1));
-//
-// json = build_tal_json(cache);
-// ck_assert_int_eq(0, json_dump_file(json, "tmp/" TAL_METAFILE, JSON_COMPACT));
-//
-// str = json_dumps(json, /* JSON_INDENT(4) */ JSON_COMPACT);
-// json_decref(json);
-//
-// ck_assert_str_eq(
-// "[{\"type\":\"RPP\",\"url\":\"rsync://a.b.c/d\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":0,\"success-timestamp\":\"1970-01-01T00:00:00Z\"},"
-// "{\"type\":\"RPP\",\"url\":\"rsync://a.b.c/e\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":1},"
-// "{\"type\":\"RPP\",\"url\":\"rsync://x.y.z/e\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":0,\"success-timestamp\":\"1970-01-01T00:00:00Z\"},"
-// "{\"type\":\"TA (HTTP)\",\"url\":\"https://a/b\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":1,\"success-timestamp\":\"1970-01-01T00:00:00Z\"},"
-// "{\"type\":\"RRDP Notification\",\"url\":\"https://a/c\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":0,\"success-timestamp\":\"1970-01-01T00:00:00Z\"}]",
-// str);
-// free(str);
-//
-// cache_reset(cache);
-//
-// load_tal_json(cache);
-// ck_assert_ptr_ne(NULL, cache->ht);
-//
-// ck_cache(
-// NODE("rsync://a.b.c/d", 0, 1, 0),
-// NODE("rsync://a.b.c/e", 1, 0, 0),
-// NODE("rsync://x.y.z/e", 0, 1, 0),
-// NODE("https://a/b", 1, 1, 0),
-// NODE("https://a/c", 0, 1, 0),
-// NULL);
-//
-// cleanup_test();
-//}
-//END_TEST
-//
-//static void
-//prepare_map_list(struct map_list *maps, ...)
-//{
-// char const *str;
-// enum map_type type;
-// struct cache_mapping *map;
-// va_list args;
-//
-// maps_init(maps);
-//
-// va_start(args, maps);
-// while ((str = va_arg(args, char const *)) != NULL) {
-// if (url_is_https(str))
-// type = MAP_HTTP;
-// else if (url_is_rsync(str))
-// type = MAP_RSYNC;
-// else
-// ck_abort_msg("Bad protocol: %s", str);
-// ck_assert_int_eq(0, map_create(&map, type, str));
-// maps_add(maps, map);
-// }
-// va_end(args);
-//}
-
-//#define PREPARE_MAP_LIST(maps, ...) prepare_map_list(maps, ##__VA_ARGS__, NULL)
-//
-//START_TEST(test_recover)
-//{
-// struct map_list maps;
-//
-// setup_test();
-//
-// /* Query on empty database */
-// PREPARE_MAP_LIST(&maps, "rsync://a.b.c/d", "https://a.b.c/d");
-// ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* Only first URI is cached */
-// cache_reset(cache);
-// run_cache_download("rsync://a/b/c", 0, 1, 0);
-//
-// PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
-// ck_assert_ptr_eq(maps.array[0], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* Only second URI is cached */
-// cache_reset(cache);
-// run_cache_download("https://d/e", 0, 0, 1);
-//
-// PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
-// ck_assert_ptr_eq(maps.array[1], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* Only third URI is cached */
-// cache_reset(cache);
-// run_cache_download("https://f", 0, 0, 1);
-//
-// PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
-// ck_assert_ptr_eq(maps.array[2], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* None was cached */
-// cache_reset(cache);
-// run_cache_download("rsync://d/e", 0, 1, 0);
-//
-// PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
-// ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /*
-// * At present, cache_recover() can only be called after all of a
-// * download's URLs yielded failure.
-// * However, node.error can still be zero. This happens when the download
-// * was successful, but the RRDP code wasn't able to expand the snapshot
-// * or deltas.
-// */
-// cache_reset(cache);
-//
-// add_node(cache, node("rsync/a/1", 100, 0, 1, 100, 0));
-// add_node(cache, node("rsync/a/2", 100, 1, 1, 100, 0));
-// add_node(cache, node("rsync/a/3", 200, 0, 1, 100, 0));
-// add_node(cache, node("rsync/a/4", 200, 1, 1, 100, 0));
-// add_node(cache, node("rsync/a/5", 100, 0, 1, 200, 0));
-// add_node(cache, node("rsync/a/6", 100, 1, 1, 200, 0));
-// add_node(cache, node("rsync/b/1", 100, 0, 0, 100, 0));
-// add_node(cache, node("rsync/b/2", 100, 1, 0, 100, 0));
-// add_node(cache, node("rsync/b/3", 200, 0, 0, 100, 0));
-// add_node(cache, node("rsync/b/4", 200, 1, 0, 100, 0));
-// add_node(cache, node("rsync/b/5", 100, 0, 0, 200, 0));
-// add_node(cache, node("rsync/b/6", 100, 1, 0, 200, 0));
-//
-// /* Multiple successful caches: Prioritize the most recent one */
-// PREPARE_MAP_LIST(&maps, "rsync://a/1", "rsync://a/3", "rsync://a/5");
-// ck_assert_ptr_eq(maps.array[2], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// PREPARE_MAP_LIST(&maps, "rsync://a/5", "rsync://a/1", "rsync://a/3");
-// ck_assert_ptr_eq(maps.array[0], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* No successful caches: No viable candidates */
-// PREPARE_MAP_LIST(&maps, "rsync://b/2", "rsync://b/4", "rsync://b/6");
-// ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* Status: CNF_SUCCESS is better than 0. */
-// PREPARE_MAP_LIST(&maps, "rsync://b/1", "rsync://a/1");
-// ck_assert_ptr_eq(maps.array[1], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /*
-// * If CNF_SUCCESS && error, Fort will probably run into a problem
-// * reading the cached directory, because it's either outdated or
-// * recently corrupted.
-// * But it should still TRY to read it, as there's a chance the
-// * outdatedness is not that severe.
-// */
-// PREPARE_MAP_LIST(&maps, "rsync://a/2", "rsync://b/2");
-// ck_assert_ptr_eq(maps.array[0], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* Parents of downloaded nodes */
-// PREPARE_MAP_LIST(&maps, "rsync://a", "rsync://b");
-// ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// /* Try them all at the same time */
-// PREPARE_MAP_LIST(&maps,
-// "rsync://a", "rsync://a/1", "rsync://a/2", "rsync://a/3",
-// "rsync://a/4", "rsync://a/5", "rsync://a/6",
-// "rsync://b", "rsync://b/1", "rsync://b/2", "rsync://b/3",
-// "rsync://b/4", "rsync://b/5", "rsync://b/6",
-// "rsync://e/1");
-// ck_assert_ptr_eq(maps.array[5], cache_recover(cache, &maps));
-// maps_cleanup(&maps);
-//
-// cleanup_test();
-//}
-//END_TEST
+
+START_TEST(test_tal_json)
+{
+ json_t *json;
+ char *str;
+
+ setup_test();
+
+ ck_assert_int_eq(0, system("rm -rf tmp/"));
+ ck_assert_int_eq(0, system("mkdir -p tmp"));
+
+ add_node(cache, NODE("rsync://a.b.c/d", 0, 1, 0));
+ add_node(cache, NODE("rsync://a.b.c/e", 1, 0, 0));
+ add_node(cache, NODE("rsync://x.y.z/e", 0, 1, 0));
+ add_node(cache, NODE("https://a/b", 1, 1, 0));
+ add_node(cache, node("https://a/c", 0, 0, 1, 0, 1));
+
+ json = build_tal_json(cache);
+ ck_assert_int_eq(0, json_dump_file(json, "tmp/" TAL_METAFILE, JSON_COMPACT));
+
+ str = json_dumps(json, /* JSON_INDENT(4) */ JSON_COMPACT);
+ json_decref(json);
+
+ ck_assert_str_eq(
+ "[{\"type\":\"RPP\",\"url\":\"rsync://a.b.c/d\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":0,\"success-timestamp\":\"1970-01-01T00:00:00Z\"},"
+ "{\"type\":\"RPP\",\"url\":\"rsync://a.b.c/e\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":1},"
+ "{\"type\":\"RPP\",\"url\":\"rsync://x.y.z/e\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":0,\"success-timestamp\":\"1970-01-01T00:00:00Z\"},"
+ "{\"type\":\"TA (HTTP)\",\"url\":\"https://a/b\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":1,\"success-timestamp\":\"1970-01-01T00:00:00Z\"},"
+ "{\"type\":\"RRDP Notification\",\"url\":\"https://a/c\",\"attempt-timestamp\":\"1970-01-01T00:00:00Z\",\"attempt-result\":0,\"success-timestamp\":\"1970-01-01T00:00:00Z\"}]",
+ str);
+ free(str);
+
+ cache_reset(cache);
+
+ load_tal_json(cache);
+ ck_assert_ptr_ne(NULL, cache->ht);
+
+ ck_cache(
+ NODE("rsync://a.b.c/d", 0, 1, 0),
+ NODE("rsync://a.b.c/e", 1, 0, 0),
+ NODE("rsync://x.y.z/e", 0, 1, 0),
+ NODE("https://a/b", 1, 1, 0),
+ NODE("https://a/c", 0, 1, 0),
+ NULL);
+
+ cleanup_test();
+}
+END_TEST
+
+static void
+prepare_map_list(struct map_list *maps, ...)
+{
+ char const *str;
+ enum map_type type;
+ struct cache_mapping *map;
+ va_list args;
+
+ maps_init(maps);
+
+ va_start(args, maps);
+ while ((str = va_arg(args, char const *)) != NULL) {
+ if (url_is_https(str))
+ type = MAP_HTTP;
+ else if (url_is_rsync(str))
+ type = MAP_RSYNC;
+ else
+ ck_abort_msg("Bad protocol: %s", str);
+ ck_assert_int_eq(0, map_create(&map, type, str));
+ maps_add(maps, map);
+ }
+ va_end(args);
+}
+
+#define PREPARE_MAP_LIST(maps, ...) prepare_map_list(maps, ##__VA_ARGS__, NULL)
+
+START_TEST(test_recover)
+{
+ struct map_list maps;
+
+ setup_test();
+
+ /* Query on empty database */
+ PREPARE_MAP_LIST(&maps, "rsync://a.b.c/d", "https://a.b.c/d");
+ ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* Only first URI is cached */
+ cache_reset(cache);
+ run_cache_download("rsync://a/b/c", 0, 1, 0);
+
+ PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
+ ck_assert_ptr_eq(maps.array[0], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* Only second URI is cached */
+ cache_reset(cache);
+ run_cache_download("https://d/e", 0, 0, 1);
+
+ PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
+ ck_assert_ptr_eq(maps.array[1], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* Only third URI is cached */
+ cache_reset(cache);
+ run_cache_download("https://f", 0, 0, 1);
+
+ PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
+ ck_assert_ptr_eq(maps.array[2], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* None was cached */
+ cache_reset(cache);
+ run_cache_download("rsync://d/e", 0, 1, 0);
+
+ PREPARE_MAP_LIST(&maps, "rsync://a/b/c", "https://d/e", "https://f");
+ ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /*
+ * At present, cache_recover() can only be called after all of a
+ * download's URLs yielded failure.
+ * However, node.error can still be zero. This happens when the download
+ * was successful, but the RRDP code wasn't able to expand the snapshot
+ * or deltas.
+ */
+ cache_reset(cache);
+
+ add_node(cache, node("rsync/a/1", 100, 0, 1, 100, 0));
+ add_node(cache, node("rsync/a/2", 100, 1, 1, 100, 0));
+ add_node(cache, node("rsync/a/3", 200, 0, 1, 100, 0));
+ add_node(cache, node("rsync/a/4", 200, 1, 1, 100, 0));
+ add_node(cache, node("rsync/a/5", 100, 0, 1, 200, 0));
+ add_node(cache, node("rsync/a/6", 100, 1, 1, 200, 0));
+ add_node(cache, node("rsync/b/1", 100, 0, 0, 100, 0));
+ add_node(cache, node("rsync/b/2", 100, 1, 0, 100, 0));
+ add_node(cache, node("rsync/b/3", 200, 0, 0, 100, 0));
+ add_node(cache, node("rsync/b/4", 200, 1, 0, 100, 0));
+ add_node(cache, node("rsync/b/5", 100, 0, 0, 200, 0));
+ add_node(cache, node("rsync/b/6", 100, 1, 0, 200, 0));
+
+ /* Multiple successful caches: Prioritize the most recent one */
+ PREPARE_MAP_LIST(&maps, "rsync://a/1", "rsync://a/3", "rsync://a/5");
+ ck_assert_ptr_eq(maps.array[2], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ PREPARE_MAP_LIST(&maps, "rsync://a/5", "rsync://a/1", "rsync://a/3");
+ ck_assert_ptr_eq(maps.array[0], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* No successful caches: No viable candidates */
+ PREPARE_MAP_LIST(&maps, "rsync://b/2", "rsync://b/4", "rsync://b/6");
+ ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* Status: CNF_SUCCESS is better than 0. */
+ PREPARE_MAP_LIST(&maps, "rsync://b/1", "rsync://a/1");
+ ck_assert_ptr_eq(maps.array[1], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /*
+ * If CNF_SUCCESS && error, Fort will probably run into a problem
+ * reading the cached directory, because it's either outdated or
+ * recently corrupted.
+ * But it should still TRY to read it, as there's a chance the
+ * outdatedness is not that severe.
+ */
+ PREPARE_MAP_LIST(&maps, "rsync://a/2", "rsync://b/2");
+ ck_assert_ptr_eq(maps.array[0], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* Parents of downloaded nodes */
+ PREPARE_MAP_LIST(&maps, "rsync://a", "rsync://b");
+ ck_assert_ptr_eq(NULL, cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ /* Try them all at the same time */
+ PREPARE_MAP_LIST(&maps,
+ "rsync://a", "rsync://a/1", "rsync://a/2", "rsync://a/3",
+ "rsync://a/4", "rsync://a/5", "rsync://a/6",
+ "rsync://b", "rsync://b/1", "rsync://b/2", "rsync://b/3",
+ "rsync://b/4", "rsync://b/5", "rsync://b/6",
+ "rsync://e/1");
+ ck_assert_ptr_eq(maps.array[5], cache_recover(cache, &maps));
+ maps_cleanup(&maps);
+
+ cleanup_test();
+}
+END_TEST
/* Boilerplate */
static Suite *thread_pool_suite(void)
{
Suite *suite;
- TCase *rsync, *https, *mix, *dot, *meta, *recover;
+ TCase *rsync, *https, *rrdp, *mix, *dot, *meta, *recover;
rsync = tcase_create("rsync");
tcase_add_test(rsync, test_cache_download_rsync);
tcase_add_test(rsync, test_cache_download_rsync_error);
- tcase_add_test(rsync, test_cache_cleanup_rsync);
- tcase_add_test(rsync, test_cache_cleanup_rsync_error);
+ tcase_add_test(rsync, test_rsync_commit);
https = tcase_create("https");
tcase_add_test(https, test_cache_download_https);
tcase_add_test(https, test_cache_download_https_error);
- tcase_add_test(https, test_cache_cleanup_https);
- tcase_add_test(https, test_cache_cleanup_https_error);
+ tcase_add_test(https, test_https_commit);
+
+ rrdp = tcase_create("rrdp");
+ tcase_add_test(rrdp, test_rrdp_commit);
mix = tcase_create("mix");
tcase_add_test(https, test_collisions);
tcase_add_test(dot, test_dots);
meta = tcase_create(TAL_METAFILE);
-// tcase_add_test(meta, test_tal_json);
+ tcase_add_test(meta, test_tal_json);
recover = tcase_create("recover");
-// tcase_add_test(recover, test_recover);
+ tcase_add_test(recover, test_recover);
suite = suite_create("local-cache");
suite_add_tcase(suite, rsync);
suite_add_tcase(suite, https);
+ suite_add_tcase(suite, rrdp);
+ suite_add_tcase(suite, mix);
suite_add_tcase(suite, dot);
suite_add_tcase(suite, meta);
suite_add_tcase(suite, recover);
#include "cache_util.h"
+/* XXX Might wanna delete this */
+
#include <check.h>
#include <string.h>
#include "types/uthash.h"
ck_assert_str_eq(expected->path, actual->path);
ck_assert_str_eq(expected->name, actual->name);
ck_assert_int_eq(expected->flags, actual->flags);
- if (expected->tmppath)
- ck_assert_str_eq(expected->tmppath, actual->tmppath);
- else
- ck_assert_ptr_eq(NULL, actual->tmppath);
+	ck_assert_str(expected->tmppath, actual->tmppath);
HASH_ITER(hh, expected->children, echild, tmp) {
HASH_FIND(hh, actual->children, echild->name,
#define TEST_CACHE_UTIL_H_
#include <stdarg.h>
-#include "cachent.h"
void ck_assert_cachent_eq(struct cache_node *, struct cache_node *);
#include <check.h>
#include "alloc.c"
-#include "cachent.c"
-#include "cache_util.c"
#include "mock.c"
#include "types/path.c"
#include "types/url.c"
static char deleted[16][6];
static unsigned int dn;
-MOCK_ABORT_VOID(rrdp_state_cleanup, struct cachefile_notification *notif)
-
static void
__delete_node_cb(struct cache_node const *node)
{
-#include "json_util.c"
+#include "common.c"
#include <check.h>
+#include "alloc.c"
#include "mock.c"
START_TEST(test_tt)
{
- char str[JSON_TS_LEN + 1];
+ char str[FORT_TS_LEN + 1];
time_t tt;
- ck_assert_int_eq(0, str2tt("2024-03-14T17:51:16Z", &tt));
+ ck_assert_int_eq(0, str2time("2024-03-14T17:51:16Z", &tt));
memset(str, 'f', sizeof(str));
- ck_assert_int_eq(0, tt2str(tt, str));
+ ck_assert_int_eq(0, time2str(tt, str));
ck_assert_str_eq("2024-03-14T17:51:16Z", str);
- ck_assert_int_eq('f', str[JSON_TS_LEN]); /* Tests JSON_TS_LEN. */
+ ck_assert_int_eq('f', str[FORT_TS_LEN]); /* Tests FORT_TS_LEN. */
}
END_TEST
-static Suite *json_load_suite(void)
+static Suite *common_load_suite(void)
{
Suite *suite;
TCase *core;
core = tcase_create("utils");
tcase_add_test(core, test_tt);
- suite = suite_create("JSON util");
+ suite = suite_create("commons");
suite_add_tcase(suite, core);
return suite;
}
SRunner *runner;
int tests_failed;
- suite = json_load_suite();
+ suite = common_load_suite();
runner = srunner_create(suite);
srunner_run_all(runner, CK_NORMAL);
MOCK_VOID(fnstack_init, void)
MOCK_VOID(fnstack_push, char const *file)
-MOCK_VOID(fnstack_push_map, struct cache_mapping *map)
+MOCK_VOID(fnstack_push_map, struct cache_mapping const *map)
MOCK_VOID(fnstack_pop, void)
MOCK_VOID(fnstack_cleanup, void)
+
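+/* ck_assert_str_eq(), except an expected NULL asserts @actual is also NULL. */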
+void
+ck_assert_str(char const *expected, char const *actual)
+{
+ if (expected)
+ ck_assert_str_eq(expected, actual);
+ else
+ ck_assert_ptr_eq(NULL, actual);
+}
#include <check.h>
#include "alloc.c"
-#include "cachent.c"
#include "common.c"
#include "mock.c"
#include "object/manifest.c"
#include "types/path.c"
#include "types/url.c"
-MOCK_ABORT_VOID(rrdp_state_cleanup, struct cachefile_notification *notif)
MOCK_ABORT_INT(signed_object_decode, struct signed_object *sobj, char const *path)
MOCK_ABORT_VOID(signed_object_cleanup, struct signed_object *sobj)
-MOCK_VOID(__delete_node_cb, struct cache_node const *node)
#define BUFFER_LEN 128
static uint8_t buffer[BUFFER_LEN];
#include "types/map.c"
#include "types/path.c"
#include "types/str.c"
+#include "types/url.c"
/* Mocks */
MOCK_ABORT_PTR(db_table_create, db_table, void)
MOCK_VOID(db_table_destroy, struct db_table *table)
MOCK_ABORT_INT(db_table_join, struct db_table *dst, struct db_table *src)
-MOCK_ABORT_INT(deferstack_pop, struct cert_stack *stack,
- struct deferred_cert *result)
MOCK_ABORT_INT(handle_roa_v4, uint32_t as, struct ipv4_prefix const *prefix,
uint8_t max_length, void *arg)
MOCK_ABORT_INT(handle_roa_v6, uint32_t as, struct ipv6_prefix const *prefix,
#include "alloc.c"
#include "base64.c"
-#include "cachent.c"
#include "cachetmp.c"
#include "common.c"
#include "file.c"
#include "mock.c"
#include "relax_ng.c"
#include "rrdp.c"
+#include "types/map.c"
#include "types/path.c"
#include "types/url.c"
/* Mocks */
-MOCK_VOID(__delete_node_cb, struct cache_node const *node)
MOCK_ABORT_INT(http_download, char const *url, char const *path, curl_off_t ims,
bool *changed)
BN_free(bn);
}
-static struct cachefile_notification *
-create_cachefile_notif(char const *session, char const *serial, ...)
+static struct rrdp_state *
+create_rrdp_state(char const *session, char const *serial, ...)
{
- struct cachefile_notification *notif;
+ struct rrdp_state *state;
struct rrdp_hash *hash;
int dh_byte;
va_list args;
- notif = pmalloc(sizeof(struct cachefile_notification));
+ state = pmalloc(sizeof(struct rrdp_state));
- notif->session.session_id = pstrdup(session);
- notif->session.serial.str = pstrdup(serial);
- notif->session.serial.num = NULL; /* Not needed for now. */
- STAILQ_INIT(¬if->delta_hashes);
+ state->session.session_id = pstrdup(session);
+ state->session.serial.str = pstrdup(serial);
+ state->session.serial.num = NULL; /* Not needed for now. */
+ state->files = NULL;
+ STAILQ_INIT(&state->delta_hashes);
va_start(args, serial);
while ((dh_byte = va_arg(args, int)) != 0) {
hash = pmalloc(sizeof(struct rrdp_hash));
memset(hash->bytes, dh_byte, sizeof(hash->bytes));
- STAILQ_INSERT_TAIL(¬if->delta_hashes, hash, hook);
+ STAILQ_INSERT_TAIL(&state->delta_hashes, hash, hook);
}
va_end(args);
- return notif;
+ return state;
}
START_TEST(test_xmlChar_NULL_assumption)
}
static void
-init_cachefile_notif(struct cachefile_notification **result,
+init_rrdp_state(struct rrdp_state **result,
unsigned long serial, ...)
{
- struct cachefile_notification *notif;
+ struct rrdp_state *notif;
va_list args;
int hash_byte;
struct rrdp_hash *hash;
size_t i;
- notif = pmalloc(sizeof(struct cachefile_notification));
+ notif = pmalloc(sizeof(struct rrdp_state));
*result = notif;
init_rrdp_session(¬if->session, serial);
}
static void
-validate_cachefile_notif(struct cachefile_notification *notif,
- unsigned long __serial, ...)
+validate_rrdp_state(struct rrdp_state *state, unsigned long __serial, ...)
{
struct rrdp_serial serial;
va_list args;
struct rrdp_hash *hash;
size_t i;
- ck_assert_str_eq("session", notif->session.session_id);
+ ck_assert_str_eq("session", state->session.session_id);
init_serial(&serial, __serial);
- ck_assert_str_eq(serial.str, notif->session.serial.str);
- ck_assert_int_eq(0, BN_cmp(serial.num, notif->session.serial.num));
+ ck_assert_str_eq(serial.str, state->session.serial.str);
+ ck_assert_int_eq(0, BN_cmp(serial.num, state->session.serial.num));
serial_cleanup(&serial);
- hash = STAILQ_FIRST(¬if->delta_hashes);
+ hash = STAILQ_FIRST(&state->delta_hashes);
va_start(args, __serial);
while ((hash_byte = va_arg(args, int)) >= 0) {
ck_assert_ptr_eq(NULL, hash);
- rrdp_state_free(notif);
+ rrdp_state_free(state);
}
START_TEST(test_update_notif)
{
- struct cachefile_notification *old;
+ struct rrdp_state *old;
struct update_notification new;
/* No changes */
- init_cachefile_notif(&old, 5555, 1, 2, 3, -1);
+ init_rrdp_state(&old, 5555, 1, 2, 3, -1);
init_regular_notif(&new, 5555, 1, 2, 3, -1);
ck_assert_int_eq(0, update_notif(old, &new));
- validate_cachefile_notif(old, 5555, 1, 2, 3, -1);
+ validate_rrdp_state(old, 5555, 1, 2, 3, -1);
/* Add a few serials */
- init_cachefile_notif(&old, 5555, 1, 2, 3, -1);
+ init_rrdp_state(&old, 5555, 1, 2, 3, -1);
init_regular_notif(&new, 5557, 3, 4, 5, -1);
ck_assert_int_eq(0, update_notif(old, &new));
- validate_cachefile_notif(old, 5557, 1, 2, 3, 4, 5, -1);
+ validate_rrdp_state(old, 5557, 1, 2, 3, 4, 5, -1);
/* Add serials, delta threshold exceeded */
- init_cachefile_notif(&old, 5555, 1, 2, 3, -1);
+ init_rrdp_state(&old, 5555, 1, 2, 3, -1);
init_regular_notif(&new, 5558, 3, 4, 5, 6, -1);
ck_assert_int_eq(0, update_notif(old, &new));
- validate_cachefile_notif(old, 5558, 2, 3, 4, 5, 6, -1);
+ validate_rrdp_state(old, 5558, 2, 3, 4, 5, 6, -1);
/* All new serials, but no hashes skipped */
- init_cachefile_notif(&old, 5555, 1, 2, 3, -1);
+ init_rrdp_state(&old, 5555, 1, 2, 3, -1);
init_regular_notif(&new, 5557, 4, 5, -1);
ck_assert_int_eq(0, update_notif(old, &new));
- validate_cachefile_notif(old, 5557, 1, 2, 3, 4, 5, -1);
+ validate_rrdp_state(old, 5557, 1, 2, 3, 4, 5, -1);
/* 2 previous tests combined */
- init_cachefile_notif(&old, 5555, 1, 2, 3, 4, 5, -1);
+ init_rrdp_state(&old, 5555, 1, 2, 3, 4, 5, -1);
init_regular_notif(&new, 5560, 6, 7, 8, 9, 10, -1);
ck_assert_int_eq(0, update_notif(old, &new));
- validate_cachefile_notif(old, 5560, 6, 7, 8, 9, 10, -1);
+ validate_rrdp_state(old, 5560, 6, 7, 8, 9, 10, -1);
}
END_TEST
START_TEST(test_parse_notification_large_serial)
{
- struct cache_node map;
struct update_notification notif;
ck_assert_int_eq(0, relax_ng_init());
START_TEST(test_parse_snapshot_bad_publish)
{
struct rrdp_session session;
- struct cache_node rpp = { 0 };
+ struct rrdp_state rpp = { 0 };
ck_assert_int_eq(0, relax_ng_init());
session.session_id = "9df4b597-af9e-4dca-bdda-719cce2c4e28";
session.serial.str = "2";
session.serial.num = BN_two();
- rpp.url = "https://example.com/notification.xml";
- rpp.path = "cache/https/example.com/notification.xml";
- rpp.name = "notification.xml";
ck_assert_int_eq(-EINVAL, parse_snapshot(&session,
"resources/rrdp/snapshot-bad-publish.xml", &rpp));
START_TEST(test_2s_simple)
{
- struct cachefile_notification *notif;
+ struct rrdp_state *state;
json_t *json, *jdeltas;
char const *str;
- notif = create_cachefile_notif("session", "1234", 0);
+ state = create_rrdp_state("session", "1234", 0);
- json = rrdp_state2json(notif);
+ json = rrdp_state2json(state);
ck_assert_ptr_ne(NULL, json);
- rrdp_state_free(notif);
- notif = NULL;
+ rrdp_state_free(state);
+ state = NULL;
ck_assert_int_eq(0, json_get_str(json, TAGNAME_SESSION, &str));
ck_assert_str_eq("session", str);
ck_assert_str_eq("1234", str);
ck_assert_int_eq(ENOENT, json_get_array(json, TAGNAME_DELTAS, &jdeltas));
- ck_assert_int_eq(0, rrdp_json2state(json, &notif));
- ck_rrdp_session("session", "1234", &notif->session);
- ck_assert_uint_eq(true, STAILQ_EMPTY(&notif->delta_hashes));
+ ck_assert_int_eq(0, rrdp_json2state(json, &state));
+ ck_rrdp_session("session", "1234", &state->session);
+ ck_assert_uint_eq(true, STAILQ_EMPTY(&state->delta_hashes));
json_decref(json);
- rrdp_state_free(notif);
+ rrdp_state_free(state);
}
END_TEST
START_TEST(test_2s_more)
{
- struct cachefile_notification *notif;
+ struct rrdp_state *state;
struct rrdp_hash *hash;
json_t *json, *jdeltas;
char const *str;
- notif = create_cachefile_notif("session",
+ state = create_rrdp_state("session",
"123456789012345678901234567890123456789012",
0xAA, 0xBB, 0xCD, 0);
- json = rrdp_state2json(notif);
+ json = rrdp_state2json(state);
ck_assert_ptr_ne(NULL, json);
- rrdp_state_free(notif);
- notif = NULL;
+ rrdp_state_free(state);
+ state = NULL;
ck_assert_int_eq(0, json_get_str(json, TAGNAME_SESSION, &str));
ck_assert_str_eq("session", str);
ck_assert_str_eq("cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd",
json_string_value(json_array_get(jdeltas, 2)));
- ck_assert_int_eq(0, rrdp_json2state(json, &notif));
- ck_rrdp_session("session", "123456789012345678901234567890123456789012", &notif->session);
- hash = STAILQ_FIRST(&notif->delta_hashes);
+ ck_assert_int_eq(0, rrdp_json2state(json, &state));
+ ck_rrdp_session("session", "123456789012345678901234567890123456789012", &state->session);
+ hash = STAILQ_FIRST(&state->delta_hashes);
ck_assert_ptr_ne(NULL, hash);
ck_hash(hash, 0xAA);
hash = STAILQ_NEXT(hash, hook);
ck_assert_ptr_eq(NULL, hash);
json_decref(json);
- rrdp_state_free(notif);
+ rrdp_state_free(state);
}
END_TEST
{
json_t *json;
json_error_t error;
- struct cachefile_notification *notif;
+ struct rrdp_state *state;
json = json_loads(json_str, 0, &error);
ck_assert_ptr_ne(NULL, json);
- notif = NULL;
- ck_assert_int_eq(expected, rrdp_json2state(json, &notif));
+ state = NULL;
+ ck_assert_int_eq(expected, rrdp_json2state(json, &state));
json_decref(json);
- if (notif == NULL)
- rrdp_state_free(notif);
+ if (state != NULL)
+ rrdp_state_free(state);
}
START_TEST(test_2s_errors)
{
- struct cachefile_notification notif = { 0 };
+ struct rrdp_state state = { 0 };
- ck_assert_ptr_eq(NULL, rrdp_state2json(NULL));
- ck_assert_ptr_eq(NULL, rrdp_state2json(&notif));
- notif.session.session_id = "sid";
- ck_assert_ptr_eq(NULL, rrdp_state2json(&notif));
+ ck_assert_ptr_eq(NULL, rrdp_state2json(&state));
+ state.session.session_id = "sid";
+ ck_assert_ptr_eq(NULL, rrdp_state2json(&state));
ck_json2state(ENOENT, "{}");
ck_json2state(0, "{ \"" TAGNAME_SESSION "\":\"sss\", \"" TAGNAME_SERIAL "\":\"123\" }");
#include "alloc.c"
#include "base64.c"
-#include "cachent.c"
#include "cachetmp.c"
-#include "cache_util.c"
#include "common.c"
#include "file.c"
#include "hash.c"
#include "relax_ng.c"
#include "rrdp.c"
#include "rrdp_util.h"
+#include "types/map.c"
#include "types/path.c"
#include "types/url.c"
-/* Mocks */
-
-MOCK_VOID(__delete_node_cb, struct cache_node const *node)
-
/* Utils */
static void
static void
cleanup_test(void)
{
-// ck_assert_int_eq(0, system("rm -rf tmp/"));
hash_teardown();
relax_ng_cleanup();
}
ck_assert_str_eq("Fort\n", buffer);
}
+/* XXX (test) Add delta hashes */
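+/*
+ * Checks the state's session/serial and its URL-to-path file mappings.
+ * maps[] must be NULL-terminated and list the files in insertion order,
+ * since uthash iterates in that order by default.
+ * Note: seq_id is not asserted against anything yet.
+ */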
+static void
+ck_state(char const *session, char const *serial, unsigned long seq_id,
+ struct cache_mapping *maps, struct rrdp_state *actual)
+{
+ unsigned int m;
+ struct cache_file *node, *tmp;
+
+ ck_assert_str_eq(session, actual->session.session_id);
+ ck_assert_str_eq(serial, actual->session.serial.str);
+
+ for (m = 0; maps[m].url != NULL; m++)
+ ;
+ ck_assert_int_eq(m, HASH_COUNT(actual->files));
+
+ m = 0;
+ HASH_ITER(hh, actual->files, node, tmp) {
+ ck_assert_str_eq(maps[m].url, node->map.url);
+ ck_assert_str_eq(maps[m].path, node->map.path);
+ m++;
+ }
+}
+
/* Tests */
START_TEST(startup)
{
-#define NOTIF_PATH "tmp/https/host/notification.xml"
- struct cache_node notif;
+ struct cache_mapping notif;
+ struct cache_sequence seq;
+ struct rrdp_state *state = NULL;
+ struct cache_mapping maps[4];
+ bool changed;
setup_test();
- memset(&notif, 0, sizeof(notif));
notif.url = "https://host/notification.xml";
- notif.path = NOTIF_PATH;
- notif.name = "notification.xml";
+ notif.path = "tmp/https/0";
+
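+ /* Sequence handed to rrdp_update() to mint numbered cache paths;
+ * the first RRDP cage is expected to land in "tmp/rrdp/0"
+ * (see the ck_file() call below). */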
+ seq.prefix = "tmp/rrdp";
+ seq.next_id = 0;
+ seq.pathlen = strlen(seq.prefix);
dls[0] = NHDR("3")
- NSS("https://host/9d-8/3/snapshot.xml", "0c84fb949e7b5379ae091b86c41bb1a33cb91636b154b86ad1b1dedd44651a25")
+ NSS("https://host/9d-8/3/snapshot.xml",
+ "0c84fb949e7b5379ae091b86c41bb1a33cb91636b154b86ad1b1dedd44651a25")
NTAIL;
dls[1] = SHDR("3") PBLSH("rsync://a/b/c.cer", "Rm9ydAo=") STAIL;
dls[2] = NULL;
https_counter = 0;
- ck_assert_int_eq(0, rrdp_update(&notif));
+ ck_assert_int_eq(0, rrdp_update(&notif, 0, &changed, &seq, &state));
ck_assert_uint_eq(2, https_counter);
- ck_file("tmp/tmp/0/a/b/c.cer");
- ck_assert_cachent_eq(
- rftnode("rsync://", NOTIF_PATH, 0, "tmp/tmp/0",
- rftnode("rsync://a", NOTIF_PATH "/a", 0, "tmp/tmp/0/a",
- rftnode("rsync://a/b", NOTIF_PATH "/a/b", 0, "tmp/tmp/0/a/b",
- rftnode("rsync://a/b/c.cer", NOTIF_PATH "/a/b/c.cer", 0, "tmp/tmp/0/a/b/c.cer", NULL),
- NULL),
- NULL),
- NULL),
- notif.rrdp.subtree
- );
-
- dls[1] = NULL;
+ ck_assert_uint_eq(true, changed);
+ ck_file("tmp/rrdp/0/0"); /* "tmp/rrdp/<first-cage>/<c.cer>" */
+
+ maps[0].url = "rsync://a/b/c.cer";
+ maps[0].path = "tmp/rrdp/0/0";
+ maps[1].url = NULL;
+ ck_state(TEST_SESSION, "3", 1, maps, state);
+
+ /* Attempt to update, server hasn't changed anything. */
+ dls[1] = NULL; /* Snapshot should not redownload */
https_counter = 0;
- ck_assert_int_eq(0, rrdp_update(&notif));
+ ck_assert_int_eq(0, rrdp_update(&notif, 0, &changed, &seq, &state));
ck_assert_uint_eq(1, https_counter);
- ck_file("tmp/tmp/0/a/b/c.cer");
- ck_assert_cachent_eq(
- rftnode("rsync://", NOTIF_PATH, 0, "tmp/tmp/0",
- rftnode("rsync://a", NOTIF_PATH "/a", 0, "tmp/tmp/0/a",
- rftnode("rsync://a/b", NOTIF_PATH "/a/b", 0, "tmp/tmp/0/a/b",
- rftnode("rsync://a/b/c.cer", NOTIF_PATH "/a/b/c.cer", 0, "tmp/tmp/0/a/b/c.cer", NULL),
- NULL),
- NULL),
- NULL),
- notif.rrdp.subtree
- );
+ ck_assert_uint_eq(false, changed);
+ ck_file("tmp/rrdp/0/0");
+ ck_state(TEST_SESSION, "3", 1, maps, state);
+ rrdp_state_free(state);
cleanup_test();
+
+ // XXX Missing a looooooooooooooooooot of tests
}
END_TEST
#ifndef TEST_RRDP_UTIL_H_
#define TEST_RRDP_UTIL_H_
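+/* Session id shared by the NHDR/SHDR fixtures, so tests can assert it. */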
+#define TEST_SESSION "9df4b597-af9e-4dca-bdda-719cce2c4e28"
+
#define NHDR(serial) "<notification " \
"xmlns=\"http://www.ripe.net/rpki/rrdp\" " \
"version=\"1\" " \
- "session_id=\"9df4b597-af9e-4dca-bdda-719cce2c4e28\" " \
+ "session_id=\"" TEST_SESSION "\" " \
"serial=\"" serial "\">\n"
#define NSS(u, h) "\t<snapshot uri=\"" u "\" hash=\"" h "\"/>\n"
#define NTAIL "</notification>"
#define SHDR(serial) "<snapshot " \
"xmlns=\"http://www.ripe.net/rpki/rrdp\" " \
"version=\"1\" " \
- "session_id=\"9df4b597-af9e-4dca-bdda-719cce2c4e28\" " \
+ "session_id=\"" TEST_SESSION "\" " \
"serial=\"" serial "\">\n"
#define STAIL "</snapshot>"
ensure_file_deleted("tmp/1kb-copy");
// XXX this is creating directories because of rsync_download's mkdir_p.
// Is this a symptom of a problem?
- ck_assert_int_eq(0, rsync_download("tmp/1kb", "tmp/1kb-copy", false));
+ ck_assert_int_eq(0, rsync_download("tmp/1kb", "tmp/1kb-copy"));
}
END_TEST
printf("3kb\n");
create_file("tmp/3kb", 3);
ensure_file_deleted("tmp/3kb-copy");
- ck_assert_int_eq(0, rsync_download("tmp/3kb", "tmp/3kb-copy", false));
+ ck_assert_int_eq(0, rsync_download("tmp/3kb", "tmp/3kb-copy"));
}
END_TEST
create_file("tmp/5kb", 5);
ensure_file_deleted("tmp/5kb-copy");
/* Max speed is 1kbps, timeout is 4 seconds */
- ck_assert_int_eq(EIO, rsync_download("tmp/5kb", "tmp/5kb-copy", false));
+ ck_assert_int_eq(EIO, rsync_download("tmp/5kb", "tmp/5kb-copy"));
}
END_TEST
char *actual;
stream = create_stream(input, length);
-
actual = read_string(stream, length);
- if (expected == NULL)
- ck_assert_ptr_eq(NULL, actual);
- else
- ck_assert_str_eq(expected, actual);
+ ck_assert_str(expected, actual);
free(actual);
free(stream);
#include <stdlib.h>
#include "alloc.c"
+#include "common.c"
#include "mock.c"
#include "types/path.c"
#include "types/url.c"