# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.69])
- AC_INIT([fort],[1.6.2],[validadorfort@fortproject.net])
-AC_INIT([fort],[1.6.5],[validadorfort@fortproject.net])
++AC_INIT([fort],[1.255.0],[validadorfort@fortproject.net])
AC_CONFIG_SRCDIR([src/main.c])
AM_INIT_AUTOMAKE([subdir-objects])
"rsync": {
"<a href="#--rsyncenabled">enabled</a>": true,
"<a href="#--rsyncpriority">priority</a>": 50,
- "<a href="#--rsynctransfer-timeout">transfer-timeout</a>": 0,
- "retry": {
- "<a href="#--rsyncretrycount">count</a>": 1,
- "<a href="#--rsyncretryinterval">interval</a>": 4
- },
+ "<a href="#--rsynctransfer-timeout">transfer-timeout</a>": 900,
"<a href="#rsyncprogram">program</a>": "rsync",
"<a href="#rsyncarguments-recursive">arguments-recursive</a>": [
"-rtz",
"http": {
"<a href="#--httpenabled">enabled</a>": true,
"<a href="#--httppriority">priority</a>": 60,
- "<a href="#--httpuser-agent">user-agent</a>": "fort/1.6.2",
- "retry": {
- "<a href="#--httpretrycount">count</a>": 1,
- "<a href="#--httpretryinterval">interval</a>": 4
- },
+ "<a href="#--httpuser-agent">user-agent</a>": "fort/{{ site.fort-latest-version }}",
"<a href="#--httpmax-redirs">max-redirs</a>": 10,
"<a href="#--httpconnect-timeout">connect-timeout</a>": 30,
- "<a href="#--httptransfer-timeout">transfer-timeout</a>": 0,
+ "<a href="#--httptransfer-timeout">transfer-timeout</a>": 900,
"<a href="#--httplow-speed-limit">low-speed-limit</a>": 100000,
"<a href="#--httplow-speed-time">low-speed-time</a>": 10,
"<a href="#--httpmax-file-size">max-file-size</a>": 1000000000,
"log": {
"<a href="#--logenabled">enabled</a>": true,
"<a href="#--logoutput">output</a>": "console",
- "<a href="#--loglevel">level</a>": "warning",
+ "<a href="#--loglevel">level</a>": "info",
- "<a href="#--logtag">tag</a>": "Operation",
+ "<a href="#--logtag">tag</a>": "Op",
"<a href="#--logfacility">facility</a>": "daemon",
"<a href="#--logfile-name-format">file-name-format</a>": "global-url",
"<a href="#--logcolor-output">color-output</a>": false
.RE
.P
--.B \-\-http.retry.count=\fIUNSIGNED_INTEGER\fR
--.RS 4
--Maximum number of retries whenever there's an error requesting an HTTP URI.
--.P
--A value of \fI0\fR means no retries.
--.P
--Whenever it is necessary to request an HTTP URI, the validator will try the
--request at least once. If there was an error requesting the URI, the validator
--will retry at most \fI--http.retry.count\fR times to fetch the file, waiting
--\fI--http.retry.interval\fR seconds between each retry.
--.P
- By default, the value is \fI2\fR.
-By default, the value is \fI4\fR.
--.RE
--.P
--
--.B \-\-http.retry.interval=\fIUNSIGNED_INTEGER\fR
--.RS 4
--Period (in seconds) to wait between retries after an error occurred requesting
--HTTP URIs.
--.P
--By default, the value is \fI5\fR.
--.RE
--.P
--
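The retry semantics documented (and removed) above boil down to a bounded retry loop: one mandatory attempt, then up to count retries with interval seconds between them. A minimal C sketch of that behavior, assuming a caller-supplied fetch callback; fetch_with_retries and the callback are hypothetical names, not FORT functions:

#include <unistd.h>

/*
 * Sketch only: one mandatory attempt, then up to @count retries,
 * sleeping @interval seconds between attempts.
 * @fetch is assumed to try the request once and return 0 on success.
 */
static int
fetch_with_retries(int (*fetch)(char const *), char const *uri,
    unsigned int count, unsigned int interval)
{
	unsigned int attempt;
	int error;

	for (attempt = 0; ; attempt++) {
		error = fetch(uri);
		if (!error || attempt >= count)
			return error; /* Success, or retries exhausted */
		sleep(interval); /* Wait before the next retry */
	}
}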
.BR \-\-http.user\-agent=\fISTRING\fR
.RS 4
User-Agent to use at HTTP requests.
.RE
.P
- .B \-\-rsync.strategy=(\fIstrict\fR|\fIroot\fR|\fIroot-except-ta\fR)
- .RS 4
- \fIrsync\fR download strategy; states the way rsync URLs are approached during
- downloads. It can have one of three values:
- .IR strict ", "
- .IR root ", "
- .IB "root-except-ta" "(default value)" \fR. \fR
- .P
- .I strict
- .RS 4
- In order to enable this strategy, FORT must be compiled using the flag:
- ENABLE\_STRICT\_STRATEGY. e.g.
- \fB $ make FORT_FLAGS='-DENABLE_STRICT_STRATEGY'\fR
- .P
- RSYNC every repository publication point separately. Only skip publication
- points that have already been downloaded during the current validation cycle.
- (Assuming each synchronization is recursive.)
- .P
- For example, suppose the validator gets certificates whose caRepository access
- methods (in their Subject Information Access extensions) point to the following
- publication points:
- .P
- 1. rsync://rpki.example.com/foo/bar/
- .br
- 2. rsync://rpki.example.com/foo/qux/
- .br
- 3. rsync://rpki.example.com/foo/bar/
- .br
- 4. rsync://rpki.example.com/foo/corge/grault/
- .br
- 5. rsync://rpki.example.com/foo/corge/
- .br
- 6. rsync://rpki.example.com/foo/corge/waldo/
- .P
- A validator following the `strict` strategy would download `bar`, download
- `qux`, skip `bar`, download `corge/grault`, download `corge` and skip
- `corge/waldo`.
- .P
- This is the slowest, but also the strictly correct sync strategy.
- .RE
- .P
- .I root
- .RS 4
- For each publication point found, guess the root of its repository and RSYNC
- that instead. Then skip any subsequent children of said root.
- .P
- (To guess the root of a repository, the validator counts four slashes, and
- prunes the rest of the URL.)
- .P
- Reusing the caRepository URLs from the `strict` strategy (above) as example, a
- validator following the `root` strategy would download
- `rsync://rpki.example.com/foo`, and then skip everything else.
- .P
- Assuming that the repository is specifically structured to be found within as
- few roots as possible, and they contain minimal RPKI-unrelated noise files, this
- is the fastest synchronization strategy. At time of writing, this is true for
- all the current official repositories.
- .RE
- .P
- .I root-except-ta
- .RS 4
- Synchronizes the root certificate (the one pointed by the TAL) in 'strict' mode,
- and once it's validated, synchronizes the rest of the repository in 'root' mode.
- .P
- Useful if you want 'root', but the root certificate is separated from the rest
- of the repository. Also useful if you don't want the validator to download the
- entire repository without first confirming the integrity and legitimacy of the
- root certificate.
- .RE
- .RE
- .P
-
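The root-guessing rule described above ("count four slashes, prune the rest of the URL") fits in a few lines of C. The following is a sketch only; guess_repository_root is a hypothetical helper, not FORT's implementation:

#include <string.h>

/*
 * Keep everything before the fourth slash of @url and prune the rest.
 * Example: "rsync://rpki.example.com/foo/bar/" becomes
 * "rsync://rpki.example.com/foo".
 */
static char *
guess_repository_root(char const *url)
{
	unsigned int slashes = 0;
	size_t i;

	for (i = 0; url[i] != '\0'; i++)
		if (url[i] == '/' && ++slashes == 4)
			return strndup(url, i); /* Prune from the 4th slash */

	return strdup(url); /* Fewer than four slashes; leave as-is */
}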
--.B \-\-rsync.retry.count=\fIUNSIGNED_INTEGER\fR
--.RS 4
--Maximum number of retries whenever there's an error executing RSYNC.
--.P
--A value of \fI0\fR means no retries.
--.P
--Whenever it is necessary to execute an RSYNC, the validator will try the
--execution at least once. If there was an error executing the RSYNC, the
--validator will retry it at most \fI--rsync.retry.count\fR times, waiting
--\fI--rsync.retry.interval\fR seconds between each retry.
--.P
- By default, the value is \fI2\fR.
-By default, the value is \fI4\fR.
--.RE
--.P
--
--.B \-\-rsync.retry.interval=\fIUNSIGNED_INTEGER\fR
--.RS 4
--Period (in seconds) to wait between retries after an RSYNC error occurred.
--.P
--By default, the value is \fI5\fR.
--.RE
--.P
--
.B \-\-rsync.transfer\-timeout=\fIUNSIGNED_INTEGER\fR
.RS 4
Maximum time in seconds that the rsync process can last.
"count": 1,
"interval": 4
},
- "user-agent": "fort/1.6.4",
+ "user-agent": "fort/1.6.2",
"max-redirs": 10,
"connect-timeout": 30,
- "transfer-timeout": 0,
+ "transfer-timeout": 900,
"low-speed-limit": 100000,
"low-speed-time": 10,
"max-file-size": 1000000000,
"log": {
"enabled": true,
"output": "console",
- "level": "warning",
+ "level": "info",
- "tag": "Operation",
+ "tag": "Op",
"facility": "daemon",
"file-name-format": "global-url",
"color-output": false
enum file_type ft;
char *payload;
- * Meant for test repositories we don't want to have to keep
- * regenerating.
+
+ struct {
+ /*
+ * If nonzero, all RPKI object expiration dates are compared to
+ * this number instead of the current time.
++ * Meant for testing of repositories we don't want to have to
++ * keep regenerating.
+ */
+ time_t validation_time;
+ } debug;
};
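For illustration, the debug.validation_time override described in the comment above amounts to choosing which clock expiration checks compare against. A hypothetical sketch, assuming the struct field shown above; validation_now is not a FORT helper:

#include <time.h>

/*
 * When the debug override is nonzero, use it instead of the current
 * time for RPKI object expiration comparisons.
 */
static time_t
validation_now(time_t debug_validation_time)
{
	return (debug_validation_time != 0)
	    ? debug_validation_time
	    : time(NULL);
}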
static void print_usage(FILE *, bool);
{
.id = 13000,
- .id = 13000,
+ .name = "debug.validation-time",
+ .type = &gt_time,
+ .offset = offsetof(struct rpki_config, debug.validation_time),
+ },
+
+ {
++ .id = 13001,
.name = "file-type",
+ .type = &gt_file_type,
.offset = offsetof(struct rpki_config, ft),
}
}
-/*
- * Contract:
- * - Length = 4 (includes dot)
- * - Not NULL-terminated!
- * - Can return NULL
- */
-static char *
-get_extension(IA5String_t *file)
+static bool
+is_valid_mft_file_chara(uint8_t chara)
+{
+ return ('a' <= chara && chara <= 'z')
+ || ('A' <= chara && chara <= 'Z')
+ || ('0' <= chara && chara <= '9')
+ || (chara == '-')
+ || (chara == '_');
+}
+
+/* RFC 9286, section 4.2.2 */
+static int
+validate_mft_filename(IA5String_t *ia5)
+{
+ size_t dot;
+ size_t i;
+
+ if (ia5->size < 5)
+ return pr_val_err("File name is too short (%zu < 5).", ia5->size);
+ dot = ia5->size - 4;
+ if (ia5->buf[dot] != '.')
+ return pr_val_err("File name is missing three-letter extension.");
+
+ for (i = 0; i < ia5->size; i++)
+ if (i != dot && !is_valid_mft_file_chara(ia5->buf[i]))
+ return pr_val_err("File name contains illegal character #%u",
+ ia5->buf[i]);
+
- /*
- * Well... the RFC says the extension must match a IANA listing,
- * but rejecting unknown extensions is a liability since they keep
- * adding new ones, and people rarely updates.
- * If we don't have a handler, we'll naturally ignore the file.
- */
+ return 0;
+}
+
+static int
+check_file_and_hash(struct FileAndHash *fah, char const *path)
{
- return (file->size < 4) ? NULL : (((char *)file->buf) + file->size - 4);
+ if (fah->hash.bits_unused != 0)
+ return pr_val_err("Hash string has unused bits.");
+
+ /* Includes file exists validation, obv. */
+ return hash_validate_file(hash_get_sha256(), path,
+ fah->hash.buf, fah->hash.size);
}
+/*
+ * XXX
+ *
+ * revoked manifest: 6.6
+ * CRL not in fileList: 6.6
+ * fileList file in different folder: 6.6
+ * manifest is identified by id-ad-rpkiManifest. (A directory will have more
+ * than 1 on rollover.)
+ * id-ad-rpkiManifest not found: 6.6
+ * invalid manifest: 6.6
+ * stale manifest: 6.6
+ * fileList file not found: 6.6
+ * bad hash: 6.6
+ * 6.6: warning, fallback to previous version. Children inherit this.
+ */
+
static int
-build_rpp(struct Manifest *mft, struct rpki_uri *notif,
- struct rpki_uri *mft_uri, struct rpp **pp)
+collect_files(char const *mft_url, char const *mft_path,
+ struct Manifest *mft, struct cache_cage *cage,
+ struct rpki_certificate *parent)
{
- char const *tal;
- unsigned int i;
- struct FileAndHash *fah;
- char *ext;
- int (*rpp_add)(struct rpp *pp, struct rpki_uri *uri);
- struct rpki_uri *uri;
+ struct rpp *rpp;
+ char *rpp_url;
- unsigned int i;
++ unsigned int m;
+ struct FileAndHash *src;
+ struct cache_mapping *dst;
++ char const *ext;
+ char const *path;
int error;
if (mft->fileList.list.count == 0)
return pr_val_err("Manifest's file list is empty.");
- shuffle_file_list(mft);
+ rpp = &parent->rpp;
+ rpp_url = url_parent(mft_url); // XXX
- rpp->nfiles = mft->fileList.list.count + 1; /* plus manifest */
- rpp->files = pzalloc(rpp->nfiles * sizeof(*rpp->files));
++ rpp->files = pzalloc((mft->fileList.list.count + 1) * sizeof(*rpp->files));
++ rpp->nfiles = 0;
- for (i = 0; i < mft->fileList.list.count; i++) {
- src = mft->fileList.list.array[i];
- dst = &rpp->files[i];
- *pp = rpp_create();
- tal = tal_get_file_name(validation_tal(state_retrieve()));
++ for (m = 0; m < mft->fileList.list.count; m++) {
++ src = mft->fileList.list.array[m];
- for (i = 0; i < mft->fileList.list.count; i++) {
- fah = mft->fileList.list.array[i];
+ /*
+ * IA5String is a subset of ASCII. However, IA5String_t doesn't
+ * seem to be guaranteed to be NULL-terminated.
+ */
+
+ error = validate_mft_filename(&src->file);
+ if (error)
+ goto revert;
- * rsync filters unknown files. We don't want absent unknown
- * files to induce RPP rejection, so we'll skip them.
+ /*
- ext = get_extension(&fah->file);
- if (ext == NULL)
- continue;
- else if (strncmp(ext, ".cer", 4) == 0)
- rpp_add = rpp_add_cer;
- else if (strncmp(ext, ".roa", 4) == 0)
- rpp_add = rpp_add_roa;
- else if (strncmp(ext, ".crl", 4) == 0)
- rpp_add = rpp_add_crl;
- else if (strncmp(ext, ".gbr", 4) == 0)
- rpp_add = rpp_add_gbr;
- else
++ * rsync and RRDP filter unknown files. We don't want absent
++ * unknown files to induce RPP rejection, so we'll skip them.
+ * This contradicts rfc9286#6.4, but it's necessary evil because
+ * we can't trust the repositories to not accidentally serve
+ * garbage.
+ *
+ * This includes .mft; They're presently not supposed to be
+ * listed.
+ */
- error = uri_create_mft(&uri, tal, notif, mft_uri, &fah->file);
- /*
- * Not handling ENOTRSYNC is fine because the manifest URL
- * should have been RSYNC. Something went wrong if an RSYNC URL
- * plus a relative path is not RSYNC.
- */
- if (error)
- goto fail;
-
- /*
- * Expect:
- * - Negative value: an error not to be ignored, the whole
- * manifest will be discarded.
- * - Zero value: hash at manifest matches file's hash, or it
- * doesn't match its hash but there's an incidence to ignore
- * such error.
- * - Positive value: file doesn't exist and keep validating
- * manifest.
- */
- error = hash_validate_mft_file(uri, &fah->hash);
- if (error < 0) {
- uri_refput(uri);
- goto fail;
- }
- if (error > 0) {
- uri_refput(uri);
- continue;
++ ext = ((char const *)src->file.buf) + src->file.size - 3;
++ if ((strncmp(ext, "cer", 3) != 0) &&
++ (strncmp(ext, "roa", 3) != 0) &&
++ (strncmp(ext, "crl", 3) != 0) &&
++ (strncmp(ext, "gbr", 3) != 0))
+ continue;
+
++ dst = &rpp->files[rpp->nfiles++];
+ dst->url = path_njoin(rpp_url,
+ (char const *)src->file.buf,
+ src->file.size);
+
+ path = cage_map_file(cage, dst->url);
+ if (!path) {
+ error = pr_val_err(
+ "Manifest file '%s' is absent from the cache.",
+ dst->url);
+ goto revert;
}
+ dst->path = pstrdup(path);
- error = rpp_add(*pp, uri);
- if (error) {
- uri_refput(uri);
- goto fail;
- } /* Otherwise ownership was transferred to @pp. */
+ error = check_file_and_hash(src, dst->path);
+ if (error)
+ goto revert;
}
- /* rfc6486#section-7 */
- if (rpp_get_crl(*pp) == NULL) {
- error = pr_val_err("Manifest lacks a CRL.");
- goto fail;
- }
+ /* Manifest */
- dst = &rpp->files[mft->fileList.list.count];
++ dst = &rpp->files[rpp->nfiles++];
+ dst->url = pstrdup(mft_url);
+ dst->path = pstrdup(mft_path);
return 0;
return 0;
}
- /* Does not clean @tag on failure. */
+ static bool
-is_known_extension(struct rpki_uri *uri)
++is_known_extension(char const *uri)
+ {
++ size_t len;
+ char const *ext;
+
- if (uri_get_global_len(uri) < 4)
++ len = strlen(uri);
++ if (len < 4)
+ return false;
+
- ext = uri_get_global(uri) + uri_get_global_len(uri) - 4;
++ ext = uri + len - 4;
+ return ((strcmp(ext, ".cer") == 0)
+ || (strcmp(ext, ".roa") == 0)
+ || (strcmp(ext, ".mft") == 0)
+ || (strcmp(ext, ".crl") == 0)
+ || (strcmp(ext, ".gbr") == 0));
+ }
+
static int
- parse_publish(xmlTextReaderPtr reader, struct publish *tag)
-parse_publish(xmlTextReaderPtr reader, struct rpki_uri *notif,
- hash_requirement hr, struct publish *tag)
++handle_publish(xmlTextReaderPtr reader, struct parser_args *args)
{
++ struct publish tag = { 0 };
xmlChar *base64_str;
++ struct cache_file *file;
++ char *path;
int error;
- error = parse_file_metadata(reader, &tag->meta);
- error = parse_file_metadata(reader, notif, hr, &tag->meta);
++ /* Parse tag itself */
++ error = parse_file_metadata(reader, &tag.meta);
if (error)
return error;
--
-- /* Read the text */
- if (xmlTextReaderRead(reader) != 1)
- return pr_val_err(
+ if (xmlTextReaderRead(reader) != 1) {
- return pr_val_err(
++ error = pr_val_err(
"Couldn't read publish content of element '%s'",
- tag->meta.uri
- uri_get_global(tag->meta.uri)
++ tag.meta.uri
);
++ goto end;
+ }
++ if (!is_known_extension(tag.meta.uri))
++ goto end; /* Mirror rsync filters */
- if (!is_known_extension(tag->meta.uri))
- return 0; /* Mirror rsync filters */
-
++ /* Parse tag content */
base64_str = parse_string(reader, NULL);
-- if (base64_str == NULL)
-- return -EINVAL;
- if (!base64_decode((char *)base64_str, 0, &tag->content, &tag->content_len))
- error = base64_read((char const *)base64_str, &tag->content,
- &tag->content_len);
++ if (base64_str == NULL) {
++ error = -EINVAL;
++ goto end;
++ }
++ if (!base64_decode((char *)base64_str, 0, &tag.content, &tag.content_len)) {
++ xmlFree(base64_str);
+ error = pr_val_err("Cannot decode publish tag's base64.");
++ goto end;
++ }
xmlFree(base64_str);
- if (error)
- return error;
- return error;
- }
-
- /* Does not clean @tag on failure. */
- static int
- parse_withdraw(xmlTextReaderPtr reader, struct withdraw *tag)
- {
- int error;
-
- error = parse_file_metadata(reader, &tag->meta);
- if (error)
- return error;
-
- if (!tag->meta.hash)
- return pr_val_err("Withdraw '%s' is missing a hash.",
- tag->meta.uri);
-
- return 0;
- }
-
- static int
- handle_publish(xmlTextReaderPtr reader, struct parser_args *args)
- {
- struct publish tag = { 0 };
- struct cache_file *file;
- char *path;
- int error;
-
- error = parse_publish(reader, &tag);
- if (error)
- goto end;
- /* rfc8181#section-2.2 but considering optional hash */
- if (tag->meta.hash_len > 0)
- error = validate_hash(&tag->meta);
++ /* Parsing done */
+
+ pr_clutter("Publish %s", logv_filename(tag.meta.uri));
+
+ file = state_find_file(args->state, tag.meta.uri, strlen(tag.meta.uri));
+ /* rfc8181#section-2.2 */
+ if (file) {
+ if (tag.meta.hash == NULL) {
+ // XXX watch out for this in the log before release
+ error = pr_val_err("RRDP desync: "
+ "<publish> is attempting to create '%s', "
+ "but the file is already cached.",
+ tag.meta.uri);
+ goto end;
+ }
+
+ error = validate_hash(&tag.meta, file->map.path);
+ if (error)
+ goto end;
+
+ /*
+ * Reminder: This is needed because the file might be
+ * hard-linked. Our repo file write should not propagate
+ * to the fallback.
+ */
+ if (remove(file->map.path) < 0) {
+ error = errno;
+ pr_val_err("Cannot delete %s: %s",
+ file->map.path, strerror(error));
+ if (error != ENOENT)
+ goto end;
+ }
+
+ } else {
+ if (tag.meta.hash != NULL) {
+ // XXX watch out for this in the log before release
+ error = pr_val_err("RRDP desync: "
+ "<publish> is attempting to overwrite '%s', "
+ "but the file is absent in the cache.",
+ tag.meta.uri);
+ goto end;
+ }
+
+ path = cseq_next(&args->state->seq);
+ if (!path) {
+ error = -EINVAL;
+ goto end;
+ }
+ file = cache_file_add(args->state, pstrdup(tag.meta.uri), path);
+ }
+
+ error = file_write_bin(file->map.path, tag.content, tag.content_len);
+
+end: metadata_cleanup(&tag.meta);
+ free(tag.content);
return error;
}
static int
-write_file(struct rpki_uri *uri, unsigned char *content, size_t content_len)
+handle_withdraw(xmlTextReaderPtr reader, struct parser_args *args)
{
- FILE *out;
- size_t written;
+ struct withdraw tag = { 0 };
+ struct cache_file *file;
+ size_t len;
int error;
- error = parse_withdraw(reader, &tag);
- if (content_len == 0)
- return 0;
-
- error = mkdir_p(uri_get_local(uri), false);
++ error = parse_file_metadata(reader, &tag.meta);
if (error)
- return error;
+ goto end;
++ if (!is_known_extension(tag.meta.uri))
++ goto end; /* Mirror rsync filters */
++ if (!tag.meta.hash) {
++ error = pr_val_err("Withdraw '%s' is missing a hash.",
++ tag.meta.uri);
++ goto end;
++ }
- error = file_write(uri_get_local(uri), "wb", &out);
- if (error)
- return error;
+ pr_clutter("Withdraw %s", logv_filename(tag.meta.uri));
- written = fwrite(content, sizeof(unsigned char), content_len, out);
- file_close(out);
+ len = strlen(tag.meta.uri);
+ file = state_find_file(args->state, tag.meta.uri, len);
- if (written != content_len) {
- return pr_val_err(
- "Couldn't write file '%s' (error code not available)",
- uri_get_local(uri)
- );
+ if (!file) {
+ error = pr_val_err("Broken RRDP: "
+ "<withdraw> is attempting to delete unknown file '%s'.",
+ tag.meta.uri);
+ goto end;
}
- return 0;
-}
-
-/* Remove a local file and its directory tree (if empty) */
-static int
-delete_file(struct rpki_uri *uri)
-{
- /* Delete parent dirs only if empty. */
- return delete_dir_recursive_bottom_up(uri_get_local(uri));
-}
+ error = validate_hash(&tag.meta, file->map.path);
+ if (error)
+ goto end;
-static int
-handle_publish(xmlTextReaderPtr reader, struct rpki_uri *notif,
- hash_requirement hr)
-{
- struct publish tag = { 0 };
- int error;
+ if (remove(file->map.path) < 0) {
+ pr_val_warn("Cannot delete %s: %s", file->map.path,
+ strerror(errno));
+ /* It's fine; keep going. */
+ }
- error = parse_publish(reader, notif, hr, &tag);
- if (!error)
- error = write_file(tag.meta.uri, tag.content, tag.content_len);
+ HASH_DEL(args->state->files, file);
+ map_cleanup(&file->map);
+ free(file);
- metadata_cleanup(&tag.meta);
- free(tag.content);
+end: metadata_cleanup(&tag.meta);
return error;
}
MOCK_NULL(config_get_slurm, char const *, void)
MOCK(config_get_tal, char const *, "tal/", void)
- MOCK(config_get_local_repository, char const *, "tmp", void)
+MOCK(cfg_cache_threshold, time_t, 2, void)
MOCK(config_get_mode, enum mode, STANDALONE, void)
+MOCK_UINT(config_get_rrdp_delta_threshold, 5, void)
MOCK_TRUE(config_get_rsync_enabled, void)
MOCK_UINT(config_get_rsync_priority, 50, void)
MOCK_TRUE(config_get_http_enabled, void)
#include "alloc.c"
#include "common.c"
#include "mock.c"
-#include "thread/thread_pool.c"
+#include "thread_pool.c"
+ __MOCK_ABORT(config_get_local_repository, char const *, "tmp/thread", void)
+
static void
thread_work(void *arg)
{