int kr_cache_insert_rr(struct kr_cache *, const knot_rrset_t *, const knot_rrset_t *, uint8_t, uint32_t);
int kr_cache_remove(struct kr_cache *, const knot_dname_t *, uint16_t);
int kr_cache_remove_subtree(struct kr_cache *, const knot_dname_t *, _Bool, int);
-int kr_cache_sync(struct kr_cache *);
+int kr_cache_commit(struct kr_cache *);
typedef struct {
uint8_t bitmap[32];
uint8_t length;
kr_cache_insert_rr
kr_cache_remove
kr_cache_remove_subtree
- kr_cache_sync
+ kr_cache_commit
EOF
end,
sync = function (self)
assert(ffi.istype(kr_cache_t, self))
- local ret = C.kr_cache_sync(self)
+ local ret = C.kr_cache_commit(self)
if ret ~= 0 then return nil, knot_error_t(ret) end
return true
end,
ret = cache_op(cache, write, &key, &val, 1);
}
}
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
return ret;
}
kr_cache_emergency_file_to_remove = NULL;
}
-int kr_cache_sync(struct kr_cache *cache)
+int kr_cache_commit(struct kr_cache *cache)
{
if (!cache_isvalid(cache)) {
return kr_error(EINVAL);
}
- if (cache->api->sync) {
- return cache_op(cache, sync);
+ if (cache->api->commit) {
+ return cache_op(cache, commit);
}
return kr_ok();
}
}
int ret = peek_nosync(ctx, pkt);
- kr_cache_sync(&req->ctx->cache);
+ kr_cache_commit(&req->ctx->cache);
return ret;
}
if (unauth_cnt) {
VERBOSE_MSG(qry, "=> stashed also %d nonauth RRsets\n", unauth_cnt);
};
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
return ctx->state; /* we ignore cache-stashing errors */
}
#if 0 /* Occasionally useful when debugging some kinds of changes. */
{
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
knot_db_val_t val = { NULL, 0 };
ret = cache_op(cache, read, &key, &val, 1);
if (ret != kr_error(ENOENT)) { // ENOENT might happen in some edge case, I guess
}
ret = cache->api->remove(cache->db, keys, count);
cleanup:
- kr_cache_sync(cache); /* Sync even after just kr_cache_match(). */
+	kr_cache_commit(cache); /* Commit even after just kr_cache_match(). */
/* Free keys */
while (--i >= 0) {
free(keys[i].data);
/** Run after a row of operations to release transaction/lock if needed. */
KR_EXPORT
-int kr_cache_sync(struct kr_cache *cache);
+int kr_cache_commit(struct kr_cache *cache);
/**
* Return true if cache is open and enabled.
int (*clear)(knot_db_t *db);
/** Run after a row of operations to release transaction/lock if needed. */
- int (*sync)(knot_db_t *db);
+ int (*commit)(knot_db_t *db);
/* Data access */
assert(env && txn);
if (env->txn.rw) {
/* Reuse the *open* RW txn even if only reading is requested.
- * We leave the management of this to the cdb_sync command.
+ * We leave the management of this to the cdb_commit command.
* The user may e.g. want to do some reads between the writes. */
*txn = env->txn.rw;
return kr_ok();
return kr_ok();
}
-static int cdb_sync(knot_db_t *db)
+static int cdb_commit(knot_db_t *db)
{
struct lmdb_env *env = db;
int ret = kr_ok();
}
/* Only in a read-only txn; TODO: it's a bit messy/coupled */
if (env->txn.rw) {
- int ret = cdb_sync(env);
+ int ret = cdb_commit(env);
if (ret) return ret;
}
MDB_txn *txn = NULL;
assert(env && env->env);
/* Get rid of any transactions. */
- cdb_sync(env);
+ cdb_commit(env);
free_txn_ro(env);
mdb_env_sync(env->env, 1);
if (ret == kr_ok()) {
ret = lmdb_error(mdb_drop(txn, env->dbi, 0));
if (ret == kr_ok()) {
- ret = cdb_sync(db);
+ ret = cdb_commit(db);
}
if (ret == kr_ok()) {
return ret;
}
/* We are about to switch to a different file, so end all txns, to be sure. */
- (void) cdb_sync(db);
+ (void) cdb_commit(db);
free_txn_ro(db);
/* Since there is no guarantee that there will be free
/* Try to recover from doing too much writing in a single transaction. */
if (ret == MDB_TXN_FULL) {
- ret = cdb_sync(env);
+ ret = cdb_commit(env);
if (ret) {
ret = txn_get(env, txn, false);
}
{
static const struct kr_cdb_api api = {
"lmdb",
- cdb_init, cdb_deinit, cdb_count, cdb_clear, cdb_sync,
+ cdb_init, cdb_deinit, cdb_count, cdb_clear, cdb_commit,
cdb_readv, cdb_writev, cdb_remove,
cdb_match,
cdb_read_leq
assert(target[0]);
target = knot_wire_next_label(target, NULL);
}
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
#endif
}
}
}
- kr_cache_sync(&ctx->cache);
+ kr_cache_commit(&ctx->cache);
mm_free(cut->pool, qname);
return ret;
}
ret = kr_cache_peek(cache, KR_CACHE_USER, dname, KNOT_RRTYPE_TSIG, &entry, 0);
cache->api = api_saved;
assert_int_not_equal(ret, 0);
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
static void test_fake_insert(void **state)
KNOT_RRTYPE_TSIG, &global_fake_ce, global_namedb_data);
assert_int_equal(ret_cache_ins_ok, 0);
assert_int_equal(ret_cache_ins_inval, KNOT_EINVAL);
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
/* Test invalid parameters and some api failures. */
assert_int_not_equal(kr_cache_remove(cache, KR_CACHE_RR, NULL, 0), 0);
assert_int_not_equal(kr_cache_remove(NULL, 0, NULL, 0), 0);
assert_int_not_equal(kr_cache_clear(NULL), 0);
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
/* Test cache write */
struct kr_cache *cache = (*state);
int ret = kr_cache_insert_rr(cache, &global_rr, 0, 0, CACHE_TIME);
assert_int_equal(ret, 0);
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
static void test_materialize(void **state)
assert_int_equal(query_ret, 0);
assert_true(rr_equal);
}
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
/* Test cache read (simulate aged entry) */
struct kr_cache *cache = (*state);
int ret = kr_cache_peek_rr(cache, &cache_rr, &rank, &flags, ×tamp);
assert_int_equal(ret, kr_error(ESTALE));
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
/* Test cache removal */
assert_int_equal(ret, 0);
ret = kr_cache_peek_rr(cache, &cache_rr, &rank, &flags, ×tamp);
assert_int_equal(ret, KNOT_ENOENT);
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
/* Test cache fill */
if (ret != 0) {
break;
}
- ret = kr_cache_sync(cache);
+ ret = kr_cache_commit(cache);
if (ret != 0) {
break;
}
/* Expect we run out of space */
assert_int_equal(ret, kr_error(ENOSPC));
- kr_cache_sync(cache);
+ kr_cache_commit(cache);
}
/* Test cache clear */