{
daemon->cfg = cfg;
config_apply(cfg);
- if(!daemon->env->msg_cache ||
- cfg->msg_cache_slabs != daemon->env->msg_cache->size ||
- (cfg->msg_cache_size/cfg->msg_cache_slabs)*cfg->msg_cache_slabs != slabhash_get_size(daemon->env->msg_cache)) {
+ if(!slabhash_is_size(daemon->env->msg_cache, cfg->msg_cache_size,
+ cfg->msg_cache_slabs)) {
slabhash_delete(daemon->env->msg_cache);
daemon->env->msg_cache = slabhash_create(cfg->msg_cache_slabs,
HASH_DEFAULT_STARTARRAY, cfg->msg_cache_size,
return UB_INITFAIL;
if(!auth_zones_apply_cfg(ctx->env->auth_zones, cfg, 1))
return UB_INITFAIL;
- if(!ctx->env->msg_cache ||
- cfg->msg_cache_slabs != ctx->env->msg_cache->size ||
- (cfg->msg_cache_size/cfg->msg_cache_slabs)*cfg->msg_cache_slabs != slabhash_get_size(ctx->env->msg_cache)) {
+ if(!slabhash_is_size(ctx->env->msg_cache, cfg->msg_cache_size,
+ cfg->msg_cache_slabs)) {
slabhash_delete(ctx->env->msg_cache);
ctx->env->msg_cache = slabhash_create(cfg->msg_cache_slabs,
HASH_DEFAULT_STARTARRAY, cfg->msg_cache_size,
/* divide cachesize by slabs and multiply by slabs, because if the
* cachesize is not an even multiple of slabs, that is the resulting
* size of the slabhash */
- if((maxmem/cfg->infra_cache_slabs)*cfg->infra_cache_slabs != slabhash_get_size(infra->hosts) ||
- cfg->infra_cache_slabs != infra->hosts->size ||
- cfg->ratelimit_slabs != infra->domain_rates->size ||
- (cfg->ratelimit_size/cfg->ratelimit_slabs)*cfg->ratelimit_slabs != slabhash_get_size(infra->domain_rates) ||
- cfg->ip_ratelimit_slabs != infra->client_ip_rates->size ||
- (cfg->ip_ratelimit_size/cfg->ip_ratelimit_slabs)*cfg->ip_ratelimit_slabs != slabhash_get_size(infra->client_ip_rates)) {
+ if(!slabhash_is_size(infra->hosts, maxmem, cfg->infra_cache_slabs) ||
+ !slabhash_is_size(infra->domain_rates, cfg->ratelimit_size,
+ cfg->ratelimit_slabs) ||
+ !slabhash_is_size(infra->client_ip_rates, cfg->ip_ratelimit_size,
+ cfg->ip_ratelimit_slabs)) {
infra_delete(infra);
infra = infra_create(cfg);
} else {
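
The comment above is the reason the new helper rounds before comparing: the configured size is split over the slabs, so when it is not an even multiple of the slab count the cache that gets created holds (size/slabs)*slabs bytes, and a reload check that compared against the raw configured size would rebuild the cache on every reload. A minimal standalone sketch of that arithmetic (plain C, stand-alone values, no Unbound types):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t requested = 4*1024*1024; /* e.g. a configured cache size */
        size_t slabs = 3;               /* not a divisor of the size */
        size_t per_slab = requested/slabs;   /* what each slab is given */
        size_t effective = per_slab*slabs;   /* what the whole cache holds */
        printf("requested %zu, effective %zu\n", requested, effective);
        /* comparing against 'requested' would always mismatch here and
         * force a rebuild on every reload; the rounded compare does not */
        printf("naive compare: %d, rounded compare: %d\n",
            requested == effective,
            (requested/slabs)*slabs == effective);
        return 0;
    }
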
struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r,
struct config_file* cfg, struct alloc_cache* alloc)
{
- if(!r || !cfg || cfg->rrset_cache_slabs != r->table.size ||
- cfg->rrset_cache_size != slabhash_get_size(&r->table))
+ if(!r || !cfg || !slabhash_is_size(&r->table, cfg->rrset_cache_size,
+ cfg->rrset_cache_slabs))
{
rrset_cache_delete(r);
r = rrset_cache_create(cfg, alloc);
return total;
}
+int slabhash_is_size(struct slabhash* sl, size_t size, size_t slabs)
+{
+ /* divide by slabs and then multiply by the number of slabs,
+ * because if the size is not an even multiple of slabs, the
+ * uneven amount needs to be removed for comparison */
+ if(!sl) return 0;
+ if(sl->size != slabs) return 0;
+ if(slabs == 0) return 0;
+ if( (size/slabs)*slabs == slabhash_get_size(sl))
+ return 1;
+ return 0;
+}
+
size_t slabhash_get_mem(struct slabhash* sl)
{
size_t i, total = sizeof(*sl);
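
For context, this is the reuse-or-recreate pattern that the daemon, libunbound and rrset call sites above now share, reduced to a self-contained sketch. toy_cache, toy_create, toy_delete, toy_is_size and toy_adjust are illustrative stand-ins, not the real slabhash API; the point is only the control flow: keep the cache when the configured size and slab count still describe it, otherwise delete it and create a new one.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    /* illustrative stand-in for a slab-partitioned cache */
    struct toy_cache { size_t slabs; size_t bytes; };

    static struct toy_cache* toy_create(size_t size, size_t slabs)
    {
        struct toy_cache* c;
        if(slabs == 0) return NULL;
        c = malloc(sizeof(*c));
        if(!c) return NULL;
        c->slabs = slabs;
        c->bytes = (size/slabs)*slabs; /* rounded down to a slab multiple */
        return c;
    }

    static void toy_delete(struct toy_cache* c) { free(c); }

    static int toy_is_size(struct toy_cache* c, size_t size, size_t slabs)
    {
        if(!c || slabs == 0 || c->slabs != slabs) return 0;
        return (size/slabs)*slabs == c->bytes;
    }

    /* the reload pattern from the hunks above: keep the cache if the
     * new config still describes it, otherwise rebuild it */
    static struct toy_cache* toy_adjust(struct toy_cache* c, size_t size,
        size_t slabs)
    {
        if(!toy_is_size(c, size, slabs)) {
            toy_delete(c);
            c = toy_create(size, slabs);
        }
        return c;
    }

    int main(void)
    {
        struct toy_cache* c = toy_create(4*1024*1024, 4);
        c = toy_adjust(c, 4*1024*1024, 4); /* unchanged config: kept as-is */
        printf("fits 4MB/4 slabs: %d\n", toy_is_size(c, 4*1024*1024, 4));
        c = toy_adjust(c, 8*1024*1024, 4); /* size grew: deleted, recreated */
        printf("fits 8MB/4 slabs: %d\n", toy_is_size(c, 8*1024*1024, 4));
        toy_delete(c);
        return 0;
    }
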
*/
size_t slabhash_get_size(struct slabhash* table);
+/**
+ * See if slabhash is of given (size, slabs) configuration.
+ * @param table: hash table.
+ * @param size: max size to test for.
+ * @param slabs: slab count to test for.
+ * @return true if the table matches the given size and slab count.
+ */
+int slabhash_is_size(struct slabhash* table, size_t size, size_t slabs);
+
/**
* Retrieve slab hash current memory use.
* @param table: hash table.
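
The contract documented here (a NULL table, a mismatched slab count, or zero slabs all count as "not this size") can be exercised with a small assert-style sketch. my_slabhash, my_get_size and my_is_size are local stand-ins that copy the body added in slabhash.c above, since constructing a real struct slabhash would pull in the whole lruhash setup:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* stand-in with only the two fields the check inspects:
     * size = number of slabs, space = total byte limit of the table */
    struct my_slabhash { size_t size; size_t space; };

    static size_t my_get_size(struct my_slabhash* sl) { return sl->space; }

    /* same logic as the slabhash_is_size() body added above */
    static int my_is_size(struct my_slabhash* sl, size_t size, size_t slabs)
    {
        if(!sl) return 0;
        if(sl->size != slabs) return 0;
        if(slabs == 0) return 0;
        if( (size/slabs)*slabs == my_get_size(sl))
            return 1;
        return 0;
    }

    int main(void)
    {
        struct my_slabhash sl = { 4, (1024*1024/4)*4 };
        assert(!my_is_size(NULL, 1024*1024, 4));  /* no table */
        assert(!my_is_size(&sl, 1024*1024, 8));   /* slab count differs */
        assert(!my_is_size(&sl, 1024*1024, 0));   /* zero slabs never match */
        assert(!my_is_size(&sl, 2*1024*1024, 4)); /* size differs */
        assert(my_is_size(&sl, 1024*1024, 4));    /* size and slabs match */
        printf("documented contract holds\n");
        return 0;
    }
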