/* TODO: check if update really needed */
slabhash_insert(worker->daemon->rrset_cache,
rep->rrsets[i]->entry.hash, &rep->rrsets[i]->entry,
- rep->rrsets[i]->entry.data);
+ rep->rrsets[i]->entry.data, &worker->alloc);
}
}
return 0;
}
slabhash_insert(w->worker->daemon->msg_cache, w->query_hash,
- &e->entry, rep);
+ &e->entry, rep, &w->worker->alloc);
return 0;
}
4 May 2007: Wouter
- msgreply sizefunc is more accurate.
- config settings for rrset cache size and slabs.
+	- hashtable insert takes an argument so that a thread can use its own
+	  alloc cache to store released keys (see the usage sketch below).
3 May 2007: Wouter
- fill refs. Use new parse and encode to answer queries.
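(Usage sketch, not part of the patch: how the new insert argument is meant
to be used. The helper name cache_store and its exact parameters are
hypothetical; worker->alloc follows the worker.c hunks above.)

	#include "util/storage/slabhash.h"
	#include "util/alloc.h"

	/* Hypothetical helper: store an entry in a cache, releasing any
	 * replaced key into this thread's own alloc cache instead of the
	 * shared superalloc. Passing NULL as the last argument falls back
	 * to the cb_arg that was set at table creation time. */
	static void
	cache_store(struct slabhash* cache, hashvalue_t hash,
		struct lruhash_entry* entry, void* data,
		struct alloc_cache* thread_alloc)
	{
		slabhash_insert(cache, hash, entry, data, thread_alloc);
	}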
k->entry.data = d;
k2->entry.data = d2;
- lruhash_insert(table, myhash(12), &k->entry, d);
- lruhash_insert(table, myhash(14), &k2->entry, d2);
+ lruhash_insert(table, myhash(12), &k->entry, d, NULL);
+ lruhash_insert(table, myhash(14), &k2->entry, d2, NULL);
unit_assert( lruhash_lookup(table, myhash(12), k, 0) == &k->entry);
lock_rw_unlock( &k->entry.lock );
struct testdata* data = newdata(numtoadd);
struct testkey* key = newkey(numtoadd);
key->entry.data = data;
- lruhash_insert(table, myhash(numtoadd), &key->entry, data);
+ lruhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
ref[numtoadd] = data;
}
struct testdata* data = newdata(numtoadd);
struct testkey* key = newkey(numtoadd);
key->entry.data = data;
- lruhash_insert(table, myhash(numtoadd), &key->entry, data);
+ lruhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
if(ref)
ref[numtoadd] = data;
}
k->entry.data = d;
k2->entry.data = d2;
- slabhash_insert(table, myhash(12), &k->entry, d);
- slabhash_insert(table, myhash(14), &k2->entry, d2);
+ slabhash_insert(table, myhash(12), &k->entry, d, NULL);
+ slabhash_insert(table, myhash(14), &k2->entry, d2, NULL);
unit_assert( slabhash_lookup(table, myhash(12), k, 0) == &k->entry);
lock_rw_unlock( &k->entry.lock );
struct slabtestdata* data = newdata(numtoadd);
struct slabtestkey* key = newkey(numtoadd);
key->entry.data = data;
- slabhash_insert(table, myhash(numtoadd), &key->entry, data);
+ slabhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
ref[numtoadd] = data;
}
struct slabtestdata* data = newdata(numtoadd);
struct slabtestkey* key = newkey(numtoadd);
key->entry.data = data;
- slabhash_insert(table, myhash(numtoadd), &key->entry, data);
+ slabhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
if(ref)
ref[numtoadd] = data;
}
log_assert(alloc);
if(!mem)
return;
+	/* an alloc cache without a super is the shared superalloc itself,
+	 * which other threads use too, so it needs locking */
+	if(!alloc->super)
+		lock_quick_lock(&alloc->lock);
+
alloc_special_clean(mem);
if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
/* push it to the super structure */
alloc_set_special_next(mem, alloc->quar);
alloc->quar = mem;
alloc->num_quar++;
+ if(!alloc->super)
+ lock_quick_unlock(&alloc->lock);
}
void
k->id = 0;
free(k->rk.dname);
k->rk.dname = NULL;
- lock_quick_lock(&a->lock);
alloc_special_release(a, k);
- lock_quick_unlock(&a->lock);
}
void
void
lruhash_insert(struct lruhash* table, hashvalue_t hash,
- struct lruhash_entry* entry, void* data)
+ struct lruhash_entry* entry, void* data, void* cb_arg)
{
struct lruhash_bin* bin;
struct lruhash_entry* found, *reclaimlist=NULL;
size_t need_size;
need_size = table->sizefunc(entry->key, data);
+ if(cb_arg == NULL) cb_arg = table->cb_arg;
/* find bin */
lock_quick_lock(&table->lock);
/* if so: update data - needs a writelock */
table->space_used += need_size -
(*table->sizefunc)(found->key, found->data);
- (*table->delkeyfunc)(entry->key, table->cb_arg);
+ (*table->delkeyfunc)(entry->key, cb_arg);
lru_touch(table, found);
lock_rw_wrlock(&found->lock);
- (*table->deldatafunc)(found->data, table->cb_arg);
+ (*table->deldatafunc)(found->data, cb_arg);
found->data = data;
lock_rw_unlock(&found->lock);
}
struct lruhash_entry* n = reclaimlist->overflow_next;
void* d = reclaimlist->data;
lock_rw_unlock(&reclaimlist->lock);
- (*table->delkeyfunc)(reclaimlist->key, table->cb_arg);
- (*table->deldatafunc)(d, table->cb_arg);
+ (*table->delkeyfunc)(reclaimlist->key, cb_arg);
+ (*table->deldatafunc)(d, cb_arg);
reclaimlist = n;
}
}
* But entry->data is set to NULL before the key is deleted, and the
* new data pointer is put into the existing entry. The old data is
* then freed.
* @param data: the data.
+ * @param cb_override: if not NULL, overrides the cb_arg for the deletefunc.
*/
void lruhash_insert(struct lruhash* table, hashvalue_t hash,
- struct lruhash_entry* entry, void* data);
+ struct lruhash_entry* entry, void* data, void* cb_override);
/**
* Lookup an entry in the hashtable.
}
void slabhash_insert(struct slabhash* sl, hashvalue_t hash,
- struct lruhash_entry* entry, void* data)
+ struct lruhash_entry* entry, void* data, void* arg)
{
- lruhash_insert(sl->array[slab_idx(sl, hash)], hash, entry, data);
+ lruhash_insert(sl->array[slab_idx(sl, hash)], hash, entry, data, arg);
}
struct lruhash_entry* slabhash_lookup(struct slabhash* sl,
* But entry->data is set to NULL before the key is deleted, and the
* new data pointer is put into the existing entry. The old data is
* then freed.
* @param data: the data.
+ * @param cb_override: if not NULL, overrides the cb_arg for the deletefunc.
*/
void slabhash_insert(struct slabhash* table, hashvalue_t hash,
- struct lruhash_entry* entry, void* data);
+ struct lruhash_entry* entry, void* data, void* cb_override);
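(Usage sketch, not part of the header: the two ways to call the new
insert. The identifiers cache, hash, entry, data and worker are
placeholders; worker->alloc follows the worker.c hunks above.)

	/* use the cb_arg given when the table was created */
	slabhash_insert(cache, hash, entry, data, NULL);

	/* route released keys into this worker thread's alloc cache */
	slabhash_insert(cache, hash, entry, data, &worker->alloc);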
/**
* Lookup an entry in the hashtable. Uses lruhash_lookup.