- start of regional allocator code.
- regional uses less memory and variables, simplified code.
- removal of region-allocator.
+ - alloc cache keeps a cache of recently released regional blocks,
+ up to a maximum.
+ - make unit test cleanly free memory.
17 October 2007: Wouter
- fixup another cycle detect and ns-addr timeout resolution bug.
#include "util/data/msgencode.h"
#include "util/timehist.h"
#include "util/fptr_wlist.h"
+#include "util/alloc.h"
int
mesh_state_compare(const void* ap, const void* bp)
mesh_state_create(struct module_env* env, struct query_info* qinfo,
uint16_t qflags, int prime)
{
- struct regional* region = regional_create();
+ struct regional* region = alloc_reg_obtain(env->alloc);
struct mesh_state* mstate;
int i;
if(!region)
mstate = (struct mesh_state*)regional_alloc(region,
sizeof(struct mesh_state));
if(!mstate) {
- regional_destroy(region);
+ alloc_reg_release(env->alloc, region);
return NULL;
}
memset(mstate, 0, sizeof(*mstate));
mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
qinfo->qname_len);
if(!mstate->s.qinfo.qname) {
- regional_destroy(region);
+ alloc_reg_release(env->alloc, region);
return NULL;
}
/* remove all weird bits from qflags */
mstate->s.minfo[i] = NULL;
mstate->s.ext_state[i] = module_finished;
}
- regional_destroy(mstate->s.region);
+ alloc_reg_release(mstate->s.env->alloc, mstate->s.region);
}
void
verbose(1, "%s: Read %d entries\n", prog_name, entry_num);
fclose(in);
+ ldns_rdf_deep_free(origin);
+ ldns_rdf_deep_free(prev_rr);
return list;
}
verifytest_entry(e, &alloc, region, buf, dnskey, &env, &ve);
}
+ ub_packed_rrset_parsedelete(dnskey, &alloc);
delete_entry(list);
regional_destroy(region);
alloc_clear(&alloc);
#include "config.h"
#include "util/alloc.h"
+#include "util/regional.h"
#include "util/data/packed_rrset.h"
+/** custom size of cached regional blocks */
+#define ALLOC_REG_SIZE 16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT 48 /* for 65k threads, 2^48 rrsets per thr. */
}
}
+/**
+ * Preallocate 'num' regional blocks and push them onto the alloc
+ * cache's free list (alloc->reg_list), incrementing num_reg_blocks.
+ * Exits the whole program via fatal_exit() if memory runs out.
+ * @param alloc: alloc cache that keeps the list of free regional blocks.
+ * @param num: how many regional blocks to create.
+ */
+static void
+prealloc_blocks(struct alloc_cache* alloc, size_t num)
+{
+	size_t i;
+	struct regional* r;
+	for(i=0; i<num; i++) {
+		r = regional_create_custom(ALLOC_REG_SIZE);
+		if(!r) fatal_exit("prealloc blocks: out of memory");
+		/* regional's next field is declared char*; cast to link blocks */
+		r->next = (char*)alloc->reg_list;
+		alloc->reg_list = r;
+		alloc->num_reg_blocks ++;
+	}
+}
+
void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
int thread_num)
alloc->last_id -= 1; /* for compiler portability. */
alloc->last_id |= alloc->next_id;
alloc->next_id += 1; /* because id=0 is special. */
+ alloc->max_reg_blocks = 100;
+ alloc->num_reg_blocks = 0;
+ alloc->reg_list = NULL;
+ if(alloc->super)
+ prealloc_blocks(alloc, alloc->max_reg_blocks);
if(!alloc->super) {
lock_quick_init(&alloc->lock);
lock_protect(&alloc->lock, alloc, sizeof(*alloc));
alloc_clear(struct alloc_cache* alloc)
{
alloc_special_t* p, *np;
+ struct regional* r, *nr;
if(!alloc)
return;
if(!alloc->super) {
}
alloc->quar = 0;
alloc->num_quar = 0;
+ r = alloc->reg_list;
+ while(r) {
+ nr = (struct regional*)r->next;
+ free(r);
+ r = nr;
+ }
+ alloc->reg_list = NULL;
+ alloc->num_reg_blocks = 0;
}
uint64_t
void
alloc_stats(struct alloc_cache* alloc)
{
- log_info("%salloc: %d in cache.", alloc->super?"":"sup",
- (int)alloc->num_quar);
+ log_info("%salloc: %d in cache, %d blocks.", alloc->super?"":"sup",
+ (int)alloc->num_quar, (int)alloc->num_reg_blocks);
}
size_t alloc_get_mem(struct alloc_cache* alloc)
for(p = alloc->quar; p; p = alloc_special_next(p)) {
s += lock_get_mem(&p->entry.lock);
}
+ s += alloc->num_reg_blocks * ALLOC_REG_SIZE;
if(!alloc->super) {
lock_quick_unlock(&alloc->lock);
}
return s;
}
+/**
+ * Obtain a regional for use by a query state.
+ * Pops a cached block off the free list when one is available;
+ * otherwise creates a fresh regional of the custom ALLOC_REG_SIZE.
+ * @param alloc: alloc cache to take the block from.
+ * @return regional for use, or NULL on allocation failure.
+ */
+struct regional*
+alloc_reg_obtain(struct alloc_cache* alloc)
+{
+	if(alloc->num_reg_blocks > 0) {
+		struct regional* r = alloc->reg_list;
+		alloc->reg_list = (struct regional*)r->next;
+		r->next = NULL;
+		alloc->num_reg_blocks--;
+		return r;
+	}
+	/* cache empty: allocate a new block of the cached custom size */
+	return regional_create_custom(ALLOC_REG_SIZE);
+}
+
+/**
+ * Put a regional back into the alloc cache.
+ * If the cache is already at max_reg_blocks the regional is destroyed
+ * outright; otherwise its contents are released (regional_free_all)
+ * and the emptied block is pushed onto the free list for reuse.
+ * @param alloc: alloc cache to return the block to.
+ * @param r: regional to put back.
+ */
+void
+alloc_reg_release(struct alloc_cache* alloc, struct regional* r)
+{
+	if(alloc->num_reg_blocks >= alloc->max_reg_blocks) {
+		regional_destroy(r);
+		return;
+	}
+	/* free region contents; the base block itself is kept for reuse */
+	regional_free_all(r);
+	log_assert(r->next == NULL);
+	r->next = (char*)alloc->reg_list;
+	alloc->reg_list = r;
+	alloc->num_reg_blocks++;
+}
+
/** global debug value to keep track of total memory mallocs */
size_t unbound_mem_alloc = 0;
/** global debug value to keep track of total memory frees */
#include "util/locks.h"
struct ub_packed_rrset_key;
+struct regional;
/** The special type, packed rrset. Not allowed to be used for other memory */
typedef struct ub_packed_rrset_key alloc_special_t;
uint64_t next_id;
/** last id number possible */
uint64_t last_id;
+
+ /** how many regional blocks to keep back max */
+ size_t max_reg_blocks;
+ /** how many regional blocks are kept now */
+ size_t num_reg_blocks;
+ /** linked list of regional blocks, using regional->next */
+ struct regional* reg_list;
};
/**
*/
void alloc_stats(struct alloc_cache* alloc);
+/**
+ * Get a new regional for query states
+ * @param alloc: where to alloc it.
+ * @return regional for use or NULL on alloc failure.
+ */
+struct regional* alloc_reg_obtain(struct alloc_cache* alloc);
+
+/**
+ * Put regional for query states back into alloc cache.
+ * @param alloc: where to alloc it.
+ * @param r: regional to put back.
+ */
+void alloc_reg_release(struct alloc_cache* alloc, struct regional* r);
+
#endif /* UTIL_ALLOC_H */