Commit code, and see if unit test is fixed (with less busy open).
author     Wouter Wijngaards <wouter@nlnetlabs.nl>
           Tue, 10 Apr 2007 07:11:57 +0000 (07:11 +0000)
committer  Wouter Wijngaards <wouter@nlnetlabs.nl>
           Tue, 10 Apr 2007 07:11:57 +0000 (07:11 +0000)
git-svn-id: file:///svn/unbound/trunk@233 be551aaa-1e26-0410-a405-d3ace91eadb9

doc/Changelog
util/alloc.c
util/alloc.h
util/data/msgreply.c
util/data/msgreply.h
util/data/packed_rrset.c
util/data/packed_rrset.h
util/region-allocator.c [new file with mode: 0644]
util/region-allocator.h [new file with mode: 0644]

diff --git a/doc/Changelog b/doc/Changelog
index 71fb2935f47e947fca90099826c9689947a91b07..caa6ceec1af33ead816bbcf36f56281fa94eeef7 100644 (file)
@@ -2,6 +2,9 @@
        - Improved alignment of reply_info packet, nice for 32 and 64 bit.
        - Put RRset counts in reply_info, because the number of RRs can change
          due to RRset updates.
+       - import of region-allocator code from nsd.
+       - set alloc special type to ub_packed_rrset_key.
+         Uses lruhash entry overflow chain next pointer in alloc cache.
 
 5 April 2007: Wouter
        - discussed packed rrset with Jelte.
diff --git a/util/alloc.c b/util/alloc.c
index 933c64a32b36fb7b46b3a1ddfdeb5efd67f87a05..516456eb99bcd208c123dcf22e3590541aa0e1b1 100644 (file)
@@ -41,6 +41,7 @@
 
 #include "config.h"
 #include "util/alloc.h"
+#include "util/data/packed_rrset.h"
 
 /** prealloc some entries in the cache. To minimize contention. 
  * Result is 1 lock per alloc_max newly created entries.
@@ -54,7 +55,7 @@ prealloc(struct alloc_cache* alloc)
        for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
                if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t))))
                        fatal_exit("prealloc: out of memory");
-               alloc_special_next(p) = alloc->quar;
+               alloc_set_special_next(p, alloc->quar);
                alloc->quar = p;
                alloc->num_quar++;
        }
@@ -86,7 +87,7 @@ alloc_clear(struct alloc_cache* alloc)
                while(alloc_special_next(p)) /* find last */
                        p = alloc_special_next(p);
                lock_quick_lock(&alloc->super->lock);
-               alloc_special_next(p) = alloc->super->quar;
+               alloc_set_special_next(p, alloc->super->quar);
                alloc->super->quar = alloc->quar;
                alloc->super->num_quar += alloc->num_quar;
                lock_quick_unlock(&alloc->super->lock);
@@ -149,7 +150,7 @@ pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
        log_assert(alloc && alloc->super && 
                alloc->num_quar >= ALLOC_SPECIAL_MAX);
        /* push ALLOC_SPECIAL_MAX/2 after mem */
-       alloc_special_next(mem) = alloc->quar;
+       alloc_set_special_next(mem, alloc->quar);
        for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
                p = alloc_special_next(p);
        }
@@ -158,7 +159,7 @@ pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
 
        /* dump mem+list into the super quar list */
        lock_quick_lock(&alloc->super->lock);
-       alloc_special_next(p) = alloc->super->quar;
+       alloc_set_special_next(p, alloc->super->quar);
        alloc->super->quar = mem;
        alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
        lock_quick_unlock(&alloc->super->lock);
@@ -178,7 +179,7 @@ alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem)
                return;
        }
 
-       alloc_special_next(mem) = alloc->quar;
+       alloc_set_special_next(mem, alloc->quar);
        alloc->quar = mem;
        alloc->num_quar++;
 }
diff --git a/util/alloc.h b/util/alloc.h
index d261b6147af1f52d4348a427ddba2257096b916b..29f084f3cc6b934433a00386ee3d1e33b0fd5bb2 100644 (file)
 #define UTIL_ALLOC_H
 
 #include "util/locks.h"
+struct ub_packed_rrset_key;
 
 /** The special type, packed rrset. Not allowed to be used for other memory */
-typedef uint64_t alloc_special_t;
+typedef struct ub_packed_rrset_key alloc_special_t;
 /** clean the special type. Pass pointer. */
-#define alloc_special_clean(x) memset(x, 0, sizeof(alloc_special_t))
+#define alloc_special_clean(x) (x)->id = 0;
 /** access next pointer. (in available spot). Pass pointer. */
-#define alloc_special_next(x) (*((alloc_special_t**)(x)))
+#define alloc_special_next(x) ((alloc_special_t*)((x)->entry.overflow_next))
+/** set next pointer. (in available spot). Pass pointers. */
+#define alloc_set_special_next(x, y) \
+       ((x)->entry.overflow_next) = (struct lruhash_entry*)(y);
 
 /** how many blocks to cache locally. */
 #define ALLOC_SPECIAL_MAX 10
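
The hunks above retype alloc_special_t to struct ub_packed_rrset_key and thread the per-thread quarantine free list through the key's lruhash entry overflow_next pointer; the new alloc_set_special_next() macro replaces the old lvalue-style assignment through alloc_special_next(). Below is a minimal sketch of that linking; the struct definitions are simplified stand-ins for the real lruhash.h and packed_rrset.h declarations, not the actual unbound types.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for struct lruhash_entry / struct ub_packed_rrset_key */
struct lruhash_entry { struct lruhash_entry* overflow_next; void* data; };
struct ub_packed_rrset_key { struct lruhash_entry entry; uint64_t id; };

typedef struct ub_packed_rrset_key alloc_special_t;
#define alloc_special_next(x) ((alloc_special_t*)((x)->entry.overflow_next))
#define alloc_set_special_next(x, y) \
        ((x)->entry.overflow_next) = (struct lruhash_entry*)(y);

int main(void)
{
        alloc_special_t* quar = NULL;
        alloc_special_t* p;
        int i;
        /* push three entries onto a quarantine list, as prealloc() does */
        for(i = 0; i < 3; i++) {
                p = (alloc_special_t*)calloc(1, sizeof(*p));
                if(!p) return 1;
                alloc_set_special_next(p, quar);
                quar = p;
        }
        /* walk the list through the borrowed overflow_next pointer */
        for(p = quar; p; p = alloc_special_next(p))
                printf("cached key %p id %u\n", (void*)p, (unsigned)p->id);
        while(quar) {
                p = alloc_special_next(quar);
                free(quar);
                quar = p;
        }
        return 0;
}
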
diff --git a/util/data/msgreply.c b/util/data/msgreply.c
index 46a575a3f5b06b1cfc981662a5cf8baecb8a5ce3..188545f6bf60a1b89370fa8e9777d5aead722e24 100644 (file)
 #include "util/net_help.h"
 #include "util/data/dname.h"
 
+int reply_info_parse(ldns_buffer* pkt, struct alloc_cache* alloc,
+        struct query_info* qinf, struct reply_info** rep);
+
+void 
+reply_info_parsedelete(struct reply_info* rep, struct alloc_cache* alloc)
+{
+       size_t i;
+       if(!rep) 
+               return;
+       /* no need to lock, since not shared in hashtables. */
+       for(i=0; i<rep->rrset_count; i++) {
+               ub_packed_rrset_parsedelete(rep->rrsets[i], alloc);
+       }
+       free(rep);
+}
+
 int 
 query_info_parse(struct query_info* m, ldns_buffer* query)
 {
diff --git a/util/data/msgreply.h b/util/data/msgreply.h
index 4b3903dc531f6ae6da16bf6674806ea7e1146527..dfcce46d4eea32fbf4bf3bef901820903596dfc3 100644 (file)
@@ -72,7 +72,7 @@ struct query_info {
  */
 struct rrset_ref {
        /** the key with lock, and ptr to packed data. */
-       struct packed_rrset_key* key;
+       struct ub_packed_rrset_key* key;
        /** id needed */
        rrset_id_t id;
 };
@@ -126,6 +126,9 @@ struct reply_info {
        /** Count of additional section RRsets */
        size_t ar_numrrsets;
 
+       /** number of RRsets: an_numrrsets + ns_numrrsets + ar_numrrsets */
+       size_t rrset_count;
+
        /** 
         * List of pointers (only) to the rrsets in the order in which 
         * they appear in the reply message.  
@@ -133,7 +136,7 @@ struct reply_info {
         * This is a pointer to that array. 
         * Use the accessor function for access.
         */
-       struct packed_rrset_key** rrsets;
+       struct ub_packed_rrset_key** rrsets;
 
        /** 
         * Packed array of ids (see counts) and pointers to packed_rrset_key.
@@ -185,10 +188,12 @@ int query_info_parse(struct query_info* m, ldns_buffer* query);
  * @param alloc: creates packed rrset key structures.
  * @param rep: allocated reply_info is returned (only on no error).
  * @param qinf: query_info is returned (only on no error).
- * @return: zero is OK, or error code in case of error.
+ * @return: zero is OK, or DNS error code in case of error
+ *     o FORMERR for parse errors.
+ *     o SERVFAIL for memory allocation errors.
  */
 int reply_info_parse(ldns_buffer* pkt, struct alloc_cache* alloc,
-       struct query_info* qinf, struct reply_info* rep);
+       struct query_info* qinf, struct reply_info** rep);
 
 /** 
  * Delete reply_info and packed_rrsets (while they are not yet added to the
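
The prototype change above makes reply_info_parse() hand the parsed reply back through a struct reply_info** out-parameter and return 0 on success or a DNS rcode on failure (FORMERR for parse errors, SERVFAIL for allocation errors). The toy program below only illustrates that calling convention; the types, the parser and the rcode constants (standard DNS values 0/1/2) are stand-ins, not unbound code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in rcode values (NOERROR=0, FORMERR=1, SERVFAIL=2, as in DNS) */
enum { RCODE_NOERROR = 0, RCODE_FORMERR = 1, RCODE_SERVFAIL = 2 };

struct toy_reply { size_t rrset_count; };

/* toy parser with the same contract as the new reply_info_parse():
 * fills *rep and returns 0 on success, FORMERR on bad input, SERVFAIL on OOM */
static int toy_parse(const char* pkt, struct toy_reply** rep)
{
        *rep = NULL;
        if(!pkt || pkt[0] == 0)
                return RCODE_FORMERR;            /* parse error */
        *rep = (struct toy_reply*)calloc(1, sizeof(**rep));
        if(!*rep)
                return RCODE_SERVFAIL;           /* allocation error */
        (*rep)->rrset_count = strlen(pkt);       /* pretend these are RRsets */
        return RCODE_NOERROR;
}

int main(void)
{
        struct toy_reply* rep;
        int rcode = toy_parse("abc", &rep);
        if(rcode != RCODE_NOERROR) {
                printf("parse failed, answer with rcode %d\n", rcode);
                return 1;
        }
        printf("parsed reply with %lu rrsets\n", (unsigned long)rep->rrset_count);
        free(rep);
        return 0;
}
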
diff --git a/util/data/packed_rrset.c b/util/data/packed_rrset.c
index 5053a75ad23591b25d1c51be0b31f8e85f726d15..2ecc109c987210371b47128c128ca29c69570042 100644 (file)
 #include "config.h"
 #include "util/data/packed_rrset.h"
 #include "util/log.h"
+#include "util/alloc.h"
 
+void
+ub_packed_rrset_parsedelete(struct ub_packed_rrset_key* pkey,
+        struct alloc_cache* alloc)
+{
+       if(!pkey)
+               return;
+       if(pkey->entry.data)
+               free(pkey->entry.data);
+       pkey->entry.data = NULL;
+       if(pkey->rk.dname)
+               free(pkey->rk.dname);
+       pkey->rk.dname = NULL;
+       pkey->id = 0;
+       alloc_special_release(alloc, pkey);
+}
 
diff --git a/util/data/packed_rrset.h b/util/data/packed_rrset.h
index 2b08b90928e94680bafbae034de5a7f88a7db1fe..67b80b4c5d0d922eef718bdf67bb225e562e1e36 100644 (file)
@@ -42,6 +42,7 @@
 #ifndef UTIL_DATA_PACKED_RRSET_H
 #define UTIL_DATA_PACKED_RRSET_H
 #include "util/storage/lruhash.h"
+struct alloc_cache;
 
 /** type used to uniquely identify rrsets. Cannot be reused without
  * clearing the cache. */
@@ -101,8 +102,8 @@ struct ub_packed_rrset_key {
  * The data is packed, stored contiguously in memory.
  * memory layout:
  *     o base struct
- *     o rr_data uint8_t* array
  *     o rr_len size_t array
+ *     o rr_data uint8_t* array
  *     o rr_ttl uint32_t array
  *     o rr_data rdata wireformats
  *     o rrsig_data rdata wireformat
@@ -127,14 +128,14 @@ struct packed_rrset_data {
        uint32_t ttl;
        /** number of rrs. */
        size_t count;
+       /** length of every rr's rdata, rr_len[i] is size of rr_data[i]. */
+       size_t* rr_len;
        /** 
         * Array of pointers to every rr's rdata. 
         * The rr_data[i] rdata is stored in uncompressed wireformat. 
         * The first uint16_t of rr_data[i] is network format rdlength.
         */
        uint8_t** rr_data;
-       /** length of every rr's rdata, rr_len[i] is size of rr_data[i]. */
-       size_t* rr_len;
        /** if this rrset is signed with an RRSIG, it is stored here. */
        uint8_t* rrsig_data;
        /** length of rrsig rdata (only examine if rrsig_data is not null) */
@@ -153,4 +154,13 @@ struct packed_rrset {
        struct packed_rrset_data* d;
 };
 
+/**
+ * Delete packed rrset key and data, not entered in hashtables yet.
+ * Used during parsing.
+ * @param pkey: rrset key structure with locks, key and data pointers.
+ * @param alloc: where to return the unfree-able key structure.
+ */
+void ub_packed_rrset_parsedelete(struct ub_packed_rrset_key* pkey,
+       struct alloc_cache* alloc);
+
 #endif /* UTIL_DATA_PACKED_RRSET_H */
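
The packed_rrset.h hunks above move rr_len ahead of rr_data, in both the layout comment and the struct, so that member order matches the packed layout: one contiguous allocation holding the base struct, then the rr_len array, the rr_data pointer array, the TTLs and finally the rdata wireformats. The sketch below shows the packing idea only; toy_packed is an illustrative stand-in (no TTLs, no rrsig) and not the real packed_rrset_data.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_packed {
        size_t count;
        size_t* rr_len;      /* points into the same allocation */
        uint8_t** rr_data;   /* points into the same allocation */
};

static struct toy_packed* toy_pack(const char** rrs, size_t count)
{
        size_t i, total = sizeof(struct toy_packed)
                + count * sizeof(size_t)          /* rr_len array */
                + count * sizeof(uint8_t*);       /* rr_data pointer array */
        uint8_t* p;
        struct toy_packed* d;
        for(i = 0; i < count; i++)
                total += strlen(rrs[i]) + 1;      /* the rdata itself */
        d = (struct toy_packed*)malloc(total);
        if(!d) return NULL;
        d->count = count;
        /* fix up internal pointers: the arrays live right after the struct */
        d->rr_len = (size_t*)((uint8_t*)d + sizeof(struct toy_packed));
        d->rr_data = (uint8_t**)(d->rr_len + count);
        p = (uint8_t*)(d->rr_data + count);
        for(i = 0; i < count; i++) {
                d->rr_len[i] = strlen(rrs[i]) + 1;
                d->rr_data[i] = p;
                memcpy(p, rrs[i], d->rr_len[i]);
                p += d->rr_len[i];
        }
        return d;
}

int main(void)
{
        const char* rrs[] = { "rdata-one", "rdata-two" };
        struct toy_packed* d = toy_pack(rrs, 2);
        if(!d) return 1;
        printf("%lu rrs, first is %lu bytes: %s\n", (unsigned long)d->count,
                (unsigned long)d->rr_len[0], (char*)d->rr_data[0]);
        free(d);  /* one free releases struct, arrays and rdata together */
        return 0;
}
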
diff --git a/util/region-allocator.c b/util/region-allocator.c
new file mode 100644 (file)
index 0000000..c04e2a6
--- /dev/null
+++ b/util/region-allocator.c
@@ -0,0 +1,478 @@
+/*
+ * region-allocator.c -- region based memory allocator.
+ *
+ * Copyright (c) 2001-2006, NLnet Labs. All rights reserved.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ * 
+ * This software is open source.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "region-allocator.h"
+
+#ifdef ALIGNMENT
+#  undef ALIGNMENT
+#endif
+#define ALIGN_UP(x, s)     (((x) + s - 1) & (~(s - 1)))
+#define ALIGNMENT          (sizeof(void *))
+#define CHECK_DOUBLE_FREE 0 /* set to 1 to perform expensive check for double recycle() */
+
+typedef struct cleanup cleanup_type;
+struct cleanup
+{
+       void (*action)(void *);
+       void *data;
+};
+
+struct recycle_elem {
+       struct recycle_elem* next;
+};
+
+struct region
+{
+       size_t        total_allocated;
+       size_t        small_objects;
+       size_t        large_objects;
+       size_t        chunk_count;
+       size_t        unused_space; /* Unused space due to alignment, etc. */
+       
+       size_t        allocated;
+       char         *initial_data;
+       char         *data;
+
+       void         *(*allocator)(size_t);
+       void          (*deallocator)(void *);
+    
+       size_t        maximum_cleanup_count;
+       size_t        cleanup_count;
+       cleanup_type *cleanups;
+
+       size_t        chunk_size;
+       size_t        large_object_size;
+
+       /* if not NULL recycling is enabled.
+        * It is an array of linked lists of parts held for recycle.
+        * The parts are all pointers to within the allocated chunks.
+        * Array [i] points to elements of size i. */
+       struct recycle_elem** recycle_bin;
+       /* amount of memory in recycle storage */
+       size_t          recycle_size;
+};
+
+
+static region_type *
+alloc_region_base(void *(*allocator)(size_t size),
+                 void (*deallocator)(void *),
+                 size_t initial_cleanup_count)
+{
+       region_type *result = (region_type *) allocator(sizeof(region_type));
+       if (!result) return NULL;
+
+       result->total_allocated = 0;
+       result->small_objects = 0;
+       result->large_objects = 0;
+       result->chunk_count = 1;
+       result->unused_space = 0;
+       result->recycle_bin = NULL;
+       result->recycle_size = 0;
+       
+       result->allocated = 0;
+       result->data = NULL;
+       result->initial_data = NULL;
+
+       result->allocator = allocator;
+       result->deallocator = deallocator;
+
+       assert(initial_cleanup_count > 0);
+       result->maximum_cleanup_count = initial_cleanup_count;
+       result->cleanup_count = 0;
+       result->cleanups = (cleanup_type *) allocator(
+               result->maximum_cleanup_count * sizeof(cleanup_type));
+       if (!result->cleanups) {
+               deallocator(result);
+               return NULL;
+       }
+
+       result->chunk_size = DEFAULT_CHUNK_SIZE;
+       result->large_object_size = DEFAULT_LARGE_OBJECT_SIZE;
+       return result;
+}
+
+region_type *
+region_create(void *(*allocator)(size_t size),
+             void (*deallocator)(void *))
+{
+       region_type* result = alloc_region_base(allocator, deallocator, 
+               DEFAULT_INITIAL_CLEANUP_SIZE);
+       if(!result)
+               return NULL;
+       result->data = (char *) allocator(result->chunk_size);
+       if (!result->data) {
+               deallocator(result->cleanups);
+               deallocator(result);
+               return NULL;
+       }
+       result->initial_data = result->data;
+    
+       return result;
+}
+
+
+region_type *region_create_custom(void *(*allocator)(size_t),
+                                 void (*deallocator)(void *),
+                                 size_t chunk_size,
+                                 size_t large_object_size,
+                                 size_t initial_cleanup_size,
+                                 int recycle)
+{
+       region_type* result = alloc_region_base(allocator, deallocator, 
+               initial_cleanup_size);
+       if(!result)
+               return NULL;
+       assert(large_object_size <= chunk_size);
+       result->chunk_size = chunk_size;
+       result->large_object_size = large_object_size;
+       if(result->chunk_size > 0) {
+               result->data = (char *) allocator(result->chunk_size);
+               if (!result->data) {
+                       deallocator(result->cleanups);
+                       deallocator(result);
+                       return NULL;
+               }
+               result->initial_data = result->data;
+       }
+       if(recycle) {
+               result->recycle_bin = allocator(sizeof(struct recycle_elem*)
+                       * result->large_object_size);
+               if(!result->recycle_bin) {
+                       region_destroy(result);
+                       return NULL;
+               }
+               memset(result->recycle_bin, 0, sizeof(struct recycle_elem*)
+                       * result->large_object_size);
+       }
+       return result;
+}
+
+
+void
+region_destroy(region_type *region)
+{
+       void (*deallocator)(void *);
+       if (!region)
+               return;
+
+       deallocator = region->deallocator;
+
+       region_free_all(region);
+       deallocator(region->cleanups);
+       deallocator(region->initial_data);
+       if(region->recycle_bin)
+               deallocator(region->recycle_bin);
+       deallocator(region);
+}
+
+
+size_t
+region_add_cleanup(region_type *region, void (*action)(void *), void *data)
+{
+       assert(action);
+    
+       if (region->cleanup_count >= region->maximum_cleanup_count) {
+               cleanup_type *cleanups = (cleanup_type *) region->allocator(
+                       2 * region->maximum_cleanup_count * sizeof(cleanup_type));
+               if (!cleanups) return 0;
+
+               memcpy(cleanups, region->cleanups,
+                      region->cleanup_count * sizeof(cleanup_type));
+               region->deallocator(region->cleanups);
+
+               region->cleanups = cleanups;
+               region->maximum_cleanup_count *= 2;
+       }
+
+       region->cleanups[region->cleanup_count].action = action;
+       region->cleanups[region->cleanup_count].data = data;
+
+       ++region->cleanup_count;
+       return region->cleanup_count;
+}
+
+void *
+region_alloc(region_type *region, size_t size)
+{
+       size_t aligned_size;
+       void *result;
+
+       if (size == 0) {
+               size = 1;
+       }
+       aligned_size = ALIGN_UP(size, ALIGNMENT);
+
+       if (aligned_size >= region->large_object_size) {
+               result = region->allocator(size);
+               if (!result) return NULL;
+        
+               if (!region_add_cleanup(region, region->deallocator, result)) {
+                       region->deallocator(result);
+                       return NULL;
+               }
+        
+               region->total_allocated += size;
+               ++region->large_objects;
+               
+               return result;
+       }
+
+       if (region->recycle_bin && region->recycle_bin[aligned_size]) {
+               result = (void*)region->recycle_bin[aligned_size];
+               region->recycle_bin[aligned_size] = region->recycle_bin[aligned_size]->next; 
+               region->recycle_size -= aligned_size;
+               region->unused_space += aligned_size - size;
+               return result;
+       }
+    
+       if (region->allocated + aligned_size > region->chunk_size) {
+               void *chunk = region->allocator(region->chunk_size);
+               size_t wasted;
+               if (!chunk) return NULL;
+
+               wasted = (region->chunk_size - region->allocated) & (~(ALIGNMENT-1));
+               if(wasted >= ALIGNMENT) {
+                       /* put wasted part in recycle bin for later use */
+                       region->total_allocated += wasted;
+                       ++region->small_objects;
+                       region_recycle(region, region->data+region->allocated, wasted);
+                       region->allocated += wasted;
+               }
+               ++region->chunk_count;
+               region->unused_space += region->chunk_size - region->allocated;
+               
+               region_add_cleanup(region, region->deallocator, chunk);
+               region->allocated = 0;
+               region->data = (char *) chunk;
+       }
+
+       result = region->data + region->allocated;
+       region->allocated += aligned_size;
+
+       region->total_allocated += aligned_size;
+       region->unused_space += aligned_size - size;
+       ++region->small_objects;
+       
+       return result;
+}
+
+void *
+region_alloc_init(region_type *region, const void *init, size_t size)
+{
+       void *result = region_alloc(region, size);
+       if (!result) return NULL;
+       memcpy(result, init, size);
+       return result;
+}
+
+void *
+region_alloc_zero(region_type *region, size_t size)
+{
+       void *result = region_alloc(region, size);
+       if (!result) return NULL;
+       memset(result, 0, size);
+       return result;
+}
+
+void
+region_free_all(region_type *region)
+{
+       size_t i;
+       assert(region);
+       assert(region->cleanups);
+    
+       i = region->cleanup_count;
+       while (i > 0) {
+               --i;
+               assert(region->cleanups[i].action);
+               region->cleanups[i].action(region->cleanups[i].data);
+       }
+
+       if(region->recycle_bin) {
+               memset(region->recycle_bin, 0, sizeof(struct recycle_elem*)
+                       * region->large_object_size);
+               region->recycle_size = 0;
+       }
+
+       region->data = region->initial_data;
+       region->cleanup_count = 0;
+       region->allocated = 0;
+
+       region->total_allocated = 0;
+       region->small_objects = 0;
+       region->large_objects = 0;
+       region->chunk_count = 1;
+       region->unused_space = 0;
+}
+
+
+char *
+region_strdup(region_type *region, const char *string)
+{
+       return (char *) region_alloc_init(region, string, strlen(string) + 1);
+}
+
+void 
+region_recycle(region_type *region, void *block, size_t size)
+{
+       size_t aligned_size;
+       size_t i;
+
+       if(!block || !region->recycle_bin)
+               return;
+
+       if (size == 0) {
+               size = 1;
+       }
+       aligned_size = ALIGN_UP(size, ALIGNMENT);
+
+       if(aligned_size < region->large_object_size) {
+               struct recycle_elem* elem = (struct recycle_elem*)block;
+               /* we rely on the fact that ALIGNMENT is void* so the next will fit */
+               assert(aligned_size >= sizeof(struct recycle_elem));
+
+               if(CHECK_DOUBLE_FREE) {
+                       /* make sure the same ptr is not freed twice. */
+                       struct recycle_elem *p = region->recycle_bin[aligned_size];
+                       while(p) {
+                               assert(p != elem); 
+                               p = p->next;
+                       }
+               }
+
+               elem->next = region->recycle_bin[aligned_size];
+               region->recycle_bin[aligned_size] = elem;
+               region->recycle_size += aligned_size;
+               region->unused_space -= aligned_size - size;
+               return;
+       }
+
+       /* a large allocation */
+       region->total_allocated -= size;
+       --region->large_objects;
+       for(i=0; i<region->cleanup_count; i++) {
+               while(region->cleanups[i].data == block) {
+                       /* perform action (deallocator) on block */
+                       region->cleanups[i].action(block);
+                       region->cleanups[i].data = NULL;
+                       /* remove cleanup - move last entry here, check this one again */
+                       --region->cleanup_count;
+                       region->cleanups[i].action = 
+                               region->cleanups[region->cleanup_count].action;
+                       region->cleanups[i].data = 
+                               region->cleanups[region->cleanup_count].data;
+               }
+       }
+}
+
+void
+region_dump_stats(region_type *region, FILE *out)
+{
+       fprintf(out, "%lu objects (%lu small/%lu large), %lu bytes allocated (%lu wasted) in %lu chunks, %lu cleanups, %lu in recyclebin",
+               (unsigned long) (region->small_objects + region->large_objects),
+               (unsigned long) region->small_objects,
+               (unsigned long) region->large_objects,
+               (unsigned long) region->total_allocated,
+               (unsigned long) region->unused_space,
+               (unsigned long) region->chunk_count,
+               (unsigned long) region->cleanup_count,
+               (unsigned long) region->recycle_size);
+       if(1 && region->recycle_bin) {
+               /* print details of the recycle bin */
+               size_t i;
+               for(i=0; i<region->large_object_size; i++) {
+                       size_t count = 0;
+                       struct recycle_elem* el = region->recycle_bin[i];
+                       while(el) {
+                               count++;
+                               el = el->next;
+                       }
+                       if(i%ALIGNMENT == 0 && i!=0)
+                               fprintf(out, " %lu", (unsigned long)count);
+               }
+       }
+}
+
+size_t region_get_recycle_size(region_type* region)
+{
+       return region->recycle_size;
+}
+
+/* debug routine, includes here to keep base region-allocator independent */
+#undef ALIGN_UP
+#include "util/log.h"
+void
+region_log_stats(region_type *region)
+{
+       char buf[10240], *str=buf;
+       int len=0;
+       sprintf(str, "%lu objects (%lu small/%lu large), %lu bytes allocated (%lu wasted) in %lu chunks, %lu cleanups, %lu in recyclebin%n",
+               (unsigned long) (region->small_objects + region->large_objects),
+               (unsigned long) region->small_objects,
+               (unsigned long) region->large_objects,
+               (unsigned long) region->total_allocated,
+               (unsigned long) region->unused_space,
+               (unsigned long) region->chunk_count,
+               (unsigned long) region->cleanup_count,
+               (unsigned long) region->recycle_size,
+               &len);
+       str+=len;
+       if(1 && region->recycle_bin) {
+               /* print details of the recycle bin */
+               size_t i;
+               for(i=0; i<region->large_object_size; i++) {
+                       size_t count = 0;
+                       struct recycle_elem* el = region->recycle_bin[i];
+                       while(el) {
+                               count++;
+                               el = el->next;
+                       }
+                       if(i%ALIGNMENT == 0 && i!=0) {
+                               sprintf(str, " %lu%n", (unsigned long)count, 
+                                       &len);
+                               str+=len;
+                       }
+               }
+       }
+       log_info("memory: %s", buf);
+}
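
The recycle bin added to the imported allocator keeps one linked list of returned blocks per aligned size, built inside the region's own chunks; region_alloc() serves requests from the matching list before carving new chunk space, and region_recycle() pushes blocks back. A usage sketch, assuming the util/region-allocator.[ch] files added by this commit are compiled into the program:

#include <stdio.h>
#include <stdlib.h>
#include "util/region-allocator.h"

int main(void)
{
        /* 4096-byte chunks, objects of 512 bytes or more malloced separately,
         * 16 initial cleanup slots, recycling enabled */
        region_type* r = region_create_custom(malloc, free,
                DEFAULT_CHUNK_SIZE, DEFAULT_LARGE_OBJECT_SIZE,
                DEFAULT_INITIAL_CLEANUP_SIZE, 1);
        void* a;
        void* b;
        if(!r) return 1;
        a = region_alloc(r, 100);
        if(!a) { region_destroy(r); return 1; }
        region_recycle(r, a, 100);       /* hand the block back to the bin */
        printf("recycle bin holds %lu bytes\n",
                (unsigned long)region_get_recycle_size(r));
        b = region_alloc(r, 100);        /* served again from the recycle bin */
        printf("same block reused: %s\n", a == b ? "yes" : "no");
        region_destroy(r);               /* frees all chunks and large objects */
        return 0;
}
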
diff --git a/util/region-allocator.h b/util/region-allocator.h
new file mode 100644 (file)
index 0000000..2d5faaa
--- /dev/null
+++ b/util/region-allocator.h
@@ -0,0 +1,142 @@
+/*
+ * region-allocator.h -- region based memory allocator.
+ *
+ * Copyright (c) 2001-2006, NLnet Labs. All rights reserved.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ * 
+ * This software is open source.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _REGION_ALLOCATOR_H_
+#define _REGION_ALLOCATOR_H_
+
+#include <stdio.h>
+
+typedef struct region region_type;
+
+#define DEFAULT_CHUNK_SIZE         4096
+#define DEFAULT_LARGE_OBJECT_SIZE  (DEFAULT_CHUNK_SIZE / 8)
+#define DEFAULT_INITIAL_CLEANUP_SIZE 16
+
+/*
+ * Create a new region.
+ */
+region_type *region_create(void *(*allocator)(size_t),
+                          void (*deallocator)(void *));
+
+
+/*
+ * Create a new region, with chunk size and large object size.
+ * Note that large_object_size must be <= chunk_size.
+ * Anything larger than the large object size is individually alloced.
+ * large_object_size = chunk_size/8 is reasonable;
+ * initial_cleanup_size is the number of prealloced ptrs for cleanups.
+ * The cleanups are in a growing array, and it must start larger than zero.
+ * If recycle is true, environmentally friendly memory recycling is enabled.
+ */
+region_type *region_create_custom(void *(*allocator)(size_t),
+                                 void (*deallocator)(void *),
+                                 size_t chunk_size,
+                                 size_t large_object_size,
+                                 size_t initial_cleanup_size,
+                                 int recycle);
+
+
+/*
+ * Destroy REGION.  All memory associated with REGION is freed as if
+ * region_free_all was called.
+ */
+void region_destroy(region_type *region);
+
+
+/*
+ * Add a cleanup to REGION.  ACTION will be called with DATA as
+ * parameter when the region is freed or destroyed.
+ *
+ * Returns 0 on failure.
+ */
+size_t region_add_cleanup(region_type *region,
+                         void (*action)(void *),
+                         void *data);
+
+
+/*
+ * Allocate SIZE bytes of memory inside REGION.  The memory is
+ * deallocated when region_free_all is called for this region.
+ */
+void *region_alloc(region_type *region, size_t size);
+
+
+/*
+ * Allocate SIZE bytes of memory inside REGION and copy INIT into it.
+ * The memory is deallocated when region_free_all is called for this
+ * region.
+ */
+void *region_alloc_init(region_type *region, const void *init, size_t size);
+
+
+/*
+ * Allocate SIZE bytes of memory inside REGION that are initialized to
+ * 0.  The memory is deallocated when region_free_all is called for
+ * this region.
+ */
+void *region_alloc_zero(region_type *region, size_t size);
+
+
+/*
+ * Run the cleanup actions and free all memory associated with REGION.
+ */
+void region_free_all(region_type *region);
+
+
+/*
+ * Duplicate STRING and allocate the result in REGION.
+ */
+char *region_strdup(region_type *region, const char *string);
+
+/*
+ * Recycle an allocated memory block. Pass size used to alloc it.
+ * Does nothing if recycling is not enabled for the region.
+ */
+void region_recycle(region_type *region, void *block, size_t size);
+
+/*
+ * Print some REGION statistics to OUT.
+ */
+void region_dump_stats(region_type *region, FILE *out);
+
+/* get size of recyclebin */
+size_t region_get_recycle_size(region_type* region);
+
+/* Debug print REGION statistics to LOG. */
+void region_log_stats(region_type *region);
+
+#endif /* _REGION_ALLOCATOR_H_ */
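
For the plain, non-recycling API declared above, everything allocated from a region stays valid until region_free_all() or region_destroy() runs the cleanup actions and releases the chunks in one go. A basic usage sketch, again assuming the new files from this commit are on the include path:

#include <stdio.h>
#include <stdlib.h>
#include "util/region-allocator.h"

int main(void)
{
        region_type* r = region_create(malloc, free);
        char* name;
        int* counters;
        if(!r) return 1;
        name = region_strdup(r, "example.com.");
        counters = (int*)region_alloc_zero(r, 10*sizeof(int));
        if(!name || !counters) { region_destroy(r); return 1; }
        counters[0] = 1;
        printf("%s has %d counter(s)\n", name, counters[0]);
        region_dump_stats(r, stderr);    /* objects, bytes, chunks, cleanups */
        fprintf(stderr, "\n");
        region_free_all(r);              /* releases both allocations at once */
        region_destroy(r);               /* frees the region structure itself */
        return 0;
}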