repair: allocate and free extent records individually
author		Christoph Hellwig <hch@lst.de>
		Wed, 11 Jan 2012 18:52:00 +0000 (18:52 +0000)
committer	Christoph Hellwig <hch@lst.de>
		Wed, 11 Jan 2012 18:52:00 +0000 (18:52 +0000)
Instead of allocating extent records in chunks, keeping a freelist of them,
and releasing them to the system memory allocator in one go, use plain
malloc and free for each record.  The freelist only adds a global lock,
whereas malloc and free can be implemented locklessly.  In addition, smart
allocators like tcmalloc have far less overhead than our chunk and linked
list scheme.
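
For illustration only, here is a minimal standalone sketch of the two
strategies (hypothetical names, not the xfsprogs code): the freelist
variant mirrors the pattern removed below, where every allocation and
release serializes on one global mutex, while the plain-malloc variant
keeps no shared state, so a thread-aware allocator can satisfy it
without any global lock.

	#include <pthread.h>
	#include <stdlib.h>

	typedef struct node {
		struct node	*next;		/* freelist linkage */
		int		payload;
	} node_t;

	#define CHUNK_NODES	100

	static node_t		*freelist;
	static pthread_mutex_t	freelist_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Old style: refill from a 100-element chunk, always under the lock. */
	static node_t *
	alloc_node_freelist(void)
	{
		node_t		*n;
		int		i;

		pthread_mutex_lock(&freelist_lock);
		if (!freelist) {
			/* carve a fresh chunk into the freelist */
			node_t	*chunk = malloc(CHUNK_NODES * sizeof(*chunk));

			if (!chunk)
				abort();
			for (i = 0; i < CHUNK_NODES; i++) {
				chunk[i].next = freelist;
				freelist = &chunk[i];
			}
		}
		n = freelist;
		freelist = n->next;
		pthread_mutex_unlock(&freelist_lock);
		return n;
	}

	/* New style: one malloc per node, no global state to contend on. */
	static node_t *
	alloc_node_malloc(void)
	{
		node_t	*n = malloc(sizeof(*n));

		if (!n)
			abort();
		return n;
	}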

Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
repair/incore_ext.c

diff --git a/repair/incore_ext.c b/repair/incore_ext.c
index 60dd4c442f4fe65f33fc658372c33c21a0eea302..28648d1066894ec5480288e3458b2588e6e44ee4 100644
--- a/repair/incore_ext.c
+++ b/repair/incore_ext.c
 #include "err_protos.h"
 #include "avl64.h"
 #include "threads.h"
-#define ALLOC_NUM_EXTS         100
-
-/*
- * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
- */
-typedef struct extent_alloc_rec  {
-       struct list_head        list;
-       extent_tree_node_t      extents[ALLOC_NUM_EXTS];
-} extent_alloc_rec_t;
-
-typedef struct rt_extent_alloc_rec  {
-       struct list_head        list;
-       rt_extent_tree_node_t   extents[ALLOC_NUM_EXTS];
-} rt_extent_alloc_rec_t;
 
 /*
  * note:  there are 4 sets of incore things handled here:
@@ -57,21 +43,9 @@ typedef struct rt_extent_alloc_rec  {
  * phase 5.  The uncertain inode list goes away at the end of
  * phase 3.  The inode tree and bno/bnct trees go away after phase 5.
  */
-typedef struct ext_flist_s  {
-       extent_tree_node_t      *list;
-       int                     cnt;
-} ext_flist_t;
-
-static ext_flist_t ext_flist;
-
-typedef struct rt_ext_flist_s  {
-       rt_extent_tree_node_t   *list;
-       int                     cnt;
-} rt_ext_flist_t;
-
-static rt_ext_flist_t rt_ext_flist;
 
 static avl64tree_desc_t        *rt_ext_tree_ptr;       /* dup extent tree for rt */
+static pthread_mutex_t rt_ext_tree_lock;
 
 static struct btree_root **dup_extent_trees;   /* per ag dup extent trees */
 static pthread_mutex_t *dup_extent_tree_locks;
@@ -88,19 +62,6 @@ static avltree_desc_t        **extent_bcnt_ptrs;     /*
                                                 * sorted by size
                                                 */
 
-/*
- * list of allocated "blocks" for easy freeing later
- */
-static struct list_head        ba_list;
-static struct list_head        rt_ba_list;
-
-/*
- * locks.
- */
-static pthread_mutex_t ext_flist_lock;
-static pthread_mutex_t rt_ext_tree_lock;
-static pthread_mutex_t rt_ext_flist_lock;
-
 /*
  * duplicate extent tree functions
  */
@@ -167,60 +128,26 @@ static extent_tree_node_t *
 mk_extent_tree_nodes(xfs_agblock_t new_startblock,
        xfs_extlen_t new_blockcount, extent_state_t new_state)
 {
-       int i;
        extent_tree_node_t *new;
-       extent_alloc_rec_t *rec;
-
-       pthread_mutex_lock(&ext_flist_lock);
-       if (ext_flist.cnt == 0)  {
-               ASSERT(ext_flist.list == NULL);
-
-               if ((rec = malloc(sizeof(extent_alloc_rec_t))) == NULL)
-                       do_error(
-                       _("couldn't allocate new extent descriptors.\n"));
-
-               list_add(&rec->list, &ba_list);
 
-               new = &rec->extents[0];
+       new = malloc(sizeof(*new));
+       if (!new)
+               do_error(_("couldn't allocate new extent descriptor.\n"));
 
-               for (i = 0; i < ALLOC_NUM_EXTS; i++)  {
-                       new->avl_node.avl_nextino = (avlnode_t *)
-                                                       ext_flist.list;
-                       ext_flist.list = new;
-                       ext_flist.cnt++;
-                       new++;
-               }
-       }
-
-       ASSERT(ext_flist.list != NULL);
-
-       new = ext_flist.list;
-       ext_flist.list = (extent_tree_node_t *) new->avl_node.avl_nextino;
-       ext_flist.cnt--;
        new->avl_node.avl_nextino = NULL;
-       pthread_mutex_unlock(&ext_flist_lock);
-
-       /* initialize node */
-
        new->ex_startblock = new_startblock;
        new->ex_blockcount = new_blockcount;
        new->ex_state = new_state;
        new->next = NULL;
        new->last = NULL;
 
-       return(new);
+       return new;
 }
 
 void
 release_extent_tree_node(extent_tree_node_t *node)
 {
-       pthread_mutex_lock(&ext_flist_lock);
-       node->avl_node.avl_nextino = (avlnode_t *) ext_flist.list;
-       ext_flist.list = node;
-       ext_flist.cnt++;
-       pthread_mutex_unlock(&ext_flist_lock);
-
-       return;
+       free(node);
 }
 
 /*
@@ -630,57 +557,24 @@ static rt_extent_tree_node_t *
 mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock,
        xfs_extlen_t new_blockcount, extent_state_t new_state)
 {
-       int i;
        rt_extent_tree_node_t *new;
-       rt_extent_alloc_rec_t *rec;
-
-       pthread_mutex_lock(&rt_ext_flist_lock);
-       if (rt_ext_flist.cnt == 0)  {
-               ASSERT(rt_ext_flist.list == NULL);
-
-               if ((rec = malloc(sizeof(rt_extent_alloc_rec_t))) == NULL)
-                       do_error(
-                       _("couldn't allocate new extent descriptors.\n"));
-
-               list_add(&rec->list, &rt_ba_list);
 
-               new = &rec->extents[0];
+       new = malloc(sizeof(*new));
+       if (!new)
+               do_error(_("couldn't allocate new extent descriptor.\n"));
 
-               for (i = 0; i < ALLOC_NUM_EXTS; i++)  {
-                       new->avl_node.avl_nextino = (avlnode_t *)
-                                                       rt_ext_flist.list;
-                       rt_ext_flist.list = new;
-                       rt_ext_flist.cnt++;
-                       new++;
-               }
-       }
-
-       ASSERT(rt_ext_flist.list != NULL);
-
-       new = rt_ext_flist.list;
-       rt_ext_flist.list = (rt_extent_tree_node_t *) new->avl_node.avl_nextino;
-       rt_ext_flist.cnt--;
        new->avl_node.avl_nextino = NULL;
-       pthread_mutex_unlock(&rt_ext_flist_lock);
-
-       /* initialize node */
-
        new->rt_startblock = new_startblock;
        new->rt_blockcount = new_blockcount;
        new->rt_state = new_state;
-
-       return(new);
+       return new;
 }
 
 #if 0
 void
 release_rt_extent_tree_node(rt_extent_tree_node_t *node)
 {
-       node->avl_node.avl_nextino = (avlnode_t *) rt_ext_flist.list;
-       rt_ext_flist.list = node;
-       rt_ext_flist.cnt++;
-
-       return;
+       free(node);
 }
 
 void
@@ -719,18 +613,9 @@ release_rt_extent_tree()
 void
 free_rt_dup_extent_tree(xfs_mount_t *mp)
 {
-       rt_extent_alloc_rec_t *cur, *tmp;
-
        ASSERT(mp->m_sb.sb_rblocks != 0);
-
-       list_for_each_entry_safe(cur, tmp, &rt_ba_list, list)
-               free(cur);
-
        free(rt_ext_tree_ptr);
-
        rt_ext_tree_ptr = NULL;
-
-       return;
 }
 
 /*
@@ -862,11 +747,7 @@ incore_ext_init(xfs_mount_t *mp)
        int i;
        xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
 
-       list_head_init(&ba_list);
-       list_head_init(&rt_ba_list);
-       pthread_mutex_init(&ext_flist_lock, NULL);
        pthread_mutex_init(&rt_ext_tree_lock, NULL);
-       pthread_mutex_init(&rt_ext_flist_lock, NULL);
 
        dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
        if (!dup_extent_trees)
@@ -908,11 +789,6 @@ incore_ext_init(xfs_mount_t *mp)
                do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
 
        avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
-
-       ext_flist.cnt = 0;
-       ext_flist.list = NULL;
-
-       return;
 }
 
 /*
@@ -921,12 +797,8 @@ incore_ext_init(xfs_mount_t *mp)
 void
 incore_ext_teardown(xfs_mount_t *mp)
 {
-       extent_alloc_rec_t *cur, *tmp;
        xfs_agnumber_t i;
 
-       list_for_each_entry_safe(cur, tmp, &ba_list, list)
-               free(cur);
-
        for (i = 0; i < mp->m_sb.sb_agcount; i++)  {
                btree_destroy(dup_extent_trees[i]);
                free(extent_bno_ptrs[i]);