From 520cf040f8aa1ea0ffa66f21531fa5ad393616b0 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 10 Nov 2011 14:01:09 +0000
Subject: [PATCH] revert the accidentally committed inode and ext chunk changes

I had unrelated changes in the repository when committing Jakub's string
fixes, which accidentally got included in that commit. Revert them for
now.

Signed-off-by: Christoph Hellwig
---
 repair/incore_ext.c | 150 +++++++++++++++++++++++++++++++---
 repair/incore_ino.c | 193 +++++++++++++++++++++++++++++---------------
 2 files changed, 267 insertions(+), 76 deletions(-)

diff --git a/repair/incore_ext.c b/repair/incore_ext.c
index 28648d106..60dd4c442 100644
--- a/repair/incore_ext.c
+++ b/repair/incore_ext.c
@@ -26,6 +26,20 @@
 #include "err_protos.h"
 #include "avl64.h"
 #include "threads.h"
+#define ALLOC_NUM_EXTS		100
+
+/*
+ * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
+ */
+typedef struct extent_alloc_rec {
+	struct list_head	list;
+	extent_tree_node_t	extents[ALLOC_NUM_EXTS];
+} extent_alloc_rec_t;
+
+typedef struct rt_extent_alloc_rec {
+	struct list_head	list;
+	rt_extent_tree_node_t	extents[ALLOC_NUM_EXTS];
+} rt_extent_alloc_rec_t;
 
 /*
  * note: there are 4 sets of incore things handled here:
@@ -43,9 +57,21 @@
  * phase 5. The uncertain inode list goes away at the end of
  * phase 3. The inode tree and bno/bnct trees go away after phase 5.
  */
+typedef struct ext_flist_s {
+	extent_tree_node_t	*list;
+	int			cnt;
+} ext_flist_t;
+
+static ext_flist_t ext_flist;
+
+typedef struct rt_ext_flist_s {
+	rt_extent_tree_node_t	*list;
+	int			cnt;
+} rt_ext_flist_t;
+
+static rt_ext_flist_t rt_ext_flist;
 
 static avl64tree_desc_t	*rt_ext_tree_ptr;	/* dup extent tree for rt */
-static pthread_mutex_t	rt_ext_tree_lock;
 
 static struct btree_root	**dup_extent_trees;	/* per ag dup extent trees */
 static pthread_mutex_t	*dup_extent_tree_locks;
@@ -62,6 +88,19 @@ static avltree_desc_t	**extent_bcnt_ptrs;
 /*
  * sorted by size
  */
+/*
+ * list of allocated "blocks" for easy freeing later
+ */
+static struct list_head	ba_list;
+static struct list_head	rt_ba_list;
+
+/*
+ * locks.
+ */
+static pthread_mutex_t	ext_flist_lock;
+static pthread_mutex_t	rt_ext_tree_lock;
+static pthread_mutex_t	rt_ext_flist_lock;
+
 /*
  * duplicate extent tree functions
  */
@@ -128,26 +167,60 @@ static extent_tree_node_t *
 mk_extent_tree_nodes(xfs_agblock_t new_startblock,
 	xfs_extlen_t new_blockcount, extent_state_t new_state)
 {
+	int i;
 	extent_tree_node_t *new;
+	extent_alloc_rec_t *rec;
+
+	pthread_mutex_lock(&ext_flist_lock);
+	if (ext_flist.cnt == 0) {
+		ASSERT(ext_flist.list == NULL);
+
+		if ((rec = malloc(sizeof(extent_alloc_rec_t))) == NULL)
+			do_error(
+			_("couldn't allocate new extent descriptors.\n"));
+
+		list_add(&rec->list, &ba_list);
 
-	new = malloc(sizeof(*new));
-	if (!new)
-		do_error(_("couldn't allocate new extent descriptor.\n"));
+		new = &rec->extents[0];
+		for (i = 0; i < ALLOC_NUM_EXTS; i++) {
+			new->avl_node.avl_nextino = (avlnode_t *)
+						ext_flist.list;
+			ext_flist.list = new;
+			ext_flist.cnt++;
+			new++;
+		}
+	}
+
+	ASSERT(ext_flist.list != NULL);
+
+	new = ext_flist.list;
+	ext_flist.list = (extent_tree_node_t *) new->avl_node.avl_nextino;
+	ext_flist.cnt--;
 
 	new->avl_node.avl_nextino = NULL;
+	pthread_mutex_unlock(&ext_flist_lock);
+
+	/* initialize node */
+
 	new->ex_startblock = new_startblock;
 	new->ex_blockcount = new_blockcount;
 	new->ex_state = new_state;
 	new->next = NULL;
 	new->last = NULL;
 
-	return new;
+	return(new);
 }
 
 void
 release_extent_tree_node(extent_tree_node_t *node)
 {
-	free(node);
+	pthread_mutex_lock(&ext_flist_lock);
+	node->avl_node.avl_nextino = (avlnode_t *) ext_flist.list;
+	ext_flist.list = node;
+	ext_flist.cnt++;
+	pthread_mutex_unlock(&ext_flist_lock);
+
+	return;
 }
 
 /*
@@ -557,24 +630,57 @@ static rt_extent_tree_node_t *
 mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock,
 	xfs_extlen_t new_blockcount, extent_state_t new_state)
 {
+	int i;
 	rt_extent_tree_node_t *new;
+	rt_extent_alloc_rec_t *rec;
+
+	pthread_mutex_lock(&rt_ext_flist_lock);
+	if (rt_ext_flist.cnt == 0) {
+		ASSERT(rt_ext_flist.list == NULL);
+
+		if ((rec = malloc(sizeof(rt_extent_alloc_rec_t))) == NULL)
+			do_error(
+			_("couldn't allocate new extent descriptors.\n"));
+
+		list_add(&rec->list, &rt_ba_list);
 
-	new = malloc(sizeof(*new));
-	if (!new)
-		do_error(_("couldn't allocate new extent descriptor.\n"));
+		new = &rec->extents[0];
+		for (i = 0; i < ALLOC_NUM_EXTS; i++) {
+			new->avl_node.avl_nextino = (avlnode_t *)
+						rt_ext_flist.list;
+			rt_ext_flist.list = new;
+			rt_ext_flist.cnt++;
+			new++;
+		}
+	}
+
+	ASSERT(rt_ext_flist.list != NULL);
+
+	new = rt_ext_flist.list;
+	rt_ext_flist.list = (rt_extent_tree_node_t *) new->avl_node.avl_nextino;
+	rt_ext_flist.cnt--;
 
 	new->avl_node.avl_nextino = NULL;
+	pthread_mutex_unlock(&rt_ext_flist_lock);
+
+	/* initialize node */
+
 	new->rt_startblock = new_startblock;
 	new->rt_blockcount = new_blockcount;
 	new->rt_state = new_state;
-	return new;
+
+	return(new);
 }
 
 #if 0
 void
 release_rt_extent_tree_node(rt_extent_tree_node_t *node)
 {
-	free(node);
+	node->avl_node.avl_nextino = (avlnode_t *) rt_ext_flist.list;
+	rt_ext_flist.list = node;
+	rt_ext_flist.cnt++;
+
+	return;
 }
 
 void
@@ -613,9 +719,18 @@ release_rt_extent_tree()
 void
 free_rt_dup_extent_tree(xfs_mount_t *mp)
 {
+	rt_extent_alloc_rec_t *cur, *tmp;
+
 	ASSERT(mp->m_sb.sb_rblocks != 0);
+
+	list_for_each_entry_safe(cur, tmp, &rt_ba_list, list)
+		free(cur);
+
 	free(rt_ext_tree_ptr);
+
 	rt_ext_tree_ptr = NULL;
+
+	return;
 }
 
 /*
@@ -747,7 +862,11 @@ incore_ext_init(xfs_mount_t *mp)
 	int i;
 	xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
 
+	list_head_init(&ba_list);
+	list_head_init(&rt_ba_list);
+	pthread_mutex_init(&ext_flist_lock, NULL);
 	pthread_mutex_init(&rt_ext_tree_lock, NULL);
+	pthread_mutex_init(&rt_ext_flist_lock, NULL);
 
 	dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
 	if (!dup_extent_trees)
@@ -789,6 +908,11 @@ incore_ext_init(xfs_mount_t *mp)
 		do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
 	avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
+
+	ext_flist.cnt = 0;
+	ext_flist.list = NULL;
+
+	return;
 }
 
 /*
@@ -797,8 +921,12 @@ incore_ext_init(xfs_mount_t *mp)
 void
 incore_ext_teardown(xfs_mount_t *mp)
 {
+	extent_alloc_rec_t *cur, *tmp;
 	xfs_agnumber_t i;
 
+	list_for_each_entry_safe(cur, tmp, &ba_list, list)
+		free(cur);
+
 	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
 		btree_destroy(dup_extent_trees[i]);
 		free(extent_bno_ptrs[i]);
diff --git a/repair/incore_ino.c b/repair/incore_ino.c
index fa88944d0..7827ff510 100644
--- a/repair/incore_ino.c
+++ b/repair/incore_ino.c
@@ -25,6 +25,7 @@
 #include "threads.h"
 #include "err_protos.h"
 
+static pthread_mutex_t	ino_flist_lock;
 extern avlnode_t	*avl_firstino(avlnode_t *root);
 
 /*
@@ -37,6 +38,18 @@ avltree_desc_t	**inode_tree_ptrs;
  */
 static avltree_desc_t	**inode_uncertain_tree_ptrs;
 
+#define ALLOC_NUM_INOS	100
+
+/* free lists -- inode nodes and extent nodes */
+
+typedef struct ino_flist_s {
+	ino_tree_node_t		*list;
+	ino_tree_node_t		*last;
+	long long		cnt;
+} ino_flist_t;
+
+static ino_flist_t ino_flist;	/* free list must be initialized before use */
+
 /* memory optimised nlink counting for all inodes */
 static void nlink_grow_8_to_16(ino_tree_node_t *irec);
@@ -225,63 +238,102 @@ nlink_grow_16_to_32(ino_tree_node_t *irec)
 }
 
 /*
- * Next is the uncertain inode list -- a sorted (in ascending order)
+ * next is the uncertain inode list -- a sorted (in ascending order)
  * list of inode records sorted on the starting inode number. There
  * is one list per ag.
  */
 
 /*
- * Common code for creating inode records for use by trees and lists.
+ * common code for creating inode records for use by trees and lists.
  * called only from add_inodes and add_inodes_uncertain
 
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
-static struct ino_tree_node *
-alloc_ino_node(
+/* ARGSUSED */
+static ino_tree_node_t *
+mk_ino_tree_nodes(
 	xfs_agino_t	starting_ino)
 {
-	struct ino_tree_node	*irec;
+	int			i;
+	ino_tree_node_t		*ino_rec;
+	avlnode_t		*node;
 
-	irec = malloc(sizeof(*irec));
-	if (!irec)
-		do_error(_("inode map malloc failed\n"));
+	pthread_mutex_lock(&ino_flist_lock);
+	if (ino_flist.cnt == 0) {
+		ASSERT(ino_flist.list == NULL);
 
-	irec->avl_node.avl_nextino = NULL;
-	irec->avl_node.avl_forw = NULL;
-	irec->avl_node.avl_back = NULL;
+		if ((ino_rec = malloc(sizeof(ino_tree_node_t[ALLOC_NUM_INOS])))
+				== NULL)
+			do_error(_("inode map malloc failed\n"));
 
-	irec->ino_startnum = starting_ino;
-	irec->ino_confirmed = 0;
-	irec->ino_isa_dir = 0;
-	irec->ir_free = (xfs_inofree_t) - 1;
-	irec->ino_un.ex_data = NULL;
-	irec->nlinkops = &nlinkops[0];
-	irec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
-	if (!irec->disk_nlinks)
+		for (i = 0; i < ALLOC_NUM_INOS; i++) {
+			ino_rec->avl_node.avl_nextino =
+				(avlnode_t *) ino_flist.list;
+			ino_flist.list = ino_rec;
+			ino_flist.cnt++;
+			ino_rec++;
+		}
+	}
+
+	ASSERT(ino_flist.list != NULL);
+
+	ino_rec = ino_flist.list;
+	ino_flist.list = (ino_tree_node_t *) ino_rec->avl_node.avl_nextino;
+	ino_flist.cnt--;
+	node = &ino_rec->avl_node;
+	node->avl_nextino = node->avl_forw = node->avl_back = NULL;
+	pthread_mutex_unlock(&ino_flist_lock);
+
+	/* initialize node */
+
+	ino_rec->ino_startnum = 0;
+	ino_rec->ino_confirmed = 0;
+	ino_rec->ino_isa_dir = 0;
+	ino_rec->ir_free = (xfs_inofree_t) - 1;
+	ino_rec->ino_un.ex_data = NULL;
+	ino_rec->nlinkops = &nlinkops[0];
+	ino_rec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
+	if (ino_rec->disk_nlinks == NULL)
 		do_error(_("could not allocate nlink array\n"));
-	return irec;
+
+	return(ino_rec);
 }
 
+/*
+ * return inode record to free list, will be initialized when
+ * it gets pulled off list
+ */
 static void
-free_ino_tree_node(
-	struct ino_tree_node	*irec)
+free_ino_tree_node(ino_tree_node_t *ino_rec)
 {
-	irec->avl_node.avl_nextino = NULL;
-	irec->avl_node.avl_forw = NULL;
-	irec->avl_node.avl_back = NULL;
+	ino_rec->avl_node.avl_nextino = NULL;
+	ino_rec->avl_node.avl_forw = NULL;
+	ino_rec->avl_node.avl_back = NULL;
 
-	free(irec->disk_nlinks);
-	if (irec->ino_un.ex_data != NULL) {
+	pthread_mutex_lock(&ino_flist_lock);
+	if (ino_flist.list != NULL) {
+		ASSERT(ino_flist.cnt > 0);
+		ino_rec->avl_node.avl_nextino = (avlnode_t *) ino_flist.list;
+	} else {
+		ASSERT(ino_flist.cnt == 0);
+		ino_rec->avl_node.avl_nextino = NULL;
+	}
+
+	ino_flist.list = ino_rec;
+	ino_flist.cnt++;
+
+	free(ino_rec->disk_nlinks);
+
+	if (ino_rec->ino_un.ex_data != NULL) {
 		if (full_ino_ex_data) {
-			free(irec->ino_un.ex_data->parents);
-			free(irec->ino_un.ex_data->counted_nlinks);
+			free(ino_rec->ino_un.ex_data->parents);
+			free(ino_rec->ino_un.ex_data->counted_nlinks);
		}
-		free(irec->ino_un.ex_data);
+		free(ino_rec->ino_un.ex_data);
 	}
-
-	free(irec);
+	pthread_mutex_unlock(&ino_flist_lock);
 }
 
 /*
@@ -327,15 +379,17 @@ add_aginode_uncertain(xfs_agnumber_t agno, xfs_agino_t ino, int free)
	 * check to see if record containing inode is already in the tree.
	 * if not, add it
	 */
-	ino_rec = (ino_tree_node_t *)
-		avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
-	if (!ino_rec) {
-		ino_rec = alloc_ino_node(s_ino);
-
-		if (!avl_insert(inode_uncertain_tree_ptrs[agno],
-				&ino_rec->avl_node))
-			do_error(
-	_("add_aginode_uncertain - duplicate inode range\n"));
+	if ((ino_rec = (ino_tree_node_t *)
+			avl_findrange(inode_uncertain_tree_ptrs[agno],
+				s_ino)) == NULL) {
+		ino_rec = mk_ino_tree_nodes(s_ino);
+		ino_rec->ino_startnum = s_ino;
+
+		if (avl_insert(inode_uncertain_tree_ptrs[agno],
+				(avlnode_t *) ino_rec) == NULL) {
+			do_error(_("add_aginode_uncertain - "
+				"duplicate inode range\n"));
+		}
 	}
 
 	if (free)
@@ -400,38 +454,43 @@ clear_uncertain_ino_cache(xfs_agnumber_t agno)
 
 
 /*
- * Next comes the inode trees. One per AG, AVL trees of inode records, each
- * inode record tracking 64 inodes
+ * next comes the inode trees. One per ag. AVL trees
+ * of inode records, each inode record tracking 64 inodes
  */
-
 /*
- * Set up an inode tree record for a group of inodes that will include the
- * requested inode.
+ * set up an inode tree record for a group of inodes that will
+ * include the requested inode.
  *
- * This does NOT do error-check for duplicate records. The caller is
- * responsible for checking that. Ino must be the start of an
- * XFS_INODES_PER_CHUNK (64) inode chunk
+ * does NOT error-check for duplicate records. Caller is
+ * responsible for checking that.
 *
- * Each inode resides in a 64-inode chunk which can be part one or more chunks
- * (MAX(64, inodes-per-block). The fs allocates in chunks (as opposed to 1
- * chunk) when a block can hold more than one chunk (inodes per block > 64).
- * Allocating in one chunk pieces causes us problems when it takes more than
- * one fs block to contain an inode chunk because the chunks can start on
- * *any* block boundary. So we assume that the caller has a clue because at
- * this level, we don't.
+ * ino must be the start of an XFS_INODES_PER_CHUNK (64) inode chunk
+ *
+ * Each inode resides in a 64-inode chunk which can be part
+ * one or more chunks (MAX(64, inodes-per-block). The fs allocates
+ * in chunks (as opposed to 1 chunk) when a block can hold more than
+ * one chunk (inodes per block > 64). Allocating in one chunk pieces
+ * causes us problems when it takes more than one fs block to contain
+ * an inode chunk because the chunks can start on *any* block boundary.
+ * So we assume that the caller has a clue because at this level, we
+ * don't.
 */
-static struct ino_tree_node *
-add_inode(
-	struct xfs_mount	*mp,
-	xfs_agnumber_t		agno,
-	xfs_agino_t		agino)
+static ino_tree_node_t *
+add_inode(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
 {
-	struct ino_tree_node	*irec;
+	ino_tree_node_t *ino_rec;
+
+	/* no record exists, make some and put them into the tree */
+
+	ino_rec = mk_ino_tree_nodes(ino);
+	ino_rec->ino_startnum = ino;
 
-	irec = alloc_ino_node(agino);
-	if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
+	if (avl_insert(inode_tree_ptrs[agno],
+			(avlnode_t *) ino_rec) == NULL) {
 		do_warn(_("add_inode - duplicate inode range\n"));
-	return irec;
+	}
+
+	return(ino_rec);
 }
 
 /*
@@ -757,6 +816,7 @@ incore_ino_init(xfs_mount_t *mp)
 	int i;
 	int agcount = mp->m_sb.sb_agcount;
 
+	pthread_mutex_init(&ino_flist_lock, NULL);
 	if ((inode_tree_ptrs = malloc(agcount *
			sizeof(avltree_desc_t *))) == NULL)
 		do_error(_("couldn't malloc inode tree descriptor table\n"));
@@ -779,6 +839,9 @@ incore_ino_init(xfs_mount_t *mp)
 		avl_init_tree(inode_uncertain_tree_ptrs[i],
				&avl_ino_tree_ops);
 	}
 
+	ino_flist.cnt = 0;
+	ino_flist.list = NULL;
+
 	if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
 		do_error(_("couldn't malloc uncertain inode cache area\n"));
-- 
2.47.2
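
For context, the hunks above restore xfs_repair's batched free-list allocator for its incore extent and inode records: nodes are carved out of malloc()ed blocks of ALLOC_NUM_EXTS / ALLOC_NUM_INOS (100) entries, handed out from a mutex-protected free list, and every block is remembered on ba_list / rt_ba_list so teardown can release whole blocks at once with list_for_each_entry_safe(). The removed lines show the alternative this revert undoes: a plain malloc()/free() per node. The sketch below illustrates the same pattern in isolation; it is a minimal stand-alone example, not xfsprogs code, and all of its names (node_t, chunk_rec_t, node_alloc and so on) are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_NUM_NODES	100	/* nodes carved out per malloc() */

/* stand-in for extent_tree_node_t / ino_tree_node_t */
typedef struct node {
	struct node	*next;		/* free-list link (avl_nextino in repair) */
	int		payload;
} node_t;

/* one allocation record: a block of nodes plus linkage for teardown */
typedef struct chunk_rec {
	struct chunk_rec *next;		/* plays the role of struct list_head */
	node_t		nodes[ALLOC_NUM_NODES];
} chunk_rec_t;

static chunk_rec_t	*chunk_list;	/* every block ever allocated */
static node_t		*flist;		/* free list of available nodes */
static int		flist_cnt;
static pthread_mutex_t	flist_lock = PTHREAD_MUTEX_INITIALIZER;

/* pop one node, refilling the free list a whole block at a time */
static node_t *
node_alloc(void)
{
	node_t	*new;

	pthread_mutex_lock(&flist_lock);
	if (flist_cnt == 0) {
		chunk_rec_t	*rec = malloc(sizeof(*rec));
		int		i;

		if (!rec) {
			fprintf(stderr, "block allocation failed\n");
			exit(1);
		}
		rec->next = chunk_list;	/* remember block for teardown */
		chunk_list = rec;

		for (i = 0; i < ALLOC_NUM_NODES; i++) {
			rec->nodes[i].next = flist;
			flist = &rec->nodes[i];
			flist_cnt++;
		}
	}
	new = flist;
	flist = new->next;
	flist_cnt--;
	pthread_mutex_unlock(&flist_lock);

	new->next = NULL;	/* caller initializes the payload */
	return new;
}

/* push a node back; memory only returns to the OS at teardown */
static void
node_free(node_t *node)
{
	pthread_mutex_lock(&flist_lock);
	node->next = flist;
	flist = node;
	flist_cnt++;
	pthread_mutex_unlock(&flist_lock);
}

/* free whole blocks in one pass, like the list_for_each_entry_safe loops */
static void
node_teardown(void)
{
	while (chunk_list) {
		chunk_rec_t *rec = chunk_list;

		chunk_list = rec->next;
		free(rec);
	}
	flist = NULL;
	flist_cnt = 0;
}

int
main(void)
{
	node_t	*a = node_alloc();
	node_t	*b = node_alloc();

	a->payload = 1;
	b->payload = 2;
	printf("%d %d, nodes left on free list: %d\n",
		a->payload, b->payload, flist_cnt);
	node_free(a);
	node_free(b);
	node_teardown();
	return 0;
}

The design trade-off the pattern makes is one allocator call per 100 nodes plus good locality, at the cost that individual nodes are never returned to the system before teardown; the per-node malloc()/free() variant visible in the removed lines is simpler but cannot batch-free.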