#include "err_protos.h"
#include "avl64.h"
#include "threads.h"
+#define ALLOC_NUM_EXTS 100
+
+/*
+ * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
+ */
+typedef struct extent_alloc_rec {
+ struct list_head list;
+ extent_tree_node_t extents[ALLOC_NUM_EXTS];
+} extent_alloc_rec_t;
+
+typedef struct rt_extent_alloc_rec {
+ struct list_head list;
+ rt_extent_tree_node_t extents[ALLOC_NUM_EXTS];
+} rt_extent_alloc_rec_t;
/*
* note: there are 4 sets of incore things handled here:
* phase 5. The uncertain inode list goes away at the end of
* phase 3. The inode tree and bno/bnct trees go away after phase 5.
*/
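+/*
+ * free lists of tree nodes -- unused nodes are chained through their
+ * avl_node.avl_nextino pointers and cnt tracks how many are on each list.
+ */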
+typedef struct ext_flist_s {
+ extent_tree_node_t *list;
+ int cnt;
+} ext_flist_t;
+
+static ext_flist_t ext_flist;
+
+typedef struct rt_ext_flist_s {
+ rt_extent_tree_node_t *list;
+ int cnt;
+} rt_ext_flist_t;
+
+static rt_ext_flist_t rt_ext_flist;
static avl64tree_desc_t *rt_ext_tree_ptr; /* dup extent tree for rt */
-static pthread_mutex_t rt_ext_tree_lock;
static struct btree_root **dup_extent_trees; /* per ag dup extent trees */
static pthread_mutex_t *dup_extent_tree_locks;
* sorted by size
*/
+/*
+ * list of allocated "blocks" for easy freeing later
+ */
+static struct list_head ba_list;
+static struct list_head rt_ba_list;
+
+/*
+ * locks -- ext_flist_lock and rt_ext_flist_lock protect the node free
+ * lists above (and the ba_list/rt_ba_list chains they are refilled from);
+ * rt_ext_tree_lock serializes access to the realtime dup extent tree.
+ */
+static pthread_mutex_t ext_flist_lock;
+static pthread_mutex_t rt_ext_tree_lock;
+static pthread_mutex_t rt_ext_flist_lock;
+
/*
* duplicate extent tree functions
*/
mk_extent_tree_nodes(xfs_agblock_t new_startblock,
xfs_extlen_t new_blockcount, extent_state_t new_state)
{
+ int i;
extent_tree_node_t *new;
+ extent_alloc_rec_t *rec;
+
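+ /* if the free list has run dry, malloc a new batch of nodes */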
+ pthread_mutex_lock(&ext_flist_lock);
+ if (ext_flist.cnt == 0) {
+ ASSERT(ext_flist.list == NULL);
+
+ if ((rec = malloc(sizeof(extent_alloc_rec_t))) == NULL)
+ do_error(
+ _("couldn't allocate new extent descriptors.\n"));
+
+ list_add(&rec->list, &ba_list);
- new = malloc(sizeof(*new));
- if (!new)
- do_error(_("couldn't allocate new extent descriptor.\n"));
+ new = &rec->extents[0];
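+ /* push each node in the batch onto the head of the free list */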
+ for (i = 0; i < ALLOC_NUM_EXTS; i++) {
+ new->avl_node.avl_nextino = (avlnode_t *)
+ ext_flist.list;
+ ext_flist.list = new;
+ ext_flist.cnt++;
+ new++;
+ }
+ }
+
+ ASSERT(ext_flist.list != NULL);
+
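+ /* pop the first node off the free list */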
+ new = ext_flist.list;
+ ext_flist.list = (extent_tree_node_t *) new->avl_node.avl_nextino;
+ ext_flist.cnt--;
new->avl_node.avl_nextino = NULL;
+ pthread_mutex_unlock(&ext_flist_lock);
+
+ /* initialize node */
+
new->ex_startblock = new_startblock;
new->ex_blockcount = new_blockcount;
new->ex_state = new_state;
new->next = NULL;
new->last = NULL;
- return new;
+ return(new);
}
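+/*
+ * return the node to the free list for reuse rather than freeing it
+ */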
void
release_extent_tree_node(extent_tree_node_t *node)
{
- free(node);
+ pthread_mutex_lock(&ext_flist_lock);
+ node->avl_node.avl_nextino = (avlnode_t *) ext_flist.list;
+ ext_flist.list = node;
+ ext_flist.cnt++;
+ pthread_mutex_unlock(&ext_flist_lock);
+
+ return;
}
/*
mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock,
xfs_extlen_t new_blockcount, extent_state_t new_state)
{
+ int i;
rt_extent_tree_node_t *new;
+ rt_extent_alloc_rec_t *rec;
+
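+ /* if the rt free list has run dry, malloc a new batch of nodes */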
+ pthread_mutex_lock(&rt_ext_flist_lock);
+ if (rt_ext_flist.cnt == 0) {
+ ASSERT(rt_ext_flist.list == NULL);
+
+ if ((rec = malloc(sizeof(rt_extent_alloc_rec_t))) == NULL)
+ do_error(
+ _("couldn't allocate new extent descriptors.\n"));
+
+ list_add(&rec->list, &rt_ba_list);
- new = malloc(sizeof(*new));
- if (!new)
- do_error(_("couldn't allocate new extent descriptor.\n"));
+ new = &rec->extents[0];
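+ /* push each node in the batch onto the head of the rt free list */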
+ for (i = 0; i < ALLOC_NUM_EXTS; i++) {
+ new->avl_node.avl_nextino = (avlnode_t *)
+ rt_ext_flist.list;
+ rt_ext_flist.list = new;
+ rt_ext_flist.cnt++;
+ new++;
+ }
+ }
+
+ ASSERT(rt_ext_flist.list != NULL);
+
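+ /* pop the first node off the rt free list */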
+ new = rt_ext_flist.list;
+ rt_ext_flist.list = (rt_extent_tree_node_t *) new->avl_node.avl_nextino;
+ rt_ext_flist.cnt--;
new->avl_node.avl_nextino = NULL;
+ pthread_mutex_unlock(&rt_ext_flist_lock);
+
+ /* initialize node */
+
new->rt_startblock = new_startblock;
new->rt_blockcount = new_blockcount;
new->rt_state = new_state;
- return new;
+
+ return(new);
}
#if 0
void
release_rt_extent_tree_node(rt_extent_tree_node_t *node)
{
- free(node);
+ node->avl_node.avl_nextino = (avlnode_t *) rt_ext_flist.list;
+ rt_ext_flist.list = node;
+ rt_ext_flist.cnt++;
+
+ return;
}
void
free_rt_dup_extent_tree(xfs_mount_t *mp)
{
+ rt_extent_alloc_rec_t *cur, *tmp;
+
ASSERT(mp->m_sb.sb_rblocks != 0);
+
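+ /* free every batch allocation backing the rt extent tree nodes */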
+ list_for_each_entry_safe(cur, tmp, &rt_ba_list, list)
+ free(cur);
+
free(rt_ext_tree_ptr);
+
rt_ext_tree_ptr = NULL;
+
+ return;
}
/*
int i;
xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
+ list_head_init(&ba_list);
+ list_head_init(&rt_ba_list);
+ pthread_mutex_init(&ext_flist_lock, NULL);
pthread_mutex_init(&rt_ext_tree_lock, NULL);
+ pthread_mutex_init(&rt_ext_flist_lock, NULL);
dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
if (!dup_extent_trees)
do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
+
+ ext_flist.cnt = 0;
+ ext_flist.list = NULL;
+
+ return;
}
/*
void
incore_ext_teardown(xfs_mount_t *mp)
{
+ extent_alloc_rec_t *cur, *tmp;
xfs_agnumber_t i;
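+ /* free every batch allocation backing the extent tree nodes */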
+ list_for_each_entry_safe(cur, tmp, &ba_list, list)
+ free(cur);
+
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
btree_destroy(dup_extent_trees[i]);
free(extent_bno_ptrs[i]);
#include "threads.h"
#include "err_protos.h"
+static pthread_mutex_t ino_flist_lock;
extern avlnode_t *avl_firstino(avlnode_t *root);
/*
*/
static avltree_desc_t **inode_uncertain_tree_ptrs;
+#define ALLOC_NUM_INOS 100
+
+/* free lists -- inode nodes and extent nodes */
+
+typedef struct ino_flist_s {
+ ino_tree_node_t *list;
+ ino_tree_node_t *last;
+ long long cnt;
+} ino_flist_t;
+
+static ino_flist_t ino_flist; /* free list must be initialized before use */
+
/* memory optimised nlink counting for all inodes */
static void nlink_grow_8_to_16(ino_tree_node_t *irec);
}
/*
- * Next is the uncertain inode list -- a sorted (in ascending order)
+ * next is the uncertain inode list -- a sorted (in ascending order)
* list of inode records sorted on the starting inode number. There
* is one list per ag.
*/
/*
- * Common code for creating inode records for use by trees and lists.
+ * common code for creating inode records for use by trees and lists.
* called only from add_inodes and add_inodes_uncertain
*
* IMPORTANT: all inodes (inode records) start off as free and
* unconfirmed.
*/
-static struct ino_tree_node *
-alloc_ino_node(
+/* ARGSUSED */
+static ino_tree_node_t *
+mk_ino_tree_nodes(
xfs_agino_t starting_ino)
{
- struct ino_tree_node *irec;
+ int i;
+ ino_tree_node_t *ino_rec;
+ avlnode_t *node;
- irec = malloc(sizeof(*irec));
- if (!irec)
- do_error(_("inode map malloc failed\n"));
+ pthread_mutex_lock(&ino_flist_lock);
+ if (ino_flist.cnt == 0) {
+ ASSERT(ino_flist.list == NULL);
- irec->avl_node.avl_nextino = NULL;
- irec->avl_node.avl_forw = NULL;
- irec->avl_node.avl_back = NULL;
+ if ((ino_rec = malloc(sizeof(ino_tree_node_t[ALLOC_NUM_INOS])))
+ == NULL)
+ do_error(_("inode map malloc failed\n"));
- irec->ino_startnum = starting_ino;
- irec->ino_confirmed = 0;
- irec->ino_isa_dir = 0;
- irec->ir_free = (xfs_inofree_t) - 1;
- irec->ino_un.ex_data = NULL;
- irec->nlinkops = &nlinkops[0];
- irec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
- if (!irec->disk_nlinks)
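+ /* thread the freshly malloc'd batch of records onto the free list */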
+ for (i = 0; i < ALLOC_NUM_INOS; i++) {
+ ino_rec->avl_node.avl_nextino =
+ (avlnode_t *) ino_flist.list;
+ ino_flist.list = ino_rec;
+ ino_flist.cnt++;
+ ino_rec++;
+ }
+ }
+
+ ASSERT(ino_flist.list != NULL);
+
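+ /* pull the first record off the free list */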
+ ino_rec = ino_flist.list;
+ ino_flist.list = (ino_tree_node_t *) ino_rec->avl_node.avl_nextino;
+ ino_flist.cnt--;
+ node = &ino_rec->avl_node;
+ node->avl_nextino = node->avl_forw = node->avl_back = NULL;
+ pthread_mutex_unlock(&ino_flist_lock);
+
+ /* initialize node */
+
+ ino_rec->ino_startnum = 0;
+ ino_rec->ino_confirmed = 0;
+ ino_rec->ino_isa_dir = 0;
+ ino_rec->ir_free = (xfs_inofree_t) - 1;
+ ino_rec->ino_un.ex_data = NULL;
+ ino_rec->nlinkops = &nlinkops[0];
+ ino_rec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
+ if (ino_rec->disk_nlinks == NULL)
do_error(_("could not allocate nlink array\n"));
- return irec;
+
+ return(ino_rec);
}
+/*
+ * return inode record to free list, will be initialized when
+ * it gets pulled off list
+ */
static void
-free_ino_tree_node(
- struct ino_tree_node *irec)
+free_ino_tree_node(ino_tree_node_t *ino_rec)
{
- irec->avl_node.avl_nextino = NULL;
- irec->avl_node.avl_forw = NULL;
- irec->avl_node.avl_back = NULL;
+ ino_rec->avl_node.avl_nextino = NULL;
+ ino_rec->avl_node.avl_forw = NULL;
+ ino_rec->avl_node.avl_back = NULL;
- free(irec->disk_nlinks);
- if (irec->ino_un.ex_data != NULL) {
+ pthread_mutex_lock(&ino_flist_lock);
+ if (ino_flist.list != NULL) {
+ ASSERT(ino_flist.cnt > 0);
+ ino_rec->avl_node.avl_nextino = (avlnode_t *) ino_flist.list;
+ } else {
+ ASSERT(ino_flist.cnt == 0);
+ ino_rec->avl_node.avl_nextino = NULL;
+ }
+
+ ino_flist.list = ino_rec;
+ ino_flist.cnt++;
+
+ free(ino_rec->disk_nlinks);
+
+ if (ino_rec->ino_un.ex_data != NULL) {
if (full_ino_ex_data) {
- free(irec->ino_un.ex_data->parents);
- free(irec->ino_un.ex_data->counted_nlinks);
+ free(ino_rec->ino_un.ex_data->parents);
+ free(ino_rec->ino_un.ex_data->counted_nlinks);
}
- free(irec->ino_un.ex_data);
+ free(ino_rec->ino_un.ex_data);
}
-
- free(irec);
+ pthread_mutex_unlock(&ino_flist_lock);
}
/*
* check to see if record containing inode is already in the tree.
* if not, add it
*/
- ino_rec = (ino_tree_node_t *)
- avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
- if (!ino_rec) {
- ino_rec = alloc_ino_node(s_ino);
-
- if (!avl_insert(inode_uncertain_tree_ptrs[agno],
- &ino_rec->avl_node))
- do_error(
- _("add_aginode_uncertain - duplicate inode range\n"));
+ if ((ino_rec = (ino_tree_node_t *)
+ avl_findrange(inode_uncertain_tree_ptrs[agno],
+ s_ino)) == NULL) {
+ ino_rec = mk_ino_tree_nodes(s_ino);
+ ino_rec->ino_startnum = s_ino;
+
+ if (avl_insert(inode_uncertain_tree_ptrs[agno],
+ (avlnode_t *) ino_rec) == NULL) {
+ do_error(_("add_aginode_uncertain - "
+ "duplicate inode range\n"));
+ }
}
if (free)
/*
- * Next comes the inode trees. One per AG, AVL trees of inode records, each
- * inode record tracking 64 inodes
+ * next comes the inode trees. One per ag. AVL trees
+ * of inode records, each inode record tracking 64 inodes
*/
-
/*
- * Set up an inode tree record for a group of inodes that will include the
- * requested inode.
+ * set up an inode tree record for a group of inodes that will
+ * include the requested inode.
*
- * This does NOT do error-check for duplicate records. The caller is
- * responsible for checking that. Ino must be the start of an
- * XFS_INODES_PER_CHUNK (64) inode chunk
+ * does NOT error-check for duplicate records. Caller is
+ * responsible for checking that.
*
- * Each inode resides in a 64-inode chunk which can be part one or more chunks
- * (MAX(64, inodes-per-block). The fs allocates in chunks (as opposed to 1
- * chunk) when a block can hold more than one chunk (inodes per block > 64).
- * Allocating in one chunk pieces causes us problems when it takes more than
- * one fs block to contain an inode chunk because the chunks can start on
- * *any* block boundary. So we assume that the caller has a clue because at
- * this level, we don't.
+ * ino must be the start of an XFS_INODES_PER_CHUNK (64) inode chunk
+ *
+ * Each inode resides in a 64-inode chunk which can be part of
+ * one or more chunks (MAX(64, inodes-per-block)). The fs allocates
+ * in chunks (as opposed to 1 chunk) when a block can hold more than
+ * one chunk (inodes per block > 64). Allocating in one chunk pieces
+ * causes us problems when it takes more than one fs block to contain
+ * an inode chunk because the chunks can start on *any* block boundary.
+ * So we assume that the caller has a clue because at this level, we
+ * don't.
*/
-static struct ino_tree_node *
-add_inode(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- xfs_agino_t agino)
+static ino_tree_node_t *
+add_inode(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
- struct ino_tree_node *irec;
+ ino_tree_node_t *ino_rec;
+
+ /* no record exists, make some and put them into the tree */
+
+ ino_rec = mk_ino_tree_nodes(ino);
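+ /* mk_ino_tree_nodes() is ARGSUSED, so set the starting inode here */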
+ ino_rec->ino_startnum = ino;
- irec = alloc_ino_node(agino);
- if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
+ if (avl_insert(inode_tree_ptrs[agno],
+ (avlnode_t *) ino_rec) == NULL) {
do_warn(_("add_inode - duplicate inode range\n"));
- return irec;
+ }
+
+ return(ino_rec);
}
/*
int i;
int agcount = mp->m_sb.sb_agcount;
+ pthread_mutex_init(&ino_flist_lock, NULL);
if ((inode_tree_ptrs = malloc(agcount *
sizeof(avltree_desc_t *))) == NULL)
do_error(_("couldn't malloc inode tree descriptor table\n"));
avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
}
+ ino_flist.cnt = 0;
+ ino_flist.list = NULL;
+
if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
do_error(_("couldn't malloc uncertain inode cache area\n"));