+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <libxfs.h>
+#include "libxfs.h"
#include "avl.h"
+#include "btree.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "err_protos.h"
#include "avl64.h"
#include "threads.h"
-#define ALLOC_NUM_EXTS 100
-
-/*
- * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
- */
-typedef struct extent_alloc_rec {
- struct list_head list;
- extent_tree_node_t extents[ALLOC_NUM_EXTS];
-} extent_alloc_rec_t;
-
-typedef struct rt_extent_alloc_rec {
- struct list_head list;
- rt_extent_tree_node_t extents[ALLOC_NUM_EXTS];
-} rt_extent_alloc_rec_t;
/*
* note: there are 4 sets of incore things handled here:
* phase 5. The uncertain inode list goes away at the end of
* phase 3. The inode tree and bno/bnct trees go away after phase 5.
*/
-typedef struct ext_flist_s {
- extent_tree_node_t *list;
- int cnt;
-} ext_flist_t;
-
-static ext_flist_t ext_flist;
-
-typedef struct rt_ext_flist_s {
- rt_extent_tree_node_t *list;
- int cnt;
-} rt_ext_flist_t;
-
-static rt_ext_flist_t rt_ext_flist;
static avl64tree_desc_t *rt_ext_tree_ptr; /* dup extent tree for rt */
+static pthread_mutex_t rt_ext_tree_lock;
+
+static struct btree_root **dup_extent_trees; /* per ag dup extent trees */
+static pthread_mutex_t *dup_extent_tree_locks;
-avltree_desc_t **extent_tree_ptrs; /* array of extent tree ptrs */
- /* one per ag for dups */
static avltree_desc_t **extent_bno_ptrs; /*
* array of extent tree ptrs
* one per ag for free extents
*/
/*
- * list of allocated "blocks" for easy freeing later
+ * duplicate extent tree functions
*/
-static struct list_head ba_list;
-static struct list_head rt_ba_list;
-/*
- * locks.
- */
-static pthread_mutex_t ext_flist_lock;
-static pthread_mutex_t rt_ext_tree_lock;
-static pthread_mutex_t rt_ext_flist_lock;
+void
+release_dup_extent_tree(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_lock(&dup_extent_tree_locks[agno]);
+ btree_clear(dup_extent_trees[agno]);
+ pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
+}
+
+int
+add_dup_extent(
+ xfs_agnumber_t agno,
+ xfs_agblock_t startblock,
+ xfs_extlen_t blockcount)
+{
+ int ret;
+#ifdef XR_DUP_TRACE
+ fprintf(stderr, "Adding dup extent - %d/%d %d\n", agno, startblock,
+ blockcount);
+#endif
+ pthread_mutex_lock(&dup_extent_tree_locks[agno]);
+ ret = btree_insert(dup_extent_trees[agno], startblock,
+ (void *)(uintptr_t)(startblock + blockcount));
+ pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
+ return ret;
+}
+
+int
+search_dup_extent(
+ xfs_agnumber_t agno,
+ xfs_agblock_t start_agbno,
+ xfs_agblock_t end_agbno)
+{
+ unsigned long bno;
+ int ret;
+
+ pthread_mutex_lock(&dup_extent_tree_locks[agno]);
+ if (!btree_find(dup_extent_trees[agno], start_agbno, &bno)) {
+ ret = 0;
+ goto out; /* this really shouldn't happen */
+ }
+ if (bno < end_agbno) {
+ ret = 1;
+ goto out;
+ }
+ ret = (uintptr_t)btree_peek_prev(dup_extent_trees[agno], NULL) >
+ start_agbno;
+out:
+ pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
+ return ret;
+}
+
/*
* extent tree stuff is avl trees of duplicate extents,
mk_extent_tree_nodes(xfs_agblock_t new_startblock,
xfs_extlen_t new_blockcount, extent_state_t new_state)
{
- int i;
extent_tree_node_t *new;
- extent_alloc_rec_t *rec;
- pthread_mutex_lock(&ext_flist_lock);
- if (ext_flist.cnt == 0) {
- ASSERT(ext_flist.list == NULL);
-
- if ((rec = malloc(sizeof(extent_alloc_rec_t))) == NULL)
- do_error(
- _("couldn't allocate new extent descriptors.\n"));
+ new = malloc(sizeof(*new));
+ if (!new)
+ do_error(_("couldn't allocate new extent descriptor.\n"));
- list_add(&rec->list, &ba_list);
-
- new = &rec->extents[0];
-
- for (i = 0; i < ALLOC_NUM_EXTS; i++) {
- new->avl_node.avl_nextino = (avlnode_t *)
- ext_flist.list;
- ext_flist.list = new;
- ext_flist.cnt++;
- new++;
- }
- }
-
- ASSERT(ext_flist.list != NULL);
-
- new = ext_flist.list;
- ext_flist.list = (extent_tree_node_t *) new->avl_node.avl_nextino;
- ext_flist.cnt--;
new->avl_node.avl_nextino = NULL;
- pthread_mutex_unlock(&ext_flist_lock);
-
- /* initialize node */
-
new->ex_startblock = new_startblock;
new->ex_blockcount = new_blockcount;
new->ex_state = new_state;
new->next = NULL;
new->last = NULL;
- return(new);
+ return new;
}
void
release_extent_tree_node(extent_tree_node_t *node)
{
- pthread_mutex_lock(&ext_flist_lock);
- node->avl_node.avl_nextino = (avlnode_t *) ext_flist.list;
- ext_flist.list = node;
- ext_flist.cnt++;
- pthread_mutex_unlock(&ext_flist_lock);
-
- return;
+ free(node);
}
/*
* reused. the duplicate and bno/bcnt extent trees for each AG
* are recycled after they're no longer needed to save memory
*/
-void
+static void
release_extent_tree(avltree_desc_t *tree)
{
extent_tree_node_t *ext;
/*
* top-level (visible) routines
*/
-void
-release_dup_extent_tree(xfs_agnumber_t agno)
-{
- release_extent_tree(extent_tree_ptrs[agno]);
-
- return;
-}
-
void
release_agbno_extent_tree(xfs_agnumber_t agno)
{
return;
}
-/*
- * normalizing constant for bcnt size -> address conversion (see avl ops)
- * used by the AVL tree code to convert sizes and must be used when
- * doing an AVL search in the tree (e.g. avl_findrange(s))
- */
-#define MAXBCNT 0xFFFFFFFF
-#define BCNT_ADDR(cnt) ((unsigned int) MAXBCNT - (cnt))
-
/*
* the next 4 routines manage the trees of free extents -- 2 trees
* per AG. The first tree is sorted by block number. The second
extent_tree_node_t *
findbiggest_bcnt_extent(xfs_agnumber_t agno)
{
- extern avlnode_t *avl_lastino(avlnode_t *root);
-
ASSERT(extent_bcnt_ptrs != NULL);
ASSERT(extent_bcnt_ptrs[agno] != NULL);
return(ext);
}
-/*
- * the next 2 routines manage the trees of duplicate extents -- 1 tree
- * per AG
- */
-void
-add_dup_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
- xfs_extlen_t blockcount)
-{
- extent_tree_node_t *first, *last, *ext, *next_ext;
- xfs_agblock_t new_startblock;
- xfs_extlen_t new_blockcount;
-
- ASSERT(agno < glob_agcount);
-
-#ifdef XR_DUP_TRACE
- fprintf(stderr, "Adding dup extent - %d/%d %d\n", agno, startblock, blockcount);
-#endif
- avl_findranges(extent_tree_ptrs[agno], startblock - 1,
- startblock + blockcount + 1,
- (avlnode_t **) &first, (avlnode_t **) &last);
- /*
- * find adjacent and overlapping extent blocks
- */
- if (first == NULL && last == NULL) {
- /* nothing, just make and insert new extent */
-
- ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_MULT);
-
- if (avl_insert(extent_tree_ptrs[agno],
- (avlnode_t *) ext) == NULL) {
- do_error(_("duplicate extent range\n"));
- }
-
- return;
- }
-
- ASSERT(first != NULL && last != NULL);
-
- /*
- * find the new composite range, delete old extent nodes
- * as we go
- */
- new_startblock = startblock;
- new_blockcount = blockcount;
-
- for (ext = first;
- ext != (extent_tree_node_t *) last->avl_node.avl_nextino;
- ext = next_ext) {
- /*
- * preserve the next inorder node
- */
- next_ext = (extent_tree_node_t *) ext->avl_node.avl_nextino;
- /*
- * just bail if the new extent is contained within an old one
- */
- if (ext->ex_startblock <= startblock &&
- ext->ex_blockcount >= blockcount)
- return;
- /*
- * now check for overlaps and adjacent extents
- */
- if (ext->ex_startblock + ext->ex_blockcount >= startblock
- || ext->ex_startblock <= startblock + blockcount) {
-
- if (ext->ex_startblock < new_startblock)
- new_startblock = ext->ex_startblock;
-
- if (ext->ex_startblock + ext->ex_blockcount >
- new_startblock + new_blockcount)
- new_blockcount = ext->ex_startblock +
- ext->ex_blockcount -
- new_startblock;
-
- avl_delete(extent_tree_ptrs[agno], (avlnode_t *) ext);
- continue;
- }
- }
-
- ext = mk_extent_tree_nodes(new_startblock, new_blockcount, XR_E_MULT);
-
- if (avl_insert(extent_tree_ptrs[agno], (avlnode_t *) ext) == NULL) {
- do_error(_("duplicate extent range\n"));
- }
-
- return;
-}
-
-static __psunsigned_t
+static uintptr_t
avl_ext_start(avlnode_t *node)
{
- return((__psunsigned_t)
+ return((uintptr_t)
((extent_tree_node_t *) node)->ex_startblock);
}
-static __psunsigned_t
+static uintptr_t
avl_ext_end(avlnode_t *node)
{
- return((__psunsigned_t) (
+ return((uintptr_t) (
((extent_tree_node_t *) node)->ex_startblock +
((extent_tree_node_t *) node)->ex_blockcount));
}
* convert size to an address for the AVL tree code -- the bigger the size,
* the lower the address so the biggest extent will be first in the tree
*/
-static __psunsigned_t
+static uintptr_t
avl_ext_bcnt_start(avlnode_t *node)
{
/*
- return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
+ return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
node)->ex_blockcount)));
*/
- return((__psunsigned_t) ((extent_tree_node_t *)node)->ex_blockcount);
+ return((uintptr_t) ((extent_tree_node_t *)node)->ex_blockcount);
}
-static __psunsigned_t
+static uintptr_t
avl_ext_bcnt_end(avlnode_t *node)
{
/*
- return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
+ return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
node)->ex_blockcount)));
*/
- return((__psunsigned_t) ((extent_tree_node_t *)node)->ex_blockcount);
+ return((uintptr_t) ((extent_tree_node_t *)node)->ex_blockcount);
}
-avlops_t avl_extent_bcnt_tree_ops = {
+static avlops_t avl_extent_bcnt_tree_ops = {
avl_ext_bcnt_start,
avl_ext_bcnt_end
};
-avlops_t avl_extent_tree_ops = {
+static avlops_t avl_extent_tree_ops = {
avl_ext_start,
avl_ext_end
};
* startblocks can be 64-bit values.
*/
static rt_extent_tree_node_t *
-mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock,
+mk_rt_extent_tree_nodes(xfs_rtblock_t new_startblock,
xfs_extlen_t new_blockcount, extent_state_t new_state)
{
- int i;
rt_extent_tree_node_t *new;
- rt_extent_alloc_rec_t *rec;
-
- pthread_mutex_lock(&rt_ext_flist_lock);
- if (rt_ext_flist.cnt == 0) {
- ASSERT(rt_ext_flist.list == NULL);
-
- if ((rec = malloc(sizeof(rt_extent_alloc_rec_t))) == NULL)
- do_error(
- _("couldn't allocate new extent descriptors.\n"));
-
- list_add(&rec->list, &rt_ba_list);
- new = &rec->extents[0];
+ new = malloc(sizeof(*new));
+ if (!new)
+ do_error(_("couldn't allocate new extent descriptor.\n"));
- for (i = 0; i < ALLOC_NUM_EXTS; i++) {
- new->avl_node.avl_nextino = (avlnode_t *)
- rt_ext_flist.list;
- rt_ext_flist.list = new;
- rt_ext_flist.cnt++;
- new++;
- }
- }
-
- ASSERT(rt_ext_flist.list != NULL);
-
- new = rt_ext_flist.list;
- rt_ext_flist.list = (rt_extent_tree_node_t *) new->avl_node.avl_nextino;
- rt_ext_flist.cnt--;
new->avl_node.avl_nextino = NULL;
- pthread_mutex_unlock(&rt_ext_flist_lock);
-
- /* initialize node */
-
new->rt_startblock = new_startblock;
new->rt_blockcount = new_blockcount;
new->rt_state = new_state;
-
- return(new);
+ return new;
}
#if 0
void
release_rt_extent_tree_node(rt_extent_tree_node_t *node)
{
- node->avl_node.avl_nextino = (avlnode_t *) rt_ext_flist.list;
- rt_ext_flist.list = node;
- rt_ext_flist.cnt++;
-
- return;
+ free(node);
}
void
void
free_rt_dup_extent_tree(xfs_mount_t *mp)
{
- rt_extent_alloc_rec_t *cur, *tmp;
-
ASSERT(mp->m_sb.sb_rblocks != 0);
-
- list_for_each_entry_safe(cur, tmp, &rt_ba_list, list)
- free(cur);
-
free(rt_ext_tree_ptr);
-
rt_ext_tree_ptr = NULL;
-
- return;
}
/*
* add a duplicate real-time extent
*/
void
-add_rt_dup_extent(xfs_drtbno_t startblock, xfs_extlen_t blockcount)
+add_rt_dup_extent(xfs_rtblock_t startblock, xfs_extlen_t blockcount)
{
rt_extent_tree_node_t *first, *last, *ext, *next_ext;
- xfs_drtbno_t new_startblock;
+ xfs_rtblock_t new_startblock;
xfs_extlen_t new_blockcount;
pthread_mutex_lock(&rt_ext_tree_lock);
*/
/* ARGSUSED */
int
-search_rt_dup_extent(xfs_mount_t *mp, xfs_drtbno_t bno)
+search_rt_dup_extent(xfs_mount_t *mp, xfs_rtblock_t bno)
{
int ret;
return(ret);
}
-static __uint64_t
+static uint64_t
avl64_rt_ext_start(avl64node_t *node)
{
return(((rt_extent_tree_node_t *) node)->rt_startblock);
}
-static __uint64_t
+static uint64_t
avl64_ext_end(avl64node_t *node)
{
return(((rt_extent_tree_node_t *) node)->rt_startblock +
((rt_extent_tree_node_t *) node)->rt_blockcount);
}
-avl64ops_t avl64_extent_tree_ops = {
+static avl64ops_t avl64_extent_tree_ops = {
avl64_rt_ext_start,
avl64_ext_end
};
int i;
xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
- list_head_init(&ba_list);
- list_head_init(&rt_ba_list);
- pthread_mutex_init(&ext_flist_lock, NULL);
pthread_mutex_init(&rt_ext_tree_lock, NULL);
- pthread_mutex_init(&rt_ext_flist_lock, NULL);
- if ((extent_tree_ptrs = malloc(agcount *
- sizeof(avltree_desc_t *))) == NULL)
- do_error(
- _("couldn't malloc dup extent tree descriptor table\n"));
+ dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
+ if (!dup_extent_trees)
+ do_error(_("couldn't malloc dup extent tree descriptor table\n"));
+
+	dup_extent_tree_locks = calloc(agcount, sizeof(pthread_mutex_t));
+	if (!dup_extent_tree_locks)
+		do_error(_("couldn't malloc dup extent tree lock table\n"));
if ((extent_bno_ptrs = malloc(agcount *
sizeof(avltree_desc_t *))) == NULL)
_("couldn't malloc free by-bcnt extent tree descriptor table\n"));
for (i = 0; i < agcount; i++) {
- if ((extent_tree_ptrs[i] =
- malloc(sizeof(avltree_desc_t))) == NULL)
- do_error(
- _("couldn't malloc dup extent tree descriptor\n"));
if ((extent_bno_ptrs[i] =
malloc(sizeof(avltree_desc_t))) == NULL)
do_error(
}
for (i = 0; i < agcount; i++) {
- avl_init_tree(extent_tree_ptrs[i], &avl_extent_tree_ops);
+ btree_init(&dup_extent_trees[i]);
+ pthread_mutex_init(&dup_extent_tree_locks[i], NULL);
avl_init_tree(extent_bno_ptrs[i], &avl_extent_tree_ops);
avl_init_tree(extent_bcnt_ptrs[i], &avl_extent_bcnt_tree_ops);
}
- if ((rt_ext_tree_ptr = malloc(sizeof(avltree_desc_t))) == NULL)
+ if ((rt_ext_tree_ptr = malloc(sizeof(avl64tree_desc_t))) == NULL)
do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
-
- ext_flist.cnt = 0;
- ext_flist.list = NULL;
-
- return;
}
/*
void
incore_ext_teardown(xfs_mount_t *mp)
{
- extent_alloc_rec_t *cur, *tmp;
xfs_agnumber_t i;
- list_for_each_entry_safe(cur, tmp, &ba_list, list)
- free(cur);
-
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
- free(extent_tree_ptrs[i]);
+ btree_destroy(dup_extent_trees[i]);
free(extent_bno_ptrs[i]);
free(extent_bcnt_ptrs[i]);
}
+ free(dup_extent_trees);
free(extent_bcnt_ptrs);
free(extent_bno_ptrs);
- free(extent_tree_ptrs);
-
- extent_bcnt_ptrs = extent_bno_ptrs = extent_tree_ptrs = NULL;
- return;
+ dup_extent_trees = NULL;
+ extent_bcnt_ptrs = NULL;
+ extent_bno_ptrs = NULL;
}
-int
+static int
count_extents(xfs_agnumber_t agno, avltree_desc_t *tree, int whichtree)
{
extent_tree_node_t *node;
int
count_bno_extents_blocks(xfs_agnumber_t agno, uint *numblocks)
{
- __uint64_t nblocks;
+ uint64_t nblocks;
extent_tree_node_t *node;
int i = 0;