if (check_aginode_block(mp, agno, agino) == 0)
return 0;
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, agbno, state);
set_bmap(agno, agbno, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno].lock);
- return(0);
+ unlock_ag(agno);
+ return 0;
default:
do_warn(
_("inode block %d/%d bad state, (state %d)\n"),
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
start_agino = XFS_AGB_TO_AGINO(mp, agbno);
*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);
* user data -- we're probably here as a result of a directory
* entry or an iunlinked pointer
*/
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, cur_agbno, state);
set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
return 0;
case XR_E_METADATA:
case XR_E_INO:
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
/*
* ok, chunk is good. put the record into the tree if required,
set_inode_used(irec_p, agino - start_agino);
- pthread_mutex_lock(&ag_locks[agno].lock);
-
+ lock_ag(agno);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
return(ino_cnt);
}
{
int state;
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
case XR_E_INO: /* already marked */
XFS_AGB_TO_FSB(mp, agno, agbno), state);
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
/*
ebno = agbno + irec.br_blockcount;
if (agno != locked_agno) {
if (locked_agno != -1)
- pthread_mutex_unlock(&ag_locks[locked_agno].lock);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ unlock_ag(locked_agno);
locked_agno = agno;
+ lock_ag(locked_agno);
}
for (b = irec.br_startblock;
error = 0;
done:
if (locked_agno != -1)
- pthread_mutex_unlock(&ag_locks[locked_agno].lock);
+ unlock_ag(locked_agno);
if (i != *numrecs) {
ASSERT(i < *numrecs);
uint32_t sb_unit;
uint32_t sb_width;
-struct aglock *ag_locks;
-
time_t report_interval;
uint64_t *prog_rpt_done;
extern uint32_t sb_unit;
extern uint32_t sb_width;
-struct aglock {
- pthread_mutex_t lock __attribute__((__aligned__(64)));
-};
-extern struct aglock *ag_locks;
extern pthread_mutex_t rt_lock;
extern time_t report_interval;
static int states[16] =
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-static struct btree_root **ag_bmap;
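+/*
+ * Per-group block usage map.  The btree root that tracks block states and
+ * the lock protecting it are kept in one structure; the lock is aligned to
+ * 64 bytes so adjacent entries do not share a cache line.
+ */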
+struct bmap {
+ pthread_mutex_t lock __attribute__((__aligned__(64)));
+ struct btree_root *root;
+};
+static struct bmap *ag_bmaps;
+
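+/* Serialize access to one group's block usage map. */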
+void
+lock_ag(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_lock(&ag_bmaps[agno].lock);
+}
+
+void
+unlock_ag(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_unlock(&ag_bmaps[agno].lock);
+}
static void
update_bmap(
xfs_extlen_t blen,
int state)
{
- update_bmap(ag_bmap[agno], agbno, blen, &states[state]);
+ update_bmap(ag_bmaps[agno].root, agbno, blen, &states[state]);
}
int
xfs_agblock_t maxbno,
xfs_extlen_t *blen)
{
+ struct btree_root *bmap = ag_bmaps[agno].root;
int *statep;
unsigned long key;
- statep = btree_find(ag_bmap[agno], agbno, &key);
+ statep = btree_find(bmap, agbno, &key);
if (!statep)
return -1;
if (key == agbno) {
if (blen) {
- if (!btree_peek_next(ag_bmap[agno], &key))
+ if (!btree_peek_next(bmap, &key))
return -1;
*blen = min(maxbno, key) - agbno;
}
return *statep;
}
- statep = btree_peek_prev(ag_bmap[agno], NULL);
+ statep = btree_peek_prev(bmap, NULL);
if (!statep)
return -1;
if (blen)
ag_size = mp->m_sb.sb_agblocks;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ struct btree_root *bmap = ag_bmaps[agno].root;
+
if (agno == mp->m_sb.sb_agcount - 1)
ag_size = (xfs_extlen_t)(mp->m_sb.sb_dblocks -
(xfs_rfsblock_t)mp->m_sb.sb_agblocks * agno);
#ifdef BTREE_STATS
- if (btree_find(ag_bmap[agno], 0, NULL)) {
+ if (btree_find(bmap, 0, NULL)) {
- printf("ag_bmap[%d] btree stats:\n", i);
+ printf("ag_bmap[%d] btree stats:\n", agno);
- btree_print_stats(ag_bmap[agno], stdout);
+ btree_print_stats(bmap, stdout);
}
#endif
/*
* ag_hdr_block..ag_size: XR_E_UNKNOWN
* ag_size... XR_E_BAD_STATE
*/
- btree_clear(ag_bmap[agno]);
- btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
- btree_insert(ag_bmap[agno],
- ag_hdr_block, &states[XR_E_UNKNOWN]);
- btree_insert(ag_bmap[agno], ag_size, &states[XR_E_BAD_STATE]);
+ btree_clear(bmap);
+ btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
+ btree_insert(bmap, ag_hdr_block, &states[XR_E_UNKNOWN]);
+ btree_insert(bmap, ag_size, &states[XR_E_BAD_STATE]);
}
if (mp->m_sb.sb_logstart != 0) {
reset_rt_bmap();
}
-void
-init_bmaps(xfs_mount_t *mp)
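+/*
+ * Allocate block usage maps for nr_groups groups and initialize each
+ * group's btree root and lock.  Returns NULL if the allocation fails.
+ */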
+static struct bmap *
+alloc_bmaps(
+ unsigned int nr_groups)
{
- xfs_agnumber_t i;
+ struct bmap *bmap;
+ unsigned int i;
- ag_bmap = calloc(mp->m_sb.sb_agcount, sizeof(struct btree_root *));
- if (!ag_bmap)
- do_error(_("couldn't allocate block map btree roots\n"));
+ bmap = calloc(nr_groups, sizeof(*bmap));
+ if (!bmap)
+ return NULL;
- ag_locks = calloc(mp->m_sb.sb_agcount, sizeof(struct aglock));
- if (!ag_locks)
- do_error(_("couldn't allocate block map locks\n"));
-
- for (i = 0; i < mp->m_sb.sb_agcount; i++) {
- btree_init(&ag_bmap[i]);
- pthread_mutex_init(&ag_locks[i].lock, NULL);
+ for (i = 0; i < nr_groups; i++) {
+ btree_init(&bmap[i].root);
+ pthread_mutex_init(&bmap[i].lock, NULL);
}
- init_rt_bmap(mp);
- reset_bmaps(mp);
+ return bmap;
}
-void
-free_bmaps(xfs_mount_t *mp)
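+/* Tear down the btree roots and locks set up by alloc_bmaps(). */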
+static void
+destroy_bmaps(
+ struct bmap *bmap,
+ unsigned int nr_groups)
{
- xfs_agnumber_t i;
+ unsigned int i;
- for (i = 0; i < mp->m_sb.sb_agcount; i++)
- pthread_mutex_destroy(&ag_locks[i].lock);
+ for (i = 0; i < nr_groups; i++) {
+ btree_destroy(bmap[i].root);
+ pthread_mutex_destroy(&bmap[i].lock);
+ }
- free(ag_locks);
- ag_locks = NULL;
+ free(bmap);
+}
- for (i = 0; i < mp->m_sb.sb_agcount; i++)
- btree_destroy(ag_bmap[i]);
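+/*
+ * Allocate the in-memory block usage maps covering all allocation groups
+ * and realtime groups, then reset them to their initial state.
+ */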
+void
+init_bmaps(
+ struct xfs_mount *mp)
+{
+ ag_bmaps = alloc_bmaps(mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ if (!ag_bmaps)
+ do_error(_("couldn't allocate block map btree roots\n"));
- free(ag_bmap);
- ag_bmap = NULL;
+ init_rt_bmap(mp);
+ reset_bmaps(mp);
+}
+
+void
+free_bmaps(
+ struct xfs_mount *mp)
+{
+ destroy_bmaps(ag_bmaps, mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ ag_bmaps = NULL;
free_rt_bmap(mp);
}
void reset_bmaps(xfs_mount_t *mp);
void free_bmaps(xfs_mount_t *mp);
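+/*
+ * Callers serialize block usage map lookups and updates with the
+ * per-group lock, for example:
+ *
+ *	lock_ag(agno);
+ *	state = get_bmap(agno, agbno);
+ *	set_bmap(agno, agbno, XR_E_MULT);
+ *	unlock_ag(agno);
+ */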
+void lock_ag(xfs_agnumber_t agno);
+void unlock_ag(xfs_agnumber_t agno);
+
void set_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_extlen_t blen, int state);
int get_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
agno = XFS_INO_TO_AGNO(mp, rciter.ino);
agino = XFS_INO_TO_AGINO(mp, rciter.ino);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
irec = find_inode_rec(mp, agno, agino);
off = get_inode_offset(mp, rciter.ino, irec);
/* lock here because we might go outside this ag */
set_inode_is_rl(irec, off);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
rcbag_ino_iter_stop(rcstack, &rciter);
}
agno = XFS_FSB_TO_AGNO(mp, bno);
agbno = XFS_FSB_TO_AGBNO(mp, bno);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
case XR_E_INUSE1:
state, ino, bno);
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
} else {
if (search_dup_extent(XFS_FSB_TO_AGNO(mp, bno),
XFS_FSB_TO_AGBNO(mp, bno),
/* Record BMBT blocks in the reverse-mapping data. */
if (check_dups && collect_rmaps && !zap_metadata) {
agno = XFS_FSB_TO_AGNO(mp, bno);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
rmap_add_bmbt_rec(mp, ino, whichfork, bno);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
if (level == 0) {