xfs: introduce in-core global counter of allocbt blocks
author     Brian Foster <bfoster@redhat.com>
           Wed, 30 Jun 2021 22:38:58 +0000 (18:38 -0400)
committer  Eric Sandeen <sandeen@sandeen.net>
           Wed, 30 Jun 2021 22:38:58 +0000 (18:38 -0400)
Source kernel commit: 16eaab839a9273ed156ebfccbd40c15d1e72f3d8

Introduce an in-core counter to track the sum of all allocbt blocks
used by the filesystem. This value is currently tracked per-ag via
the ->agf_btreeblks field in the AGF, which also happens to include
rmapbt blocks. A global, in-core count of allocbt blocks is required
to identify the subset of global ->m_fdblocks that consists of
unavailable blocks currently used for allocation btrees. To support
this calculation at block reservation time, construct a similar
global counter for allocbt blocks, populate it on first read of each
AGF and update it as allocbt blocks are used and released.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
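
For illustration only (not part of this patch; every name below other than the m_fdblocks/m_allocbt_blks counters it mimics is hypothetical), a minimal standalone sketch of how a block-reservation path might set the in-use allocbt blocks aside from the free-block count:

/*
 * Hypothetical sketch: treat blocks tied up in the free-space btrees as
 * unavailable when deciding whether a reservation fits. Plain int64_t
 * stands in for the atomic64_t/percpu counters of the real code, and
 * all names here are illustrative, not taken from this patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mount_sketch {
	int64_t		fdblocks;	/* free data blocks (cf. m_fdblocks) */
	int64_t		allocbt_blks;	/* bnobt/cntbt blocks in use (cf. m_allocbt_blks) */
};

static bool can_reserve(const struct mount_sketch *mp, int64_t want)
{
	/* Blocks backing the allocation btrees are not really free. */
	int64_t avail = mp->fdblocks - mp->allocbt_blks;

	return avail >= want;
}

int main(void)
{
	struct mount_sketch mp = { .fdblocks = 100, .allocbt_blks = 7 };

	printf("reserve 90: %s\n", can_reserve(&mp, 90) ? "ok" : "ENOSPC");	/* ok */
	printf("reserve 95: %s\n", can_reserve(&mp, 95) ? "ok" : "ENOSPC");	/* ENOSPC */
	return 0;
}
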
include/atomic.h
include/xfs_mount.h
libxfs/xfs_alloc.c
libxfs/xfs_alloc_btree.c

diff --git a/include/atomic.h b/include/atomic.h
index 1aabecc3ae57b7994fe6c4d140a98e9ba4796915..e0e1ba84bc82a4e414d0d5445e7d67762d9a065f 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -20,5 +20,8 @@ typedef       int64_t atomic64_t;
 
 #define atomic64_read(x)       *(x)
 #define atomic64_set(x, v)     (*(x) = v)
+#define atomic64_add(v, x)     (*(x) += v)
+#define atomic64_inc(x)                ((*(x))++)
+#define atomic64_dec(x)                ((*(x))--)
 
 #endif /* __ATOMIC_H__ */
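
Like the existing atomic64_read()/atomic64_set() shims above, the new helpers expand to plain, non-atomic operations in userspace. A small standalone demo of their behavior (the macros are copied from the hunk; main() and its values are illustrative only):

/*
 * Standalone demo of the userspace atomic64 shims: they are plain,
 * non-atomic operations on an int64_t (the kernel supplies genuinely
 * atomic counterparts).
 */
#include <inttypes.h>
#include <stdio.h>

typedef int64_t	atomic64_t;

#define atomic64_read(x)	*(x)
#define atomic64_set(x, v)	(*(x) = v)
#define atomic64_add(v, x)	(*(x) += v)
#define atomic64_inc(x)		((*(x))++)
#define atomic64_dec(x)		((*(x))--)

int main(void)
{
	atomic64_t allocbt_blks;

	atomic64_set(&allocbt_blks, 0);
	atomic64_add(7, &allocbt_blks);	/* blocks counted on first AGF read */
	atomic64_inc(&allocbt_blks);	/* a block handed to the bnobt/cntbt */
	atomic64_dec(&allocbt_blks);	/* a btree block freed again */
	printf("%" PRId64 "\n", atomic64_read(&allocbt_blks));	/* prints 7 */
	return 0;
}
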
diff --git a/include/xfs_mount.h b/include/xfs_mount.h
index f93a9f11420138ae70fbf05d4180b13b3fdd0588..12019c4b4dbd7e618d8a25455bf79457d5d8fc4c 100644
--- a/include/xfs_mount.h
+++ b/include/xfs_mount.h
@@ -105,6 +105,14 @@ typedef struct xfs_mount {
         * if warranted.
         */
        struct xlog             *m_log;         /* log specific stuff */
+
+       /*
+        * Global count of allocation btree blocks in use across all AGs. Only
+        * used when perag reservation is enabled. Helps prevent block
+        * reservation from attempting to reserve allocation btree blocks.
+        */
+       atomic64_t              m_allocbt_blks;
+
 } xfs_mount_t;
 
 #define M_IGEO(mp)             (&(mp)->m_ino_geo)
diff --git a/libxfs/xfs_alloc.c b/libxfs/xfs_alloc.c
index 699c15400d523c3827cc1c839ff823d8121d6446..d99622a6848f9b3ded458b1901e6682694061013 100644
--- a/libxfs/xfs_alloc.c
+++ b/libxfs/xfs_alloc.c
@@ -3029,6 +3029,7 @@ xfs_alloc_read_agf(
        struct xfs_agf          *agf;           /* ag freelist header */
        struct xfs_perag        *pag;           /* per allocation group data */
        int                     error;
+       int                     allocbt_blks;
 
        trace_xfs_alloc_read_agf(mp, agno);
 
@@ -3059,6 +3060,19 @@ xfs_alloc_read_agf(
                pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
                pag->pagf_init = 1;
                pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
+
+               /*
+                * Update the in-core allocbt counter. Filter out the rmapbt
+                * subset of the btreeblks counter because the rmapbt is managed
+                * by perag reservation. Subtract one for the rmapbt root block
+                * because the rmap counter includes it while the btreeblks
+                * counter only tracks non-root blocks.
+                */
+               allocbt_blks = pag->pagf_btreeblks;
+               if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+                       allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
+               if (allocbt_blks > 0)
+                       atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
        }
 #ifdef DEBUG
        else if (!XFS_FORCED_SHUTDOWN(mp)) {
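
To make the filtering above concrete, a standalone sketch with made-up numbers (the plain variables stand in for the pag/agf fields named in the hunk; they are not the real structures):

/*
 * Worked example of the computation above with made-up numbers:
 *   pagf_btreeblks  = 10  non-root blocks across bnobt + cntbt + rmapbt
 *   agf_rmap_blocks = 4   rmapbt blocks, including its root
 * The rmapbt contributes 4 - 1 = 3 of the non-root blocks, so the
 * allocbt (bnobt + cntbt) share added to m_allocbt_blks is 10 - 3 = 7.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int	btreeblks = 10;		/* stand-in for pag->pagf_btreeblks */
	int	rmap_blocks = 4;	/* stand-in for be32_to_cpu(agf->agf_rmap_blocks) */
	bool	has_rmapbt = true;	/* stand-in for xfs_sb_version_hasrmapbt() */
	int	allocbt_blks = btreeblks;

	if (has_rmapbt)
		allocbt_blks -= rmap_blocks - 1;
	if (allocbt_blks > 0)
		printf("add %d to m_allocbt_blks\n", allocbt_blks);	/* 7 */
	return 0;
}
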
diff --git a/libxfs/xfs_alloc_btree.c b/libxfs/xfs_alloc_btree.c
index 9b5dce55e33e238261f2d861ebf9f050c518d925..4611ed0f7dc439acefe8712e5ffbb513bf06b032 100644
--- a/libxfs/xfs_alloc_btree.c
+++ b/libxfs/xfs_alloc_btree.c
@@ -69,6 +69,7 @@ xfs_allocbt_alloc_block(
                return 0;
        }
 
+       atomic64_inc(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
 
        new->s = cpu_to_be32(bno);
@@ -92,6 +93,7 @@ xfs_allocbt_free_block(
        if (error)
                return error;
 
+       atomic64_dec(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        return 0;