xfs: remove xfs_buf_cache.bc_lock
author      Christoph Hellwig <hch@lst.de>
            Tue, 28 Jan 2025 05:22:58 +0000 (06:22 +0100)
committer   Carlos Maiolino <cem@kernel.org>
            Tue, 28 Jan 2025 10:18:22 +0000 (11:18 +0100)
xfs_buf_cache.bc_lock serializes adding buffers to and removing them
from the hashtable.  But as the rhashtable code already uses fine-grained
internal locking for inserts and removals, the extra protection isn't
actually required.

It also happens to fix a lock order inversion against b_lock that was
introduced by the recent lookup race fix.

Fixes: ee10f6fcdb96 ("xfs: fix buffer lookup vs release race")
Reported-by: Lai, Yi <yi1.lai@linux.intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h

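To make the first paragraph of the commit message concrete, here is a minimal, hypothetical kernel-style sketch of a caller relying on rhashtable's internal fine-grained locking (struct obj, obj_params and the helper names are invented for illustration and are not XFS code): inserts and removals go straight to the rhashtable, which takes per-bucket locks internally, while lookups run under rcu_read_lock(), so no caller-side lock like bc_lock is required.

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>

/* Hypothetical example object, not part of XFS. */
struct obj {
	u64			key;
	struct rhash_head	node;
};

static const struct rhashtable_params obj_params = {
	.key_len	= sizeof(u64),
	.key_offset	= offsetof(struct obj, key),
	.head_offset	= offsetof(struct obj, node),
	.automatic_shrinking = true,
};

static struct rhashtable obj_hash;	/* rhashtable_init(&obj_hash, &obj_params) at setup */

/* Insert: the rhashtable takes its own per-bucket lock internally. */
static int obj_insert(struct obj *o)
{
	return rhashtable_insert_fast(&obj_hash, &o->node, obj_params);
}

/*
 * Lookup: runs locklessly under RCU; the caller must take a reference
 * (or otherwise pin the object) before dropping rcu_read_lock().
 */
static struct obj *obj_find(u64 key)
{
	struct obj *o;

	rcu_read_lock();
	o = rhashtable_lookup(&obj_hash, &key, obj_params);
	rcu_read_unlock();
	return o;
}

/* Removal: again no caller-side lock, only the internal bucket lock. */
static int obj_remove(struct obj *o)
{
	return rhashtable_remove_fast(&obj_hash, &o->node, obj_params);
}

The hunks below apply the same model to the buffer cache: rhashtable_lookup_get_insert_fast() is now called under rcu_read_lock() alone, and the rhashtable_remove_fast() call in xfs_buf_rele_cached() no longer nests inside bc_lock.
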
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f1252ed8bd0a729d87f63a617fae3f276cffc325..ef207784876c8810cc005f3fe5c87d0d8e968737 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -41,8 +41,7 @@ struct kmem_cache *xfs_buf_cache;
  *
  * xfs_buf_rele:
  *     b_lock
- *       pag_buf_lock
- *         lru_lock
+ *       lru_lock
  *
  * xfs_buftarg_drain_rele
  *     lru_lock
@@ -220,14 +219,21 @@ _xfs_buf_alloc(
         */
        flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 
-       spin_lock_init(&bp->b_lock);
+       /*
+        * A new buffer is held and locked by the owner.  This ensures that the
+        * buffer is owned by the caller and racing RCU lookups right after
+        * inserting into the hash table are safe (and will have to wait for
+        * the unlock to do anything non-trivial).
+        */
        bp->b_hold = 1;
+       sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+       spin_lock_init(&bp->b_lock);
        atomic_set(&bp->b_lru_ref, 1);
        init_completion(&bp->b_iowait);
        INIT_LIST_HEAD(&bp->b_lru);
        INIT_LIST_HEAD(&bp->b_list);
        INIT_LIST_HEAD(&bp->b_li_list);
-       sema_init(&bp->b_sema, 0); /* held, no waiters */
        bp->b_target = target;
        bp->b_mount = target->bt_mount;
        bp->b_flags = flags;
@@ -497,7 +503,6 @@ int
 xfs_buf_cache_init(
        struct xfs_buf_cache    *bch)
 {
-       spin_lock_init(&bch->bc_lock);
        return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
 }
 
@@ -647,17 +652,20 @@ xfs_buf_find_insert(
        if (error)
                goto out_free_buf;
 
-       spin_lock(&bch->bc_lock);
+       /* The new buffer keeps the perag reference until it is freed. */
+       new_bp->b_pag = pag;
+
+       rcu_read_lock();
        bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
                        &new_bp->b_rhash_head, xfs_buf_hash_params);
        if (IS_ERR(bp)) {
+               rcu_read_unlock();
                error = PTR_ERR(bp);
-               spin_unlock(&bch->bc_lock);
                goto out_free_buf;
        }
        if (bp && xfs_buf_try_hold(bp)) {
                /* found an existing buffer */
-               spin_unlock(&bch->bc_lock);
+               rcu_read_unlock();
                error = xfs_buf_find_lock(bp, flags);
                if (error)
                        xfs_buf_rele(bp);
@@ -665,10 +673,8 @@ xfs_buf_find_insert(
                        *bpp = bp;
                goto out_free_buf;
        }
+       rcu_read_unlock();
 
-       /* The new buffer keeps the perag reference until it is freed. */
-       new_bp->b_pag = pag;
-       spin_unlock(&bch->bc_lock);
        *bpp = new_bp;
        return 0;
 
@@ -1085,7 +1091,6 @@ xfs_buf_rele_cached(
        }
 
        /* we are asked to drop the last reference */
-       spin_lock(&bch->bc_lock);
        __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
@@ -1097,7 +1102,6 @@ xfs_buf_rele_cached(
                        bp->b_state &= ~XFS_BSTATE_DISPOSE;
                else
                        bp->b_hold--;
-               spin_unlock(&bch->bc_lock);
        } else {
                bp->b_hold--;
                /*
@@ -1115,7 +1119,6 @@ xfs_buf_rele_cached(
                ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
                rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
                                xfs_buf_hash_params);
-               spin_unlock(&bch->bc_lock);
                if (pag)
                        xfs_perag_put(pag);
                freebuf = true;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 7e73663c5d4a580b35c75480aec24c88de3cd20d..3b4ed42e11c015d24af1882e4d95c675d431d455 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -80,7 +80,6 @@ typedef unsigned int xfs_buf_flags_t;
 #define XFS_BSTATE_IN_FLIGHT    (1 << 1)       /* I/O in flight */
 
 struct xfs_buf_cache {
-       spinlock_t              bc_lock;
        struct rhashtable       bc_hash;
 };
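
As a footnote on the "lock order inversion against b_lock" mentioned in the commit message, the conflict can be sketched in the style of the lock ordering comment at the top of fs/xfs/xfs_buf.c. This is a reconstruction from the hunks above, not text from the patch; the pag_buf_lock entry that the patch deletes from that comment appears to correspond to bc_lock:

/*
 * Before this patch:
 *
 * xfs_buf_rele (dropping the last cached reference):
 *     b_lock
 *       bc_lock
 *
 * xfs_buf_find_insert (since commit ee10f6fcdb96):
 *     bc_lock
 *       b_lock (taken inside xfs_buf_try_hold())
 *
 * With bc_lock gone, both paths take only b_lock plus the rhashtable's
 * internal bucket locks, so the two orders can no longer conflict.
 */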