git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bcachefs: __btree_node_reclaim_checks()
author: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 13 Apr 2025 11:45:13 +0000 (07:45 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Thu, 22 May 2025 00:14:13 +0000 (20:14 -0400)
Factor out a helper so we're not duplicating checks after locking the
btree node.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_cache.c

index 560c2953629344f10da8fb588a3b68eb6eabc93d..9d6f78e9600e0858ac3de3d64766595d1251b83d 100644 (file)
@@ -344,35 +344,84 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc,
        return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
 }
 
-/*
- * this version is for btree nodes that have already been freed (we're not
- * reaping a real btree node)
- */
-static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
+static int __btree_node_reclaim_checks(struct bch_fs *c, struct btree *b,
+                                      bool flush, bool locked)
 {
        struct btree_cache *bc = &c->btree_cache;
-       int ret = 0;
 
        lockdep_assert_held(&bc->lock);
-wait_on_io:
-       if (b->flags & ((1U << BTREE_NODE_dirty)|
-                       (1U << BTREE_NODE_read_in_flight)|
+
+       if (btree_node_noevict(b)) {
+               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_noevict]++;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
+       }
+       if (btree_node_write_blocked(b)) {
+               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_blocked]++;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
+       }
+       if (btree_node_will_make_reachable(b)) {
+               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_will_make_reachable]++;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
+       }
+
+       if (btree_node_dirty(b)) {
+               if (!flush) {
+                       bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_dirty]++;
+                       return -BCH_ERR_ENOMEM_btree_node_reclaim;
+               }
+
+               if (locked) {
+                       /*
+                        * Using the underscore version because we don't want to compact
+                        * bsets after the write, since this node is about to be evicted
+                        * - unless btree verify mode is enabled, since it runs out of
+                        * the post write cleanup:
+                        */
+                       if (bch2_verify_btree_ondisk)
+                               bch2_btree_node_write(c, b, SIX_LOCK_intent,
+                                                     BTREE_WRITE_cache_reclaim);
+                       else
+                               __bch2_btree_node_write(c, b,
+                                                       BTREE_WRITE_cache_reclaim);
+               }
+       }
+
+       if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
                        (1U << BTREE_NODE_write_in_flight))) {
                if (!flush) {
-                       if (btree_node_dirty(b))
-                               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_dirty]++;
-                       else if (btree_node_read_in_flight(b))
+                       if (btree_node_read_in_flight(b))
                                bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_read_in_flight]++;
                        else if (btree_node_write_in_flight(b))
                                bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_in_flight]++;
                        return -BCH_ERR_ENOMEM_btree_node_reclaim;
                }
 
+               if (locked)
+                       return -EINTR;
+
                /* XXX: waiting on IO with btree cache lock held */
                bch2_btree_node_wait_on_read(b);
                bch2_btree_node_wait_on_write(b);
        }
 
+       return 0;
+}
+
+/*
+ * this version is for btree nodes that have already been freed (we're not
+ * reaping a real btree node)
+ */
+static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
+{
+       struct btree_cache *bc = &c->btree_cache;
+       int ret = 0;
+
+       lockdep_assert_held(&bc->lock);
+retry_unlocked:
+       ret = __btree_node_reclaim_checks(c, b, flush, false);
+       if (ret)
+               return ret;
+
        if (!six_trylock_intent(&b->c.lock)) {
                bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_intent]++;
                return -BCH_ERR_ENOMEM_btree_node_reclaim;
@@ -380,69 +429,23 @@ wait_on_io:
 
        if (!six_trylock_write(&b->c.lock)) {
                bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_write]++;
-               goto out_unlock_intent;
+               six_unlock_intent(&b->c.lock);
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
        }
 
        /* recheck under lock */
-       if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
-                       (1U << BTREE_NODE_write_in_flight))) {
-               if (!flush) {
-                       if (btree_node_read_in_flight(b))
-                               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_read_in_flight]++;
-                       else if (btree_node_write_in_flight(b))
-                               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_in_flight]++;
-                       goto out_unlock;
-               }
+       ret = __btree_node_reclaim_checks(c, b, flush, true);
+       if (ret) {
                six_unlock_write(&b->c.lock);
                six_unlock_intent(&b->c.lock);
-               goto wait_on_io;
-       }
-
-       if (btree_node_noevict(b)) {
-               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_noevict]++;
-               goto out_unlock;
-       }
-       if (btree_node_write_blocked(b)) {
-               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_blocked]++;
-               goto out_unlock;
-       }
-       if (btree_node_will_make_reachable(b)) {
-               bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_will_make_reachable]++;
-               goto out_unlock;
+               if (ret == -EINTR)
+                       goto retry_unlocked;
+               return ret;
        }
 
-       if (btree_node_dirty(b)) {
-               if (!flush) {
-                       bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_dirty]++;
-                       goto out_unlock;
-               }
-               /*
-                * Using the underscore version because we don't want to compact
-                * bsets after the write, since this node is about to be evicted
-                * - unless btree verify mode is enabled, since it runs out of
-                * the post write cleanup:
-                */
-               if (bch2_verify_btree_ondisk)
-                       bch2_btree_node_write(c, b, SIX_LOCK_intent,
-                                             BTREE_WRITE_cache_reclaim);
-               else
-                       __bch2_btree_node_write(c, b,
-                                               BTREE_WRITE_cache_reclaim);
-
-               six_unlock_write(&b->c.lock);
-               six_unlock_intent(&b->c.lock);
-               goto wait_on_io;
-       }
-out:
        if (b->hash_val && !ret)
                trace_and_count(c, btree_cache_reap, c, b);
-       return ret;
-out_unlock:
-       six_unlock_write(&b->c.lock);
-out_unlock_intent:
-       six_unlock_intent(&b->c.lock);
-       ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
-       goto out;
+       return 0;
 }
 
 static int btree_node_reclaim(struct bch_fs *c, struct btree *b)