git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/swap: strengthen locking assertions and invariants in cluster allocation
author: Hui Zhu <zhuhui@kylinos.cn>
Tue, 10 Mar 2026 01:56:57 +0000 (09:56 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:25 +0000 (13:53 -0700)
swap_cluster_alloc_table() requires several locks to be held by its
callers: ci->lock, the per-CPU swap_cluster lock, and, for non-solid-state
devices (non-SWP_SOLIDSTATE), the si->global_cluster_lock.

While most call paths (e.g., via cluster_alloc_swap_entry() or
alloc_swap_scan_list()) correctly acquire these locks before invocation,
the path through swap_reclaim_work() -> swap_reclaim_full_clusters() ->
isolate_lock_cluster() is distinct.  This path operates exclusively on
si->full_clusters, where the swap allocation tables are guaranteed to be
already allocated.  Consequently, isolate_lock_cluster() should never
trigger a call to swap_cluster_alloc_table() for these clusters.

Strengthen the locking and state assertions to formalize these invariants:

1. Add a lockdep_assert_held() for si->global_cluster_lock in
   swap_cluster_alloc_table() for non-SWP_SOLIDSTATE devices.
2. Reorder existing lockdep assertions in swap_cluster_alloc_table() to
   match the actual lock acquisition order (per-CPU lock, then global lock,
   then cluster lock).
3. Add a VM_WARN_ON_ONCE() in isolate_lock_cluster() to ensure that table
   allocations are only attempted for clusters being isolated from the
   free list. Attempting to allocate a table for a cluster from other
   lists (like the full list during reclaim) indicates a violation of
   subsystem invariants.

These changes ensure locking consistency and help catch potential
synchronization or logic issues during development.

[zhuhui@kylinos.cn: remove redundant comment, per Barry]
Link: https://lkml.kernel.org/r/20260311022241.177801-1-hui.zhu@linux.dev
[zhuhui@kylinos.cn: initialize `flags', per Chris]
Link: https://lkml.kernel.org/r/20260312023024.903143-1-hui.zhu@linux.dev
Link: https://lkml.kernel.org/r/20260310015657.42395-1-hui.zhu@linux.dev
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
Reviewed-by: Youngjun Park <youngjun.park@lge.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swapfile.c

index 915bc93964dbd58ac6da04cc34c6e2784c1428ea..71a7d6959f3e12e69723b1939171f7766f2ec94f 100644 (file)
@@ -498,8 +498,10 @@ swap_cluster_alloc_table(struct swap_info_struct *si,
         * Only cluster isolation from the allocator does table allocation.
         * Swap allocator uses percpu clusters and holds the local lock.
         */
-       lockdep_assert_held(&ci->lock);
        lockdep_assert_held(&this_cpu_ptr(&percpu_swap_cluster)->lock);
+       if (!(si->flags & SWP_SOLIDSTATE))
+               lockdep_assert_held(&si->global_cluster_lock);
+       lockdep_assert_held(&ci->lock);
 
        /* The cluster must be free and was just isolated from the free list. */
        VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci));
@@ -600,6 +602,7 @@ static struct swap_cluster_info *isolate_lock_cluster(
                struct swap_info_struct *si, struct list_head *list)
 {
        struct swap_cluster_info *ci, *found = NULL;
+       u8 flags = CLUSTER_FLAG_NONE;
 
        spin_lock(&si->lock);
        list_for_each_entry(ci, list, list) {
@@ -612,6 +615,7 @@ static struct swap_cluster_info *isolate_lock_cluster(
                          ci->flags != CLUSTER_FLAG_FULL);
 
                list_del(&ci->list);
+               flags = ci->flags;
                ci->flags = CLUSTER_FLAG_NONE;
                found = ci;
                break;
@@ -620,6 +624,7 @@ static struct swap_cluster_info *isolate_lock_cluster(
 
        if (found && !cluster_table_is_alloced(found)) {
                /* Only an empty free cluster's swap table can be freed. */
+               VM_WARN_ON_ONCE(flags != CLUSTER_FLAG_FREE);
                VM_WARN_ON_ONCE(list != &si->free_clusters);
                VM_WARN_ON_ONCE(!cluster_is_empty(found));
                return swap_cluster_alloc_table(si, found);