]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob
0fab60620e2da448664e4564cfa782eafb8093bd
[thirdparty/kernel/stable-queue.git] /
1 From 4f78252da887ee7e9d1875dd6e07d9baa936c04f Mon Sep 17 00:00:00 2001
2 From: Kemeng Shi <shikemeng@huaweicloud.com>
3 Date: Thu, 22 May 2025 20:25:51 +0800
4 Subject: mm: swap: move nr_swap_pages counter decrement from folio_alloc_swap() to swap_range_alloc()
5
6 From: Kemeng Shi <shikemeng@huaweicloud.com>
7
8 commit 4f78252da887ee7e9d1875dd6e07d9baa936c04f upstream.
9
10 Patch series "Some random fixes and cleanups to swapfile".
11
12 Patch 0-3 are some random fixes. Patch 4 is a cleanup. More details can
13 be found in respective patches.
14
15
16 This patch (of 4):
17
18 When folio_alloc_swap() encounters a failure in either
19 mem_cgroup_try_charge_swap() or add_to_swap_cache(), nr_swap_pages counter
20 is not decremented for the allocated entry. However, the subsequent
21 put_swap_folio() will increment the nr_swap_pages counter without a
22 matching decrement, leading to an imbalance.
23
24 Move nr_swap_pages decrement from folio_alloc_swap() to swap_range_alloc()
25 to pair the nr_swap_pages counting.
26
27 Link: https://lkml.kernel.org/r/20250522122554.12209-1-shikemeng@huaweicloud.com
28 Link: https://lkml.kernel.org/r/20250522122554.12209-2-shikemeng@huaweicloud.com
29 Fixes: 0ff67f990bd4 ("mm, swap: remove swap slot cache")
30 Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
31 Reviewed-by: Kairui Song <kasong@tencent.com>
32 Reviewed-by: Baoquan He <bhe@redhat.com>
33 Cc: Johannes Weiner <hannes@cmpxchg.org>
34 Cc: <stable@vger.kernel.org>
35 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
36 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
37 ---
38 mm/swapfile.c | 2 +-
39 1 file changed, 1 insertion(+), 1 deletion(-)
40
41 --- a/mm/swapfile.c
42 +++ b/mm/swapfile.c
43 @@ -1115,6 +1115,7 @@ static void swap_range_alloc(struct swap
44 if (vm_swap_full())
45 schedule_work(&si->reclaim_work);
46 }
47 + atomic_long_sub(nr_entries, &nr_swap_pages);
48 }
49
50 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
51 @@ -1313,7 +1314,6 @@ int folio_alloc_swap(struct folio *folio
52 if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
53 goto out_free;
54
55 - atomic_long_sub(size, &nr_swap_pages);
56 return 0;
57
58 out_free: