--- /dev/null
+From b10a1e5643e505c367c7e16aa6d8a9a0dc07354b Mon Sep 17 00:00:00 2001
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+Date: Tue, 3 Dec 2024 15:28:21 +0800
+Subject: erofs: fix rare pcluster memory leak after unmounting
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+commit b10a1e5643e505c367c7e16aa6d8a9a0dc07354b upstream.
+
+There may still be pclusters with valid reference counts during
+unmounting. Instead of introducing another synchronization primitive,
+just try again, as unmounting is relatively rare. This approach is
+similar to z_erofs_cache_invalidate_folio().
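+
+For illustration only (not part of the fix itself): a minimal sketch of
+the retry pattern this takes, using the same names as the hunk below:
+
+        /* A single shrink pass may come up short while another thread
+         * still holds a pcluster reference, so keep rescanning until the
+         * managed-pcluster xarray is empty instead of adding a new lock.
+         */
+        while (!xa_empty(&sbi->managed_pslots)) {
+                z_erofs_shrink_scan(sbi, ~0UL);
+                cond_resched();
+        }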
+
+It was also reported by syzbot as a UAF due to commit f5ad9f9a603f
+("erofs: free pclusters if no cached folio is attached"):
+
+BUG: KASAN: slab-use-after-free in do_raw_spin_trylock+0x72/0x1f0 kernel/locking/spinlock_debug.c:123
+..
+ queued_spin_trylock include/asm-generic/qspinlock.h:92 [inline]
+ do_raw_spin_trylock+0x72/0x1f0 kernel/locking/spinlock_debug.c:123
+ __raw_spin_trylock include/linux/spinlock_api_smp.h:89 [inline]
+ _raw_spin_trylock+0x20/0x80 kernel/locking/spinlock.c:138
+ spin_trylock include/linux/spinlock.h:361 [inline]
+ z_erofs_put_pcluster fs/erofs/zdata.c:959 [inline]
+ z_erofs_decompress_pcluster fs/erofs/zdata.c:1403 [inline]
+ z_erofs_decompress_queue+0x3798/0x3ef0 fs/erofs/zdata.c:1425
+ z_erofs_decompressqueue_work+0x99/0xe0 fs/erofs/zdata.c:1437
+ process_one_work kernel/workqueue.c:3229 [inline]
+ process_scheduled_works+0xa68/0x1840 kernel/workqueue.c:3310
+ worker_thread+0x870/0xd30 kernel/workqueue.c:3391
+ kthread+0x2f2/0x390 kernel/kthread.c:389
+ ret_from_fork+0x4d/0x80 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+ </TASK>
+
+However, it seems to be a long-standing memory leak. Fix it now.
+
+Fixes: f5ad9f9a603f ("erofs: free pclusters if no cached folio is attached")
+Reported-by: syzbot+7ff87b095e7ca0c5ac39@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/674c1235.050a0220.ad585.0032.GAE@google.com
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20241203072821.1885740-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/zutil.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/erofs/zutil.c
++++ b/fs/erofs/zutil.c
+@@ -230,9 +230,10 @@ void erofs_shrinker_unregister(struct su
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+ mutex_lock(&sbi->umount_mutex);
+- /* clean up all remaining pclusters in memory */
+- z_erofs_shrink_scan(sbi, ~0UL);
+-
++ while (!xa_empty(&sbi->managed_pslots)) {
++ z_erofs_shrink_scan(sbi, ~0UL);
++ cond_resched();
++ }
+ spin_lock(&erofs_sb_list_lock);
+ list_del(&sbi->list);
+ spin_unlock(&erofs_sb_list_lock);
--- /dev/null
+From 5e9388f7984a9cc7e659a105113f6ccf0aebedd0 Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 25 Jun 2025 17:03:55 -0400
+Subject: selftests/bpf: adapt one more case in test_lru_map to the new target_free
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit 5e9388f7984a9cc7e659a105113f6ccf0aebedd0 upstream.
+
+The commit below, which updated the BPF_MAP_TYPE_LRU_HASH free target,
+also updated tools/testing/selftests/bpf/test_lru_map to match.
+
+But it missed one case that passes with 4 cores yet fails at higher
+CPU counts.
+
+Update test_lru_sanity3 to also adjust its expectation of target_free.
+
+This time it was tested with 1, 4, 16, 64 and 384 CPUs.
+
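+For illustration only (not part of the upstream message): a minimal
+sketch of the arithmetic involved, mirroring the __tgt_size() helper
+added below; the example map size and CPU counts are made up:
+
+        /* target_free as the test now derives it from the map size:
+         * (map_size / nr_cpus) / 2, matching bpf_common_lru_populate.
+         */
+        static unsigned int tgt_size(unsigned int map_size,
+                                     unsigned int nr_cpus)
+        {
+                return (map_size / nr_cpus) / 2;
+        }
+
+        /* e.g. map_size = 256: 4 CPUs -> 32, 64 CPUs -> 2. The amount
+         * freed per shrink depends on the CPU count, so the test's
+         * batch size has to be derived from it rather than hard-coded
+         * as tgt_free / 2.
+         */
+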
+Fixes: d4adf1c9ee77 ("bpf: Adjust free target to avoid global starvation of LRU map")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Link: https://lore.kernel.org/r/20250625210412.2732970-1-willemdebruijn.kernel@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_lru_map.c | 33 +++++++++++++++--------------
+ 1 file changed, 18 insertions(+), 15 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_lru_map.c
++++ b/tools/testing/selftests/bpf/test_lru_map.c
+@@ -138,6 +138,12 @@ static int sched_next_online(int pid, in
+ return ret;
+ }
+
++/* Derive target_free from map_size, same as bpf_common_lru_populate */
++static unsigned int __tgt_size(unsigned int map_size)
++{
++ return (map_size / nr_cpus) / 2;
++}
++
+ /* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
+ static unsigned int __map_size(unsigned int tgt_free)
+ {
+@@ -410,12 +416,12 @@ static void test_lru_sanity2(int map_typ
+ printf("Pass\n");
+ }
+
+-/* Size of the LRU map is 2*tgt_free
+- * It is to test the active/inactive list rotation
+- * Insert 1 to 2*tgt_free (+2*tgt_free keys)
+- * Lookup key 1 to tgt_free*3/2
+- * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
+- * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
++/* Test the active/inactive list rotation
++ *
++ * Fill the whole map, deplete the free list.
++ * Reference all except the last lru->target_free elements.
++ * Insert lru->target_free new elements. This triggers one shrink.
++ * Verify that the non-referenced elements are replaced.
+ */
+ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -434,8 +440,7 @@ static void test_lru_sanity3(int map_typ
+
+ assert(sched_next_online(0, &next_cpu) != -1);
+
+- batch_size = tgt_free / 2;
+- assert(batch_size * 2 == tgt_free);
++ batch_size = __tgt_size(tgt_free);
+
+ map_size = tgt_free * 2;
+ lru_map_fd = create_map(map_type, map_flags, map_size);
+@@ -446,23 +451,21 @@ static void test_lru_sanity3(int map_typ
+
+ value[0] = 1234;
+
+- /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
+- end_key = 1 + (2 * tgt_free);
++ /* Fill the map */
++ end_key = 1 + map_size;
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+
+- /* Lookup key 1 to tgt_free*3/2 */
+- end_key = tgt_free + batch_size;
++ /* Reference all but the last batch_size */
++ end_key = 1 + map_size - batch_size;
+ for (key = 1; key < end_key; key++) {
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+- /* Add 1+2*tgt_free to tgt_free*5/2
+- * (+tgt_free/2 keys)
+- */
++ /* Insert new batch_size: replaces the non-referenced elements */
+ key = 2 * tgt_free + 1;
+ end_key = key + batch_size;
+ for (; key < end_key; key++) {