mm/vmalloc: prevent RCU stalls in kasan_release_vmalloc_node
author     Deepanshu Kartikey <kartikey406@gmail.com>
           Mon, 12 Jan 2026 10:36:12 +0000 (16:06 +0530)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 27 Jan 2026 04:02:27 +0000 (20:02 -0800)
When CONFIG_PAGE_OWNER is enabled, freeing KASAN shadow pages during
vmalloc cleanup triggers expensive stack unwinding that acquires RCU read
locks.  Processing a large purge_list without rescheduling can cause the
task to hold the CPU for extended periods (10+ seconds), leading to RCU
stalls and potential OOM conditions.

The issue manifests in purge_vmap_node() -> kasan_release_vmalloc_node()
where iterating through hundreds or thousands of vmap_area entries and
freeing their associated shadow pages causes:

  rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
  rcu: Tasks blocked on level-0 rcu_node (CPUs 0-1): P6229/1:b..l
  ...
  task:kworker/0:17 state:R running task stack:28840 pid:6229
  ...
  kasan_release_vmalloc_node+0x1ba/0xad0 mm/vmalloc.c:2299
  purge_vmap_node+0x1ba/0xad0 mm/vmalloc.c:2299

Each call to kasan_release_vmalloc() can free many pages, and with
page_owner tracking, each free triggers save_stack() which performs stack
unwinding under RCU read lock.  Without yielding, this creates an
unbounded RCU critical section.
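
To make the shape of the problem concrete, here is a simplified sketch of
the pre-fix loop (condensed; free_shadow_pages() is a hypothetical helper
standing in for the kasan_release_vmalloc() call chain):

  /*
   * Sketch of the pre-fix pattern: each entry's shadow-page frees record
   * stack traces via save_stack() under rcu_read_lock() when
   * CONFIG_PAGE_OWNER is enabled, and the loop contains no scheduling
   * point, so one task can hold the CPU for the whole purge_list.
   */
  list_for_each_entry(va, &vn->purge_list, list)
          free_shadow_pages(va->va_start, va->va_end);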

Add periodic cond_resched() calls within the loop to allow:
- RCU grace periods to complete
- Other tasks to run
- Scheduler to preempt when needed

The fix checks need_resched() for an immediate response under load, and
uses a batch count of 32 as a guaranteed upper bound on the number of
iterations between scheduling points, so worst-case stalls are avoided
even under light load.

Link: https://lkml.kernel.org/r/20260112103612.627247-1-kartikey406@gmail.com
Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
Reported-by: syzbot+d8d4c31d40f868eaea30@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=d8d4c31d40f868eaea30
Link: https://lore.kernel.org/all/20260112084723.622910-1-kartikey406@gmail.com/T/
Suggested-by: Uladzislau Rezki <urezki@gmail.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 32d6ee92d4ff88cfdb52c5b262e8208a600a358e..ca4c653286870dacd1713c908d307fd83a5d7f96 100644
@@ -2273,11 +2273,14 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
        reclaim_list_global(&decay_list);
 }
 
+#define KASAN_RELEASE_BATCH_SIZE 32
+
 static void
 kasan_release_vmalloc_node(struct vmap_node *vn)
 {
        struct vmap_area *va;
        unsigned long start, end;
+       unsigned int batch_count = 0;
 
        start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
        end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
@@ -2287,6 +2290,11 @@ kasan_release_vmalloc_node(struct vmap_node *vn)
                        kasan_release_vmalloc(va->va_start, va->va_end,
                                va->va_start, va->va_end,
                                KASAN_VMALLOC_PAGE_RANGE);
+
+               if (need_resched() || (++batch_count >= KASAN_RELEASE_BATCH_SIZE)) {
+                       cond_resched();
+                       batch_count = 0;
+               }
        }
 
        kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);