From: Matthew Wilcox (Oracle)
Date: Thu, 13 Nov 2025 00:09:29 +0000 (+0000)
Subject: kasan: Remove references to folio in __kasan_mempool_poison_object()
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=bbe711730515f688a0bf4ab76a2639bcede933f9;p=thirdparty%2Flinux.git

kasan: Remove references to folio in __kasan_mempool_poison_object()

In preparation for splitting struct slab from struct page and struct
folio, remove mentions of struct folio from this function.  There is a
mild improvement for large kmalloc objects as we will avoid calling
compound_head() for them.  We can discard the comment as using
PageLargeKmalloc() rather than !folio_test_slab() makes it obvious.

Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: David Hildenbrand
Cc: Alexander Potapenko
Cc: Andrey Konovalov
Cc: Dmitry Vyukov
Cc: Vincenzo Frascino
Cc: kasan-dev
Link: https://patch.msgid.link/20251113000932.1589073-16-willy@infradead.org
Acked-by: Harry Yoo
Signed-off-by: Vlastimil Babka
---

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d4c14359feaf9..38e8bb0bf3266 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -520,24 +520,20 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
-	struct folio *folio = virt_to_folio(ptr);
+	struct page *page = virt_to_page(ptr);
 	struct slab *slab;
 
-	/*
-	 * This function can be called for large kmalloc allocation that get
-	 * their memory from page_alloc. Thus, the folio might not be a slab.
-	 */
-	if (unlikely(!folio_test_slab(folio))) {
+	if (unlikely(PageLargeKmalloc(page))) {
 		if (check_page_allocation(ptr, ip))
 			return false;
-		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+		kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
 		return true;
 	}
 
 	if (is_kfence_address(ptr))
 		return true;
 
-	slab = folio_slab(folio);
+	slab = page_slab(page);
 
 	if (check_slab_allocation(slab->slab_cache, ptr, ip))
 		return false;
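
For readability, here is a sketch of how __kasan_mempool_poison_object() reads
once the hunk above is applied, assembled only from the context and '+' lines
of the diff; the comments are added here for illustration and are not part of
the patch, and the tail of the function beyond the truncated hunk is omitted.

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct page *page = virt_to_page(ptr);
	struct slab *slab;

	/* Large kmalloc objects: poison the whole page-allocator backing. */
	if (unlikely(PageLargeKmalloc(page))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
		return true;
	}

	/* KFENCE-managed objects are not poisoned here. */
	if (is_kfence_address(ptr))
		return true;

	/* Otherwise this is a slab object; validate it against its cache. */
	slab = page_slab(page);

	if (check_slab_allocation(slab->slab_cache, ptr, ip))
		return false;
	/* ... remainder of the function lies outside the hunk ... */
}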