From: Andrey Konovalov
Date: Tue, 19 Dec 2023 22:28:46 +0000 (+0100)
Subject: kasan: move kasan_mempool_poison_object
X-Git-Tag: v6.8-rc1~180^2~112
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9b94fe91099cbf05606151ef05bea9632666f5d5;p=thirdparty%2Flinux.git

kasan: move kasan_mempool_poison_object

Move kasan_mempool_poison_object after all slab-related KASAN hooks.

This is a preparatory change for the following patches in this series.

No functional changes.

Link: https://lkml.kernel.org/r/23ea215409f43c13cdf9ecc454501a264c107d67.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov
Cc: Alexander Lobakin
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Breno Leitao
Cc: Dmitry Vyukov
Cc: Evgenii Stepanov
Cc: Marco Elver
Signed-off-by: Andrew Morton
---

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6310435f528b3..0d1f925c136d9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -172,13 +172,6 @@ static __always_inline void kasan_kfree_large(void *ptr)
 		__kasan_kfree_large(ptr, _RET_IP_);
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
-static __always_inline void kasan_mempool_poison_object(void *ptr)
-{
-	if (kasan_enabled())
-		__kasan_mempool_poison_object(ptr, _RET_IP_);
-}
-
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
 					void *object, gfp_t flags, bool init);
 static __always_inline void * __must_check kasan_slab_alloc(
@@ -219,6 +212,13 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
+void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+static __always_inline void kasan_mempool_poison_object(void *ptr)
+{
+	if (kasan_enabled())
+		__kasan_mempool_poison_object(ptr, _RET_IP_);
+}
+
 /*
  * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
  * the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -256,7 +256,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init
 	return false;
 }
 static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_mempool_poison_object(void *ptr) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				   gfp_t flags, bool init)
 {
@@ -276,6 +275,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
+static inline void kasan_mempool_poison_object(void *ptr) {}
 static inline bool kasan_check_byte(const void *address)
 {
 	return true;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e0394d0ee7f15..fc7f711607e18 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -282,29 +282,6 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 	____kasan_kfree_large(ptr, ip);
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
-{
-	struct folio *folio;
-
-	folio = virt_to_folio(ptr);
-
-	/*
-	 * Even though this function is only called for kmem_cache_alloc and
-	 * kmalloc backed mempool allocations, those allocations can still be
-	 * !PageSlab() when the size provided to kmalloc is larger than
-	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
-	 */
-	if (unlikely(!folio_test_slab(folio))) {
-		if (____kasan_kfree_large(ptr, ip))
-			return;
-		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
-	} else {
-		struct slab *slab = folio_slab(folio);
-
-		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
-	}
-}
-
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, void *object,
 					gfp_t flags, bool init)
 {
@@ -452,6 +429,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
+void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+{
+	struct folio *folio;
+
+	folio = virt_to_folio(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!folio_test_slab(folio))) {
+		if (____kasan_kfree_large(ptr, ip))
+			return;
+		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+	} else {
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
+	}
+}
+
 bool __kasan_check_byte(const void *address, unsigned long ip)
 {
 	if (!kasan_byte_accessible(address)) {
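
For context on where this hook sits, the sketch below (not part of the patch) shows a minimal kmalloc-backed mempool user: when mempool_free() returns an element to a non-full pool, mm/mempool.c poisons it through kasan_mempool_poison_object(). The pool name, element size, and module boilerplate are invented for illustration.

/*
 * Illustrative sketch only -- not part of this patch. A kmalloc-backed
 * mempool whose elements reach kasan_mempool_poison_object() via
 * kasan_poison_element() in mm/mempool.c when they are kept in the pool.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *demo_pool;	/* hypothetical pool for the example */

static int __init kasan_mempool_demo_init(void)
{
	void *elem;

	/* Four pre-allocated 128-byte elements backed by kmalloc. */
	demo_pool = mempool_create_kmalloc_pool(4, 128);
	if (!demo_pool)
		return -ENOMEM;

	elem = mempool_alloc(demo_pool, GFP_KERNEL);
	if (!elem) {
		mempool_destroy(demo_pool);
		return -ENOMEM;
	}

	/*
	 * If the pool is not full, the element is kept rather than freed;
	 * on that path KASAN poisons it through the hook moved by this
	 * patch, kasan_mempool_poison_object().
	 */
	mempool_free(elem, demo_pool);
	return 0;
}

static void __exit kasan_mempool_demo_exit(void)
{
	mempool_destroy(demo_pool);
}

module_init(kasan_mempool_demo_init);
module_exit(kasan_mempool_demo_exit);
MODULE_LICENSE("GPL");

As the comment in __kasan_mempool_poison_object() notes, an element size above KMALLOC_MAX_SIZE would instead take the !folio_test_slab() branch, since kmalloc falls back onto page_alloc for such sizes.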