From a8146d43007331bebbe675fd3acbcd6a46342a37 Mon Sep 17 00:00:00 2001
From: Andrey Konovalov <andreyknvl@google.com>
Date: Wed, 20 Feb 2019 22:19:23 -0800
Subject: kasan, slub: move kasan_poison_slab hook before page_address

[ Upstream commit a71012242837fe5e67d8c999cfc357174ed5dba0 ]

With tag-based KASAN, page_address() looks at the page flags to see
whether the resulting pointer needs to have a tag set. Since we don't
want to set a tag when page_address() is called on SLAB pages, we call
page_kasan_tag_reset() in kasan_poison_slab(). However, in
allocate_slab(), page_address() is called before kasan_poison_slab().
Fix this by changing the order.

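To make the ordering problem concrete, here is a minimal userspace
sketch of the failure mode. It models only the tag-in-pointer behaviour
of software tag-based KASAN; every identifier in it (fake_page,
fake_page_address(), fake_kasan_poison_slab()) is invented for
illustration and is not kernel code:

/*
 * Minimal model: a "page" keeps a KASAN tag in its flags, and
 * page_address() stamps that tag into the top byte of the pointer
 * it returns, as software tag-based KASAN does on arm64.
 */
#include <stdio.h>
#include <stdint.h>

#define FAKE_TAG_KERNEL 0xff	/* match-all tag, i.e. "no tag" */

struct fake_page {
	uint8_t kasan_tag;	/* lives in page->flags in the kernel */
	uintptr_t base;		/* untagged address of the page */
};

/* Models page_address(): embeds the stored tag in bits 63:56. */
static void *fake_page_address(struct fake_page *page)
{
	return (void *)(page->base | ((uintptr_t)page->kasan_tag << 56));
}

/* Models kasan_poison_slab() calling page_kasan_tag_reset(). */
static void fake_kasan_poison_slab(struct fake_page *page)
{
	page->kasan_tag = FAKE_TAG_KERNEL;
}

int main(void)
{
	struct fake_page page = { .kasan_tag = 0x2a, .base = 0x4000 };

	/* Old order: the pointer is taken while a stale tag is set. */
	void *stale = fake_page_address(&page);

	/* New order: reset the tag first, then take the address. */
	fake_kasan_poison_slab(&page);
	void *clean = fake_page_address(&page);

	printf("page_address() before poison: %p\n", stale);
	printf("page_address() after  poison: %p\n", clean);
	return 0;
}

Carried over to the kernel, the point is the same: with the old order,
start kept whatever tag the page had before the reset, which no longer
matched the state kasan_poison_slab() establishes; resetting first
means page_address() already sees the final tag.
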
[andreyknvl@google.com: fix compilation error when CONFIG_SLUB_DEBUG=n]
Link: http://lkml.kernel.org/r/ac27cc0bbaeb414ed77bcd6671a877cf3546d56e.1550066133.git.andreyknvl@google.com
Link: http://lkml.kernel.org/r/cd895d627465a3f1c712647072d17f10883be2a1.1549921721.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgeniy Stepanov <eugenis@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/slub.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 220d42e592ef..f14ef59c9e57 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1087,6 +1087,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+	if (!(s->flags & SLAB_POISON))
+		return;
+
+	metadata_access_enable();
+	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	metadata_access_disable();
+}
+
 static inline int alloc_consistency_checks(struct kmem_cache *s,
 					struct page *page,
 					void *object, unsigned long addr)
@@ -1304,6 +1314,8 @@ unsigned long kmem_cache_flags(unsigned long object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+			void *addr, int order) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1599,12 +1611,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (page_is_pfmemalloc(page))
 		SetPageSlabPfmemalloc(page);
 
-	start = page_address(page);
+	kasan_poison_slab(page);
 
-	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << order);
+	start = page_address(page);
 
-	kasan_poison_slab(page);
+	setup_page_debug(s, start, order);
 
 	shuffle = shuffle_freelist(s, page);
 
-- 
2.19.1
