From 75a3203c35965f5569f845d690a2e061ab1a4b46 Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Fri, 5 Apr 2019 18:38:49 -0700
Subject: kmemleak: powerpc: skip scanning holes in the .bss section

[ Upstream commit 298a32b132087550d3fa80641ca58323c5dfd4d9 ]

Commit 2d4f567103ff ("KVM: PPC: Introduce kvm_tmp framework") adds
kvm_tmp[] into the .bss section and then frees the rest of the unused
space back to the page allocator.

  kernel_init
    kvm_guest_init
      kvm_free_tmp
        free_reserved_area
          free_unref_page
            free_unref_page_prepare

With DEBUG_PAGEALLOC=y, this will unmap those pages from the kernel. As
a result, the kmemleak scan will trigger a panic when it scans the .bss
section and hits the unmapped pages.

This patch creates dedicated kmemleak objects for the .data, .bss and
potentially .data..ro_after_init sections to allow partial freeing via
kmemleak_free_part() in the powerpc kvm_free_tmp() function.
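
For illustration only (this sketch is not part of the patch), the
resulting pattern is roughly the following; hole_start and hole_size
are placeholder names for the freed range:

  /* boot: kmemleak_init() registers the whole .bss as one grey object */
  create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
                KMEMLEAK_GREY, GFP_ATOMIC);

  /*
   * later: arch code punches a hole in that object before handing the
   * pages back to the page allocator via free_reserved_area()
   */
  kmemleak_free_part(hole_start, hole_size);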

Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Qian Cai <cai@lca.pw>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Tested-by: Qian Cai <cai@lca.pw>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin (Microsoft) <sashal@kernel.org>
---
 arch/powerpc/kernel/kvm.c |  7 +++++++
 mm/kmemleak.c             | 16 +++++++++++-----
 2 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 683b5b3805bd..cd381e2291df 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+	/*
+	 * Inform kmemleak about the hole in the .bss section since the
+	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+	 */
+	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
 	free_reserved_area(&kvm_tmp[kvm_tmp_index],
 			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 707fa5579f66..6c318f5ac234 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1529,11 +1529,6 @@ static void kmemleak_scan(void)
 	}
 	rcu_read_unlock();
 
-	/* data/bss scanning */
-	scan_large_block(_sdata, _edata);
-	scan_large_block(__bss_start, __bss_stop);
-	scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
 	for_each_possible_cpu(i)
@@ -2071,6 +2066,17 @@ void __init kmemleak_init(void)
 	}
 	local_irq_restore(flags);
 
+	/* register the data/bss sections */
+	create_object((unsigned long)_sdata, _edata - _sdata,
+		      KMEMLEAK_GREY, GFP_ATOMIC);
+	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+		      KMEMLEAK_GREY, GFP_ATOMIC);
+	/* only register .data..ro_after_init if not within .data */
+	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+		create_object((unsigned long)__start_ro_after_init,
+			      __end_ro_after_init - __start_ro_after_init,
+			      KMEMLEAK_GREY, GFP_ATOMIC);
+
 	/*
 	 * This is the point where tracking allocations is safe. Automatic
 	 * scanning is started during the late initcall. Add the early logged
-- 
2.20.1
