From b977f96d0a414e76d4c544f65791919dde1bc57e Mon Sep 17 00:00:00 2001
From: Michael Jeanson <mjeanson@efficios.com>
Date: Mon, 17 Oct 2022 13:49:51 -0400
Subject: [PATCH] fix: mm/slab_common: drop kmem_alloc & avoid dereferencing
 fields when not using (v6.1)

See upstream commit:

  commit 2c1d697fb8ba6d2d44f914d4268ae1ccdf025f1b
  Author: Hyeonggon Yoo <42.hyeyoo@gmail.com>
  Date:   Wed Aug 17 19:18:24 2022 +0900

    mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using

    Drop kmem_alloc event class, and define kmalloc and kmem_cache_alloc
    using TRACE_EVENT() macro.

    And then this patch does:
    - Do not pass pointer to struct kmem_cache to trace_kmalloc.
      gfp flag is enough to know if it's accounted or not.
    - Avoid dereferencing s->object_size and s->size when not using kmem_cache_alloc event.
    - Avoid dereferencing s->name in when not using kmem_cache_free event.
    - Adjust s->size to SLOB_UNITS(s->size) * SLOB_UNIT in SLOB

Upstream-Status: Backport [commit b977f96d0a414e76d4c544f]

Change-Id: Icd7925731ed4a737699c3746cb7bb7760a4e8009
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
 include/instrumentation/events/kmem.h | 156 ++++++++++++++++++--------
 1 file changed, 111 insertions(+), 45 deletions(-)

diff --git a/include/instrumentation/events/kmem.h b/include/instrumentation/events/kmem.h
index 219533a1..0f5bd8e6 100644
--- a/include/instrumentation/events/kmem.h
+++ b/include/instrumentation/events/kmem.h
@@ -10,9 +10,58 @@
 #include <lttng/kernel-version.h>

 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0))
-
 #include <../../mm/slab.h>
+#endif
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,1,0))
+LTTNG_TRACEPOINT_EVENT_MAP(kmalloc,
+
+	kmem_kmalloc,
+
+	TP_PROTO(unsigned long call_site,
+		const void *ptr,
+		size_t bytes_req,
+		size_t bytes_alloc,
+		gfp_t gfp_flags,
+		int node),
+
+	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+
+	TP_FIELDS(
+		ctf_integer_hex(unsigned long, call_site, call_site)
+		ctf_integer_hex(const void *, ptr, ptr)
+		ctf_integer(size_t, bytes_req, bytes_req)
+		ctf_integer(size_t, bytes_alloc, bytes_alloc)
+		ctf_integer(gfp_t, gfp_flags, gfp_flags)
+		ctf_integer(int, node, node)
+		ctf_integer(bool, accounted, (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+			(gfp_flags & __GFP_ACCOUNT) ? true : false))
+	)
+)
+
+LTTNG_TRACEPOINT_EVENT(kmem_cache_alloc,
+
+	TP_PROTO(unsigned long call_site,
+		const void *ptr,
+		struct kmem_cache *s,
+		gfp_t gfp_flags,
+		int node),
+
+	TP_ARGS(call_site, ptr, s, gfp_flags, node),

+	TP_FIELDS(
+		ctf_integer_hex(unsigned long, call_site, call_site)
+		ctf_integer_hex(const void *, ptr, ptr)
+		ctf_integer(size_t, bytes_req, s->object_size)
+		ctf_integer(size_t, bytes_alloc, s->size)
+		ctf_integer(gfp_t, gfp_flags, gfp_flags)
+		ctf_integer(int, node, node)
+		ctf_integer(bool, accounted, IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+			((gfp_flags & __GFP_ACCOUNT) ||
+			(s->flags & SLAB_ACCOUNT)) : false)
+	)
+)
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0))
 LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc,

 	TP_PROTO(unsigned long call_site,
@@ -53,18 +102,16 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(kmem_alloc, kmem_cache_alloc,

 	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 )
-
-LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc_node,
+#else
+LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc,

 	TP_PROTO(unsigned long call_site,
 		const void *ptr,
-		struct kmem_cache *s,
 		size_t bytes_req,
 		size_t bytes_alloc,
-		gfp_t gfp_flags,
-		int node),
+		gfp_t gfp_flags),

-	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
+	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

 	TP_FIELDS(
 		ctf_integer_hex(unsigned long, call_site, call_site)
@@ -72,42 +119,40 @@ LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc_node,
 		ctf_integer(size_t, bytes_req, bytes_req)
 		ctf_integer(size_t, bytes_alloc, bytes_alloc)
 		ctf_integer(gfp_t, gfp_flags, gfp_flags)
-		ctf_integer(int, node, node)
-		ctf_integer(bool, accounted, IS_ENABLED(CONFIG_MEMCG_KMEM) ?
-			((gfp_flags & __GFP_ACCOUNT) ||
-			(s && s->flags & SLAB_ACCOUNT)) : false)
 	)
 )

-LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_alloc_node, kmalloc_node,
+LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_alloc, kmalloc,

-	kmem_kmalloc_node,
+	kmem_kmalloc,

 	TP_PROTO(unsigned long call_site, const void *ptr,
-		struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
-		gfp_t gfp_flags, int node),
+		size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

-	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
 )

-LTTNG_TRACEPOINT_EVENT_INSTANCE(kmem_alloc_node, kmem_cache_alloc_node,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kmem_alloc, kmem_cache_alloc,

 	TP_PROTO(unsigned long call_site, const void *ptr,
-		struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
-		gfp_t gfp_flags, int node),
+		size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

-	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
 )
-#else
-LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc,
+#endif
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0))
+LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc_node,

 	TP_PROTO(unsigned long call_site,
 		const void *ptr,
+		struct kmem_cache *s,
 		size_t bytes_req,
 		size_t bytes_alloc,
-		gfp_t gfp_flags),
+		gfp_t gfp_flags,
+		int node),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

 	TP_FIELDS(
 		ctf_integer_hex(unsigned long, call_site, call_site)
@@ -115,27 +160,33 @@ LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc,
 		ctf_integer(size_t, bytes_req, bytes_req)
 		ctf_integer(size_t, bytes_alloc, bytes_alloc)
 		ctf_integer(gfp_t, gfp_flags, gfp_flags)
+		ctf_integer(int, node, node)
+		ctf_integer(bool, accounted, IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+			((gfp_flags & __GFP_ACCOUNT) ||
+			(s && s->flags & SLAB_ACCOUNT)) : false)
 	)
 )

-LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_alloc, kmalloc,
+LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_alloc_node, kmalloc_node,

-	kmem_kmalloc,
+	kmem_kmalloc_node,

 	TP_PROTO(unsigned long call_site, const void *ptr,
-		size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
+		struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
+		gfp_t gfp_flags, int node),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 )

-LTTNG_TRACEPOINT_EVENT_INSTANCE(kmem_alloc, kmem_cache_alloc,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kmem_alloc_node, kmem_cache_alloc_node,

 	TP_PROTO(unsigned long call_site, const void *ptr,
-		size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
+		struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
+		gfp_t gfp_flags, int node),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 )
-
+#else
 LTTNG_TRACEPOINT_EVENT_CLASS(kmem_alloc_node,

 	TP_PROTO(unsigned long call_site,
@@ -192,19 +243,6 @@ LTTNG_TRACEPOINT_EVENT_MAP(kfree,
 		ctf_integer_hex(const void *, ptr, ptr)
 	)
 )
-
-LTTNG_TRACEPOINT_EVENT(kmem_cache_free,
-
-	TP_PROTO(unsigned long call_site, const void *ptr, const char *name),
-
-	TP_ARGS(call_site, ptr, name),
-
-	TP_FIELDS(
-		ctf_integer_hex(unsigned long, call_site, call_site)
-		ctf_integer_hex(const void *, ptr, ptr)
-		ctf_string(name, name)
-	)
-)
 #else
 LTTNG_TRACEPOINT_EVENT_CLASS(kmem_free,

@@ -235,6 +273,34 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(kmem_free, kmem_cache_free,
 )
 #endif

+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,1,0))
+LTTNG_TRACEPOINT_EVENT(kmem_cache_free,
+
+	TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),
+
+	TP_ARGS(call_site, ptr, s),
+
+	TP_FIELDS(
+		ctf_integer_hex(unsigned long, call_site, call_site)
+		ctf_integer_hex(const void *, ptr, ptr)
+		ctf_string(name, s->name)
+	)
+)
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,12,0))
+LTTNG_TRACEPOINT_EVENT(kmem_cache_free,
+
+	TP_PROTO(unsigned long call_site, const void *ptr, const char *name),
+
+	TP_ARGS(call_site, ptr, name),
+
+	TP_FIELDS(
+		ctf_integer_hex(unsigned long, call_site, call_site)
+		ctf_integer_hex(const void *, ptr, ptr)
+		ctf_string(name, name)
+	)
+)
+#endif
+
 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0))
 LTTNG_TRACEPOINT_EVENT_MAP(mm_page_free, kmem_mm_page_free,
 #else
-- 
2.19.1
