// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we do a bpf_prog_array_valid() check there
	 * to see whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we enter trace_call_bpf() and do the proper
	 * rcu_dereference() under the RCU lock; if prog_array turns
	 * out to be NULL by then, we bail out.
	 * Conversely, if the fetched pointer was NULL, the prog_array
	 * is skipped entirely, at the risk of missing events when it
	 * was updated between that check and the rcu_dereference(),
	 * which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
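
/*
 * Illustrative call-site sketch (simplified; the real caller lives in
 * kernel/trace/trace_kprobe.c): a kprobe perf handler drops the event
 * when the attached programs filter it out:
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;	// event filtered out, nothing stored
 */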

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = probe_user_read(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
			     const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
	      probe_kernel_read_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}
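
/*
 * Note on the "compat" flavor: it backs the legacy bpf_probe_read()
 * helper, which on architectures without non-overlapping address spaces
 * (see the CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE guard in
 * bpf_tracing_func_proto() below) may be handed either a user or a
 * kernel pointer, hence the weaker probe_kernel_read() instead of the
 * _strict variant.
 */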

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
				 const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	/*
	 * The strncpy_from_unsafe_*() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return probe_user_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
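
/*
 * Illustrative BPF-side usage (a sketch, not part of this file): a
 * tracing program could overwrite a buffer in the traced task with
 *
 *	char patch[] = "x";
 *	bpf_probe_write_user(user_ptr, patch, sizeof(patch));
 *
 * which is exactly why loading such a program emits the warning above.
 */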

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			buf[0] = 0;
			switch (fmt_ptype) {
			case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
				strncpy_from_unsafe(buf, unsafe_ptr,
						    sizeof(buf));
				break;
#endif
			case 'k':
				strncpy_from_unsafe_strict(buf, unsafe_ptr,
							   sizeof(buf));
				break;
			case 'u':
				strncpy_from_unsafe_user(buf,
					(__force void __user *)unsafe_ptr,
					sizeof(buf));
				break;
			}
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

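	/*
	 * At this point mod[n] encodes the width of argument n: 0 means a
	 * plain 32-bit conversion, 1 a single 'l' (pointer and string
	 * conversions are counted here as well, since they are passed as
	 * long), and 2 'll'. The macros below pick the matching cast per
	 * argument for both 32- and 64-bit archs.
	 */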
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};
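
/*
 * Illustrative BPF-side usage (a sketch): programs typically reach this
 * helper through libbpf's bpf_printk() convenience macro, e.g.
 *
 *	bpf_printk("pid %d\n", pid);
 *
 * The output shows up in /sys/kernel/debug/tracing/trace_pipe.
 */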

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}
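
/*
 * Note on the ambiguity above: a return value in [-22..-2] could be either
 * a genuine counter value or a negative errno, and the caller cannot tell
 * the two apart. bpf_perf_event_read_value() below avoids this by
 * reporting the error code and the counter through separate channels.
 */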

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}
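
/*
 * Illustrative BPF-side usage (a sketch): given a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * map named "events", a program streams a sample to the current CPU's ring
 * buffer via
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 *
 * and user space consumes it, e.g. with libbpf's perf_buffer__poll().
 */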

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
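
/*
 * Descriptive note: bpf_send_signal() targets the whole thread group of
 * the current task (PIDTYPE_TGID), while bpf_send_signal_thread() below
 * delivers to the calling thread only (PIDTYPE_PID). For example,
 * bpf_send_signal(SIGUSR1) from a probe signals the traced process.
 */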

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
#endif
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

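/*
 * Descriptive note on the rewrite below: programs read the uapi view
 * (struct bpf_perf_event_data), but at run time the context really is a
 * struct bpf_perf_event_data_kern. pe_prog_convert_ctx_access() therefore
 * turns each field access into two loads: one fetching the data/regs
 * pointer from the kern struct, and one loading the requested member
 * through that pointer.
 */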
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
1465 | ||
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

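/*
 * Back end of the PERF_EVENT_IOC_QUERY_BPF ioctl.  A minimal userspace
 * sketch (error handling omitted), assuming an open tracepoint perf fd:
 *
 *	struct perf_event_query_bpf *q =
 *		calloc(1, sizeof(*q) + 64 * sizeof(__u32));
 *	q->ids_len = 64;	// must be <= BPF_TRACE_MAX_PROGS
 *	ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q);
 *	// q->prog_cnt: number of attached programs; q->ids[]: their ids
 */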
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it explicitly since that case is
	 * handled gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

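/*
 * __start__bpf_raw_tp/__stop__bpf_raw_tp are linker-generated bounds of
 * the built-in __bpf_raw_tp section, which holds one bpf_raw_event_map
 * per tracepoint.  Module tracepoints live in per-module arrays instead
 * and are found (with a module reference taken) via
 * bpf_get_raw_tracepoint_module(); bpf_put_raw_tracepoint() drops that
 * reference again and is a no-op for built-in entries.
 */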
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

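/*
 * Common run path for the bpf_trace_runN() thunks below.  The program is
 * executed under rcu_read_lock(), which keeps it alive while it runs
 * (program freeing is RCU-deferred), and cant_sleep() annotates that this
 * never runs in a sleepable context.
 */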
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

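/*
 * Generate bpf_trace_run1() through bpf_trace_run12(): one exported thunk
 * per possible tracepoint argument count.  Each takes its arguments as
 * u64s, stores them into a stack array and hands that array to
 * __bpf_trace_run() as the program context.
 */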
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
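/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands to (whitespace added):
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */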

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * Check that the program doesn't access arguments beyond what's
	 * available in this tracepoint.
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

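/*
 * Back end of the BPF_TASK_FD_QUERY bpf(2) command for perf events:
 * report which program is attached to @event and, depending on the event
 * type, the tracepoint name or the kprobe/uprobe location it sits on.
 */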
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

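/*
 * Wire up the per-cpu irq_work entries used by the bpf_send_signal()
 * helper (defined earlier in this file) to defer signal delivery out of
 * hard-irq/NMI context.
 */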
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

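/*
 * Keep bpf_trace_modules in sync with module load/unload so that
 * bpf_get_raw_tracepoint_module() can search the raw-tracepoint maps of
 * every live module that carries any.
 */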
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */