// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))

	mutex_unlock(&bpf_module_mutex);

#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)

#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/*
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 *  0 - return from kprobe (event is filtered out)
 *  1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
	if (in_nmi()) /* not supported yet */

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array at the risk of missing
	 * events that were added in between this check and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

	__this_cpu_dec(bpf_prog_active);

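/*
 * Illustrative sketch (not part of this file): a kprobe program whose return
 * value is interpreted by trace_call_bpf() as documented above -- 0 filters
 * the event out of the ring buffer, 1 stores it. The traced symbol and the
 * filtering policy are assumptions for the example only (libbpf-style SEC
 * annotations assumed).
 *
 *	SEC("kprobe/ksys_read")
 *	int filter_reads(struct pt_regs *ctx)
 *	{
 *		// Only keep events where the read targets fd 0 (stdin).
 *		return PT_REGS_PARM1(ctx) == 0;
 *	}
 */
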
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
	int ret = probe_user_read(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

static const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

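/*
 * Illustrative sketch (not part of this file): how a tracing BPF program
 * would typically use the bpf_probe_read_user() helper defined above.
 * The section name and traced symbol are assumptions for the example only.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_openat(struct pt_regs *ctx)
 *	{
 *		char fname[64] = {};
 *		const char *user_ptr = (const char *)PT_REGS_PARM2(ctx);
 *
 *		// On error the helper zero-fills dst, so fname stays safe to use.
 *		bpf_probe_read_user(fname, sizeof(fname), user_ptr);
 *		return 0;
 *	}
 */
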
BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
			     const bool compat)
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))

	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
	      probe_kernel_read_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);

static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
				 const bool compat)
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))

	/*
	 * The strncpy_from_unsafe_*() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);

static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
	if (unlikely(uaccess_kernel()))
	if (unlikely(!nmi_uaccess_okay()))

	return probe_user_write(unsafe_ptr, src, size);

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
 */
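/*
 * Illustrative sketch (not part of this file): from the BPF program side the
 * helper below is normally reached through bpf_trace_printk() with the format
 * string living on the program stack. The section name and traced event are
 * assumptions for the example only.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_exec(void *ctx)
 *	{
 *		const char fmt[] = "execve by pid %d\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt),
 *				 bpf_get_current_pid_tgid() >> 32);
 *		return 0;
 *	}
 *
 * The output lands in /sys/kernel/debug/tracing/trace_pipe.
 */
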
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		} else if (fmt[i] == 'p') {
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
				fmt_ptype = fmt[i + 1];

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
		} else if (fmt[i] == 's') {
			/* allow only one '%s' per fmt string */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))

				unsafe_ptr = (void *)(long)arg1;
				unsafe_ptr = (void *)(long)arg2;
				unsafe_ptr = (void *)(long)arg3;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
			strncpy_from_unsafe(buf, unsafe_ptr,
			strncpy_from_unsafe_strict(buf, unsafe_ptr,
			strncpy_from_unsafe_user(buf,
						 (__force void __user *)unsafe_ptr,

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT() __BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
	if (index == BPF_F_CURRENT_CPU)
	if (unlikely(index >= array->map.max_entries))

	ee = READ_ONCE(array->ptrs[index]);

	return perf_event_read_local(ee->event, value, enabled, running);

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

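/*
 * Illustrative sketch (not part of this file): reading a perf counter from a
 * BPF program through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map. Map name, size and
 * attach point are assumptions for the example, and it presumes user space
 * has opened a counter per CPU and stored the fds into the map. Note the
 * caveat above that small negative error codes overlap valid counter values.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(max_entries, 64);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} cycles SEC(".maps");
 *
 *	SEC("kprobe/finish_task_switch")
 *	int count_cycles(struct pt_regs *ctx)
 *	{
 *		u64 val = bpf_perf_event_read(&cycles, BPF_F_CURRENT_CPU);
 *
 *		return 0;
 *	}
 */
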
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
	if (unlikely(size != sizeof(struct bpf_perf_event_value)))

	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,

	memset(buf, 0, size);

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
	if (unlikely(index >= array->map.max_entries))

	ee = READ_ONCE(array->ptrs[index]);

	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
	if (unlikely(event->oncpu != cpu))

	return perf_event_output(event, sd, regs);

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
	struct perf_sample_data *sd;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {

	perf_sample_data_init(sd, 0, 0);

	err = __bpf_perf_event_output(regs, map, flags, sd);

	this_cpu_dec(bpf_trace_nest_level);

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

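/*
 * Illustrative sketch (not part of this file): emitting an event to user space
 * with the bpf_perf_event_output() helper above. Map and struct names are
 * assumptions for the example; user space would read the samples with
 * perf_buffer__new() or an equivalent perf ring-buffer consumer.
 *
 *	struct event {
 *		u32 pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_exit")
 *	int on_exit(struct pt_regs *ctx)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */
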
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
	struct perf_raw_record raw = {
		.next	= ctx_size ? &frag : NULL,
	struct perf_sample_data *sd;
	struct pt_regs *regs;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {

	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);

	ret = __bpf_perf_event_output(regs, map, flags, sd);

	this_cpu_dec(bpf_event_output_nest_level);

BPF_CALL_0(bpf_get_current_task)
	return (long) current;

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (unlikely(idx >= array->map.max_entries))

	cgrp = READ_ONCE(array->ptrs[idx]);

	return task_under_cgroup_hierarchy(current, cgrp);

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);

static int bpf_send_signal_common(u32 sig, enum pid_type type)
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
	if (unlikely(uaccess_kernel()))
	if (unlikely(!nmi_uaccess_okay()))

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;

		irq_work_queue(&work->irq_work);

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);

BPF_CALL_1(bpf_send_signal, u32, sig)
	return bpf_send_signal_common(sig, PIDTYPE_TGID);

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
	return bpf_send_signal_common(sig, PIDTYPE_PID);

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

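/*
 * Illustrative sketch (not part of this file): sending a signal to the task
 * that triggered the probe via the helpers above. The attach point and the
 * policy (kill on very large writes) are assumptions for the example only.
 *
 *	SEC("kprobe/ksys_write")
 *	int kill_big_writers(struct pt_regs *ctx)
 *	{
 *		size_t count = (size_t)PT_REGS_PARM3(ctx);
 *
 *		if (count > (1UL << 30))
 *			bpf_send_signal(SIGKILL);	// whole thread group
 *		return 0;
 *	}
 *
 * bpf_send_signal_thread() behaves the same but targets only the current
 * thread (PIDTYPE_PID instead of PIDTYPE_TGID).
 */
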
const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif

	return bpf_tracing_func_proto(func_id, prog);

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
	if (off < 0 || off >= sizeof(struct pt_regs))
	if (type != BPF_READ)
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;

	return bpf_tracing_func_proto(func_id, prog);

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
	if (type != BPF_READ)
	if (off % size != 0)

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
	if (unlikely(size != sizeof(struct bpf_perf_event_value)))

	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,

	memset(buf, 0, size);

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
	if (unlikely(!br_stack))

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;

	return bpf_tracing_func_proto(func_id, prog);

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];

static void put_bpf_raw_tp_regs(void)
	this_cpu_dec(bpf_raw_tp_nest_level);

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
	struct pt_regs *regs = get_bpf_raw_tp_regs();

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
	struct pt_regs *regs = get_bpf_raw_tp_regs();

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,

	put_bpf_raw_tp_regs();

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
	struct pt_regs *regs = get_bpf_raw_tp_regs();

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;

	return bpf_tracing_func_proto(func_id, prog);

static const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;

	return raw_tp_prog_func_proto(func_id, prog);

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
	if (type != BPF_READ)
	if (off % size != 0)

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
	if (type != BPF_READ)
	if (off % size != 0)

	return btf_ctx_access(off, size, type, prog, info);

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto		= tracing_prog_func_proto,
	.is_valid_access	= tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run		= bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
	if (size != sizeof(u64) || type != BPF_READ)
	info->reg_type = PTR_TO_TP_BUFFER;

	return raw_tp_prog_is_valid_access(off, size, type, prog, info);

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
	if (type != BPF_READ)
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
		if (off % size != 4)

	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))

		if (size != sizeof(long))

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
	struct bpf_insn *insn = insn_buf;

	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,

	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,

	return insn - insn_buf;

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;

	/*
	 * Kprobe override only works if the probe is at the function entry,
	 * and only if the function is on the error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))

	mutex_lock(&bpf_event_mutex);

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);

	/* set the new array to event->tp_event and set event->prog */
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

	mutex_unlock(&bpf_event_mutex);

void perf_event_detach_bpf_prog(struct perf_event *event)
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;

	mutex_lock(&bpf_event_mutex);

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
		bpf_prog_array_delete_safe(old_array, event->prog);

	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

	bpf_prog_put(event->prog);

	mutex_unlock(&bpf_event_mutex);

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;

	if (!capable(CAP_SYS_ADMIN))
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
	if (copy_from_user(&query, uquery, sizeof(query)))

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))

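/*
 * Illustrative sketch (not part of this file): user space reaches the query
 * path above through the PERF_EVENT_IOC_QUERY_BPF ioctl on a perf event fd.
 * Error handling is omitted and the buffer size is an assumption.
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 16 * sizeof(__u32));
 *	query->ids_len = 16;
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		printf("%u programs attached\n", query->prog_cnt);
 *	free(query);
 */
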
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))

	return bpf_get_raw_tracepoint_module(name);

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
	struct module *mod = __module_address((unsigned long)btp);

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
	(void) BPF_PROG_RUN(prog, args);

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
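
/*
 * For reference (a sketch, not compiler output): BPF_TRACE_DEFN_x(2) above
 * expands to roughly the following, which is what the raw tracepoint glue
 * calls with the tracepoint arguments:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */
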
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
	if (prog->aux->max_tp_access > btp->writable_size)

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
	return __bpf_probe_register(btp, prog);

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;

#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif

static int __init send_signal_irq_work_init(void)
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))

	mutex_lock(&bpf_module_mutex);

	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		btm->module = module;
		list_add(&btm->list, &bpf_trace_modules);
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);

	mutex_unlock(&bpf_module_mutex);

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
	register_module_notifier(&bpf_module_nb);

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */