/* kernel/trace/bpf_trace.c (blame at commit "bpf: avoid excessive stack usage for perf_sample_data") */
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

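/*
 * Consumer sketch (hedged, not part of this file): condensed from the
 * perf kprobe handler in kernel/trace/trace_kprobe.c of the same era,
 * with the buffer plumbing elided. A 0 return from the BPF program
 * filters the event out before it reaches the perf ring buffer.
 */
static void kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;

	/* prog was attached via perf_event_open() + PERF_EVENT_IOC_SET_BPF */
	if (call->prog && !trace_call_bpf(call->prog, regs))
		return;

	/* ... perf_trace_buf_alloc() and perf_trace_buf_submit() elided ... */
}
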
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

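/*
 * Usage sketch (hedged, a separate BPF object rather than part of this
 * file): bpf_probe_read() from a kprobe program, built like samples/bpf
 * of this era (clang -target bpf, with SEC(), PT_REGS_PARM*() and the
 * helper stubs from bpf_helpers.h). Probed symbol and argument slot are
 * illustrative. The license section is required since the helper is
 * gpl_only; the later sketches assume the same boilerplate.
 */
SEC("kprobe/do_sys_open")
int bpf_prog_open(struct pt_regs *ctx)
{
	char fname[64] = {};

	/* on failure dst is zeroed, so fname is safe to use regardless */
	bpf_probe_read(fname, sizeof(fname), (void *)PT_REGS_PARM2(ctx));
	return 0;
}
char _license[] SEC("license") = "GPL";
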
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

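/*
 * Usage sketch (hedged, separate BPF object): bpf_probe_write_user() may
 * only patch the *current* task's user memory; the checks above make it
 * fail with -EPERM from irq context, from kthreads, or under KERNEL_DS.
 * Merely loading a program that calls it emits the ratelimited warning
 * from bpf_get_probe_write_proto(). Symbol and argument are illustrative.
 */
SEC("kprobe/sys_read")
int bpf_prog_patch(struct pt_regs *ctx)
{
	char msg[] = "bpf!";
	void *ubuf = (void *)PT_REGS_PARM2(ctx);	/* user buffer arg */

	bpf_probe_write_user(ubuf, msg, sizeof(msg));
	return 0;
}
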
/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

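/*
 * Usage sketch (hedged, separate BPF object): bpf_trace_printk() within
 * the limits enforced above: at most three conversion specifiers, only
 * one %s, and a NUL-terminated format string on the BPF stack. Output
 * appears in /sys/kernel/debug/tracing/trace_pipe.
 */
SEC("kprobe/sys_write")
int bpf_prog_printk(struct pt_regs *ctx)
{
	char fmt[] = "write fd=%d count=%lu\n";	/* on-stack, NUL included */

	bpf_trace_printk(fmt, sizeof(fmt),
			 (int)PT_REGS_PARM1(ctx), PT_REGS_PARM3(ctx));
	return 0;
}
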
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	u64 value = 0;
	int err;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	err = perf_event_read_local(ee->event, &value);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

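/*
 * Usage sketch (hedged, separate BPF object): reading a counter through
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY that user space populated with perf
 * event fds, one slot per cpu. Because the u64 return value multiplexes
 * errnos and counter values, readings in [-22, -2] are ambiguous, as the
 * comment above concedes.
 */
struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,	/* >= number of possible cpus */
};

SEC("kprobe/sys_getpid")
int bpf_prog_count(struct pt_regs *ctx)
{
	char fmt[] = "cycles=%llu\n";
	u64 cycles = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	bpf_trace_printk(fmt, sizeof(fmt), cycles);
	return 0;
}
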
static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);

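/*
 * Note on bpf_sd above: per the commit subject, the perf_sample_data
 * lives in a per-cpu buffer rather than on the stack of
 * __bpf_perf_event_output(), since the struct is large enough to bloat
 * the frame of every output path. This is safe because BPF programs run
 * with preemption disabled and recursion is fenced off by
 * bpf_prog_active (see trace_call_bpf() above).
 */
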
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = raw;
	perf_event_output(event, sd, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

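/*
 * Usage sketch (hedged, separate BPF object): streaming a fixed-size
 * record to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY; user
 * space opens one PERF_COUNT_SW_BPF_OUTPUT event per cpu and reads the
 * records from the mmap'ed perf rings.
 */
struct bpf_map_def SEC("maps") events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,	/* >= number of possible cpus */
};

struct rec {
	u32 pid;
	u64 ts;
};

SEC("kprobe/sys_clone")
int bpf_prog_emit(struct pt_regs *ctx)
{
	struct rec r = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.ts  = bpf_ktime_get_ns(),
	};

	/* BPF_F_CURRENT_CPU: deliver on the ring of the local cpu */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &r, sizeof(r));
	return 0;
}
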
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

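/*
 * Usage sketch (hedged, separate BPF object): restricting an event to
 * tasks inside one cgroup. User space stores a cgroup fd in slot 0 of a
 * BPF_MAP_TYPE_CGROUP_ARRAY before attaching the program.
 */
struct bpf_map_def SEC("maps") cgroups = {
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),
	.max_entries = 1,
};

SEC("kprobe/sys_openat")
int bpf_prog_cgfilter(struct pt_regs *ctx)
{
	/* 1: in hierarchy, 0: not, negative: error (e.g. -EAGAIN) */
	if (bpf_current_task_under_cgroup(&cgroups, 0) != 1)
		return 0;

	/* ... handle the event for in-cgroup tasks only ... */
	return 0;
}
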
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

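/*
 * Usage sketch (hedged, separate BPF object): as the comment above
 * suggests, the returned length tells the program how much of the
 * buffer holds string bytes. "events" is the perf event array map from
 * the earlier sketch; the constant size below keeps older verifiers
 * happy, while newer ones can also take a bounded variable length.
 */
SEC("kprobe/do_sys_open")
int bpf_prog_fname_str(struct pt_regs *ctx)
{
	char fname[64];
	int len = bpf_probe_read_str(fname, sizeof(fname),
				     (void *)PT_REGS_PARM2(ctx));

	if (len > 0)	/* len includes the trailing NUL on success */
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      fname, sizeof(fname));
	return 0;
}
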
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

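/*
 * Access sketch (hedged, separate BPF object) of what
 * kprobe_prog_is_valid_access() permits: the kprobe program's ctx *is*
 * the struct pt_regs, so aligned in-bounds loads of its fields need no
 * helper call, while stores are rejected (BPF_READ only). The field
 * name below is the x86-64 one and is illustrative.
 */
SEC("kprobe/sys_write")
int bpf_prog_regs(struct pt_regs *ctx)
{
	char fmt[] = "ip=%lx\n";
	long ip = ctx->ip;	/* direct ctx load, bounds-checked by verifier */

	bpf_trace_printk(fmt, sizeof(fmt), ip);
	return 0;
}
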
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

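/*
 * Layout sketch (hedged, separate BPF object) of what
 * tp_prog_is_valid_access() encodes: offsets below sizeof(void *) are
 * rejected because the first 8 bytes of the tracepoint buffer hide the
 * pt_regs pointer consumed by the _tp helpers above. Field offsets after
 * that follow the tracepoint's tracefs format file; the names and
 * offsets here are illustrative for syscalls/sys_enter_write.
 */
struct sys_enter_write_ctx {
	unsigned long long pad;		/* hidden struct pt_regs *, unreadable */
	int syscall_nr;			/* offset 8, per the format file */
	unsigned long fd;		/* syscall args occupy 8-byte slots */
	/* ... further fields per the format file ... */
};

SEC("tracepoint/syscalls/sys_enter_write")
int bpf_prog_tp(struct sys_enter_write_ctx *ctx)
{
	char fmt[] = "write fd=%lu\n";

	bpf_trace_printk(fmt, sizeof(fmt), ctx->fd);
	return 0;
}
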
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
		if (size != sizeof(u64))
			return false;
	} else {
		if (size != sizeof(long))
			return false;
	}
	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      offsetof(struct perf_sample_data, period));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};
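
/*
 * Usage sketch (hedged, separate BPF object): a perf_event program whose
 * ctx loads are rewritten by pe_prog_convert_ctx_access() above:
 * ctx->sample_period becomes a two-instruction load of
 * kern->data->period, and reads of ctx->regs become loads through
 * kern->regs. Attached from user space via PERF_EVENT_IOC_SET_BPF on a
 * sampling perf event.
 */
#include <uapi/linux/bpf_perf_event.h>

SEC("perf_event")
int bpf_prog_pe(struct bpf_perf_event_data *ctx)
{
	char fmt[] = "period=%llu ip=%lx\n";

	/* both ctx accesses are rewritten at verification time */
	bpf_trace_printk(fmt, sizeof(fmt),
			 ctx->sample_period, (long)PT_REGS_IP(&ctx->regs));
	return 0;
}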