tools/perf/util/bpf-event.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

#define ptr_to_u64(ptr)	((__u64)(unsigned long)(ptr))

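/*
 * Print the bytes in 'data' as lowercase hex into 'buf', returning the
 * number of characters written; used below to render BPF program tags.
 */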
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

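/*
 * Handle a PERF_RECORD_BPF_EVENT program-load event at report time: look up
 * the bpf_prog_info saved in perf_env and mark the dso of each jited ksym
 * address as a BPF program, so it can be annotated later.
 */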
static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = machine->env;
	int id = event->bpf.id;
	unsigned int i;

	/* perf-record, no need to handle bpf-event */
	if (env == NULL)
		return 0;

	info_node = perf_env__find_bpf_prog_info(env, id);
	if (!info_node)
		return 0;
	info_linear = info_node->info_linear;

	for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
		u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
		u64 addr = addrs[i];
		struct map *map = maps__find(&machine->kmaps, addr);

		if (map) {
			map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
			map->dso->bpf_prog.id = id;
			map->dso->bpf_prog.sub_id = i;
			map->dso->bpf_prog.env = env;
		}
	}
	return 0;
}

int machine__process_bpf(struct machine *machine, union perf_event *event,
			 struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_bpf(event, stdout);

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		return machine__process_bpf_event_load(machine, event, sample);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free the bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}
	return 0;
}

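/*
 * Copy the raw BTF data of 'btf' into a newly allocated btf_node and store
 * it in perf_env under 'btf_id' for later lookup.
 */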
static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	u32 data_size;
	const void *data;

	data = btf__get_raw_data(btf, &data_size);

	node = malloc(data_size + sizeof(struct btf_node));
	if (!node)
		return -1;

	node->id = btf_id;
	node->data_size = data_size;
	memcpy(node->data, data, data_size);

	perf_env__insert_btf(env, node);
	return 0;
}

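/*
 * Build the symbol name for sub-program 'sub_id': "bpf_prog_<tag>" plus an
 * optional "_<name>" suffix taken from BTF func info when available, from
 * bpf_prog_info.name for a single-program load, or "F" otherwise.
 */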
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
	struct perf_record_bpf_event *bpf_event = &event->bpf;
	struct bpf_prog_info_linear *info_linear;
	struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * for perf-record and perf-report use header.env;
	 * otherwise, use global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

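	/*
	 * Ask for the bpf_prog_info arrays used here and by annotation
	 * later: jited ksyms, func lengths and instructions, func/line
	 * info, and program tags.
	 */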
	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		info_linear = NULL;
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		return -1;
	}

	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		free(info_linear);
		return -2;
	}

	info = &info_linear->info;

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens) {
		free(info_linear);
		return -1;
	}

	/* check BTF func info support */
	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
		/* btf func info number should be same as sub_prog_cnt */
		if (sub_prog_cnt != info->nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			err = -1;
			goto out;
		}
		if (btf__get_from_id(info->btf_id, &btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
			err = -1;
			btf = NULL;
			goto out;
		}
		perf_env__fetch_btf(env, info->btf_id, btf);
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
		__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
		int name_len;

		*ksymbol_event = (struct perf_record_ksymbol) {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct perf_record_ksymbol, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};

		name_len = synthesize_bpf_prog_name(ksymbol_event->name,
						    KSYM_NAME_LEN, info, btf, i);
		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));

		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct perf_record_bpf_event) {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct perf_record_bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info->id,
		};
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
		info_linear = NULL;

		/*
		 * process after saving bpf_prog_info to env, so that
		 * required information is ready for look up
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(info_linear);
	btf__free(btf);
	return err ? -1 : 0;
}

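/*
 * Walk all BPF programs currently loaded in the kernel (via
 * bpf_prog_get_next_id()) and synthesize PERF_RECORD_KSYMBOL and
 * PERF_RECORD_BPF_EVENT records for each of them.
 */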
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}
	free(event);
	return err;
}

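/*
 * Fetch bpf_prog_info and BTF for the program with the given id and save
 * both in perf_env; called from the side-band callback on program load.
 */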
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	if (btf__get_from_id(btf_id, &btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	btf__free(btf);
	close(fd);
}

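/*
 * Side-band event callback: save info about BPF programs loaded while perf
 * is running, so they can be resolved at report time.
 */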
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
	struct perf_env *env = data;

	if (event->header.type != PERF_RECORD_BPF_EVENT)
		return -1;

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		perf_env__add_bpf_info(env, event->bpf.id);
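		/* fall through */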

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free the bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}

	return 0;
}

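/*
 * Add a dummy software event with attr.bpf_event set to the side-band
 * evlist, so that bpf_event__sb_cb() gets called for PERF_RECORD_BPF_EVENT
 * records generated by the kernel.
 */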
int bpf_event__add_sb_event(struct evlist **evlist,
			    struct perf_env *env)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_SOFTWARE,
		.config        = PERF_COUNT_SW_DUMMY,
		.sample_id_all = 1,
		.watermark     = 1,
		.bpf_event     = 1,
		.size          = sizeof(attr), /* to capture ABI version */
	};

	/*
	 * Older gcc versions don't support designated initializers, like above,
	 * for unnamed union members, such as the following:
	 */
	attr.wakeup_watermark = 1;

	return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

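/*
 * Print a "# bpf_prog_info ..." line for 'info' (plus one line per
 * sub-program) in perf's header-comment style, resolving symbol names via
 * the BTF saved in perf_env when available.
 */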
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
				    struct perf_env *env,
				    FILE *fp)
{
	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
	char name[KSYM_NAME_LEN];
	struct btf *btf = NULL;
	u32 sub_prog_cnt, i;

	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens)
		return;

	if (info->btf_id) {
		struct btf_node *node;

		node = perf_env__find_btf(env, info->btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	if (sub_prog_cnt == 1) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
			info->id, name, prog_addrs[0], prog_lens[0]);
		btf__free(btf);
		return;
	}

	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
	for (i = 0; i < sub_prog_cnt; i++) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
			i, name, prog_addrs[i], prog_lens[i]);
	}
	btf__free(btf);
}