/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES            = 9,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};
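
/*
 * Example (not part of the ABI): a minimal, illustrative sketch of counting
 * one of the generalized hardware events above for the calling thread.
 * There is no glibc wrapper for perf_event_open(), so the raw syscall is
 * used; error handling and the required #includes are omitted:
 *
 *      struct perf_event_attr attr = { 0 };
 *      __u64 count;
 *      int fd;
 *
 *      attr.type     = PERF_TYPE_HARDWARE;
 *      attr.size     = sizeof(attr);
 *      attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ...                             // workload being measured
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 */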

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,
        PERF_COUNT_HW_CACHE_NODE                = 6,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
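
/*
 * Example (not part of the ABI, see also perf_event_open(2)): for
 * PERF_TYPE_HW_CACHE events the three ids above are combined into
 * attr.config as
 *
 *      (perf_hw_cache_id) | (perf_hw_cache_op_id << 8) |
 *      (perf_hw_cache_op_result_id << 16)
 *
 * so L1 data-cache read misses, for instance, would be:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */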

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,
        PERF_COUNT_SW_DUMMY                     = 9,
        PERF_COUNT_SW_BPF_OUTPUT                = 10,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK                = 1U << 11,
        PERF_SAMPLE_REGS_USER                   = 1U << 12,
        PERF_SAMPLE_STACK_USER                  = 1U << 13,
        PERF_SAMPLE_WEIGHT                      = 1U << 14,
        PERF_SAMPLE_DATA_SRC                    = 1U << 15,
        PERF_SAMPLE_IDENTIFIER                  = 1U << 16,
        PERF_SAMPLE_TRANSACTION                 = 1U << 17,
        PERF_SAMPLE_REGS_INTR                   = 1U << 18,
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,

        PERF_SAMPLE_MAX                         = 1U << 20,  /* non-ABI */

        __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
};
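
/*
 * Example (not part of the ABI): a sampling event typically combines several
 * of the bits above, e.g. instruction pointer, pid/tid and a timestamp with
 * every overflow record, at roughly 1 kHz:
 *
 *      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
 *      attr.freq        = 1;
 *      attr.sample_freq = 1000;
 *
 * The layout of the resulting PERF_RECORD_SAMPLE records is described in
 * enum perf_event_type below.
 */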

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_USER_SHIFT           = 0,  /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL_SHIFT         = 1,  /* kernel branches */
        PERF_SAMPLE_BRANCH_HV_SHIFT             = 2,  /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY_SHIFT            = 3,  /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT       = 4,  /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT     = 5,  /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL_SHIFT       = 6,  /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT       = 7,  /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX_SHIFT          = 8,  /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX_SHIFT          = 9,  /* not in transaction */
        PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */

        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
        PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT       = 12, /* indirect jumps */
        PERF_SAMPLE_BRANCH_CALL_SHIFT           = 13, /* direct call */

        PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT       = 14, /* no flags */
        PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT      = 15, /* no cycles */

        PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT      = 16, /* save branch type */

        PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
};

enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER         = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
        PERF_SAMPLE_BRANCH_KERNEL       = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
        PERF_SAMPLE_BRANCH_HV           = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

        PERF_SAMPLE_BRANCH_ANY          = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
        PERF_SAMPLE_BRANCH_IND_CALL     = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
        PERF_SAMPLE_BRANCH_IN_TX        = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
        PERF_SAMPLE_BRANCH_NO_TX        = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
        PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

        PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
        PERF_SAMPLE_BRANCH_IND_JUMP     = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
        PERF_SAMPLE_BRANCH_CALL         = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

        PERF_SAMPLE_BRANCH_NO_FLAGS     = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
        PERF_SAMPLE_BRANCH_NO_CYCLES    = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

        PERF_SAMPLE_BRANCH_TYPE_SAVE    = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

        PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

/*
 * Common flow change classification
 */
enum {
        PERF_BR_UNKNOWN         = 0,    /* unknown */
        PERF_BR_COND            = 1,    /* conditional */
        PERF_BR_UNCOND          = 2,    /* unconditional */
        PERF_BR_IND             = 3,    /* indirect */
        PERF_BR_CALL            = 4,    /* function call */
        PERF_BR_IND_CALL        = 5,    /* indirect function call */
        PERF_BR_RET             = 6,    /* function return */
        PERF_BR_SYSCALL         = 7,    /* syscall */
        PERF_BR_SYSRET          = 8,    /* syscall return */
        PERF_BR_COND_CALL       = 9,    /* conditional function call */
        PERF_BR_COND_RET        = 10,   /* conditional function return */
        PERF_BR_MAX,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)
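
/*
 * Example (not part of the ABI): to sample the most recent branches taken
 * in user space, of any type, an event could request:
 *
 *      attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *      attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                                 PERF_SAMPLE_BRANCH_ANY;
 *
 * Each sample then carries an array of struct perf_branch_entry records
 * (defined at the end of this file).
 */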

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE       = 0,
        PERF_SAMPLE_REGS_ABI_32         = 1,
        PERF_SAMPLE_REGS_ABI_64         = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
        PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* bits 32..63 are reserved for the abort code */

        PERF_TXN_ABORT_MASK     = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT    = 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED  = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING  = 1U << 1,
        PERF_FORMAT_ID                  = 1U << 2,
        PERF_FORMAT_GROUP               = 1U << 3,

        PERF_FORMAT_MAX                 = 1U << 4, /* non-ABI */
};
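
/*
 * Example (not part of the ABI): a non-group event that enabled both time
 * fields can be read into a matching local struct:
 *
 *      struct {
 *              __u64 value;
 *              __u64 time_enabled;
 *              __u64 time_running;
 *      } rf;
 *
 *      attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *                         PERF_FORMAT_TOTAL_TIME_RUNNING;
 *      ...
 *      read(fd, &rf, sizeof(rf));
 *      // If the event was multiplexed, time_running < time_enabled and the
 *      // raw value can be scaled by time_enabled / time_running.
 */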

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1     72      /* add: config2 */
#define PERF_ATTR_SIZE_VER2     80      /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3     96      /* add: sample_regs_user */
                                        /* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4     104     /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5     112     /* add: aux_watermark */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *                    should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */

                                exclude_host   :  1, /* don't count in host   */
                                exclude_guest  :  1, /* don't count in guest  */

                                exclude_callchain_kernel : 1, /* exclude kernel callchains */
                                exclude_callchain_user   : 1, /* exclude user callchains */
                                mmap2          :  1, /* include mmap with inode data */
                                comm_exec      :  1, /* flag comm events that are due to an exec */
                                use_clockid    :  1, /* use @clockid for time fields */
                                context_switch :  1, /* context switch data */
                                write_backward :  1, /* Write ring buffer from end to beginning */
                                namespaces     :  1, /* include namespaces data */
                                ksymbol        :  1; /* include ksymbol events */

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        union {
                __u64           kprobe_func;  /* for perf_kprobe */
                __u64           uprobe_path;  /* for perf_uprobe */
                __u64           config1;      /* extension of config */
        };
        union {
                __u64           kprobe_addr;  /* when kprobe_func == NULL */
                __u64           probe_offset; /* for perf_[k,u]probe */
                __u64           config2;      /* extension of config1 */
        };
        __u64   branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32   sample_stack_user;

        __s32   clockid;

        /*
         * Defines set of regs to dump for each sample
         * state captured on:
         *  - precise = 0: PMU interrupt
         *  - precise > 0: sampled instruction
         *
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_intr;

        /*
         * Wakeup watermark for AUX area
         */
        __u32   aux_watermark;

        __u16   sample_max_stack;
        __u16   __reserved_2;   /* align to __u64 */
};

/*
 * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
 * to query bpf programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
        /*
         * The below ids array length
         */
        __u32   ids_len;
        /*
         * Set by the kernel to indicate the number of
         * available programs
         */
        __u32   prog_cnt;
        /*
         * User provided buffer to store program ids
         */
        __u32   ids[0];
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE                   _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE                  _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH                  _IO ('$', 2)
#define PERF_EVENT_IOC_RESET                    _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD                   _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT               _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER               _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID                       _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF                  _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT             _IOW('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF                _IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES        _IOW('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
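
/*
 * Example (not part of the ABI): a typical self-measurement pattern resets
 * and enables the event around the region of interest, then reads the count:
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ...                             // region being measured
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the argument applies the operation to all
 * events in the group instead of just this one.
 */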

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier()
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_usr_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     index = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reason this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

        __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */

                cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
                cap_user_time           : 1, /* The time_* fields are used */
                cap_user_time_zero      : 1; /* The time_zero field is used */

        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         */
        __u16   pmc_width;

        /*
         * If cap_usr_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   quot = (cyc >> time_shift);
         *   rem  = cyc & (((u64)1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *              ((rem * time_mult) >> time_shift);
         *
         * Where time_offset,time_mult,time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possible running (if index), improving the scaling:
         *
         *   quot = count / running;
         *   rem  = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16   time_shift;
        __u32   time_mult;
        __u64   time_offset;

        /*
         * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
         * from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot = cyc >> time_shift;
         *   rem  = cyc & (((u64)1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;

        __u32   size;                   /* Header size up to __reserved[] fields. */

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u8    __reserved[118*8+4];    /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an smp_rmb(),
         * after reading this value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not over-write unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         *
         * data_{offset,size} indicate the location and size of the perf record
         * buffer within the mmapped area.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
        __u64   data_offset;            /* where the buffer starts */
        __u64   data_size;              /* data buffer size */
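
        /*
         * Example (not part of the ABI): a minimal sketch of consuming the
         * data buffer from user space; smp_rmb()/smp_mb() stand for whatever
         * read/full memory barrier the consumer's environment provides:
         *
         *      struct perf_event_mmap_page *pc = base;  // first mmap()ed page
         *      char *buf = (char *)base + pc->data_offset;
         *      __u64 head, tail = pc->data_tail;
         *
         *      head = pc->data_head;
         *      smp_rmb();                      // order record reads after head
         *      while (tail != head) {
         *              struct perf_event_header *hdr =
         *                      (void *)(buf + (tail % pc->data_size));
         *              // ... consume the record, minding buffer wrap-around ...
         *              tail += hdr->size;
         *      }
         *      smp_mb();                       // finish reads before freeing space
         *      pc->data_tail = tail;
         */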

        /*
         * AUX area is defined by aux_{offset,size} fields that should be set
         * by the userspace, so that
         *
         *   aux_offset >= data_offset + data_size
         *
         * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
         *
         * Ring buffer pointers aux_{head,tail} have the same semantics as
         * data_{head,tail} and same ordering rules apply.
         */
        __u64   aux_head;
        __u64   aux_tail;
        __u64   aux_offset;
        __u64   aux_size;
};

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
 * The following PERF_RECORD_MISC_* flags are used on different
 * events, so they can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA              (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC              (1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC              (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT             (1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that the thread was preempted in TASK_RUNNING state.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT     (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

struct perf_ns_link_info {
        __u64   dev;
        __u64   ino;
};

enum {
        NET_NS_INDEX            = 0,
        UTS_NS_INDEX            = 1,
        IPC_NS_INDEX            = 2,
        PID_NS_INDEX            = 3,
        USER_NS_INDEX           = 4,
        MNT_NS_INDEX            = 5,
        CGROUP_NS_INDEX         = 6,

        NR_NAMESPACES,          /* number of available namespaces */
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
         * just after the perf_event_header and the fields already present for
         * the existing fields, i.e. at the end of the payload. That way a newer
         * perf.data file will be supported by older perf tools, with these new
         * optional fields being ignored.
         *
         * struct sample_id {
         *	{ u32			pid, tid;  } && PERF_SAMPLE_TID
         *	{ u64			time;      } && PERF_SAMPLE_TIME
         *	{ u64			id;        } && PERF_SAMPLE_ID
         *	{ u64			stream_id; } && PERF_SAMPLE_STREAM_ID
         *	{ u32			cpu, res;  } && PERF_SAMPLE_CPU
         *	{ u64			id;        } && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct read_format		values;
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *	struct perf_event_header	header;
         *
         *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *	# is fixed relative to header.
         *
         *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
         *	{ u64			ip;	  } && PERF_SAMPLE_IP
         *	{ u32			pid, tid; } && PERF_SAMPLE_TID
         *	{ u64			time;     } && PERF_SAMPLE_TIME
         *	{ u64			addr;     } && PERF_SAMPLE_ADDR
         *	{ u64			id;	  } && PERF_SAMPLE_ID
         *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
         *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
         *	{ u64			period;   } && PERF_SAMPLE_PERIOD
         *
         *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
         *
         *	{ u64			nr,
         *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *	# The RAW record below is opaque data wrt the ABI
         *	#
         *	# That is, the ABI doesn't make any promises wrt to
         *	# the stability of its content, it may vary depending
         *	# on event, hardware, kernel version and phase of
         *	# the moon.
         *	#
         *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *
         *	{ u32			size;
         *	  char			data[size];}&& PERF_SAMPLE_RAW
         *
         *	{ u64			nr;
         *	  { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
         *
         *	{ u64			abi; # enum perf_sample_regs_abi
         *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *	{ u64			size;
         *	  char			data[size];
         *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
         *
         *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
         *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
         *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
         *	{ u64			abi; # enum perf_sample_regs_abi
         *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
         *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        /*
         * The MMAP2 records are an augmented version of MMAP, they add
         * maj, min, ino numbers to be used to uniquely identify each mapping
         *
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	u64				ino_generation;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_MMAP2                       = 10,

        /*
         * Records that new data landed in the AUX buffer part.
         *
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_AUX                         = 11,

        /*
         * Indicates that instruction trace has started
         *
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_ITRACE_START                = 12,

        /*
         * Records the dropped/lost sample number.
         *
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_LOST_SAMPLES                = 13,

        /*
         * Records a context switch in or out (flagged by
         * PERF_RECORD_MISC_SWITCH_OUT). See also
         * PERF_RECORD_SWITCH_CPU_WIDE.
         *
         * struct {
         *	struct perf_event_header	header;
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_SWITCH                      = 14,

        /*
         * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
         * next_prev_tid that are the next (switching out) or previous
         * (switching in) pid/tid.
         *
         * struct {
         *	struct perf_event_header	header;
         *	u32				next_prev_pid;
         *	u32				next_prev_tid;
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_SWITCH_CPU_WIDE             = 15,

        /*
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	u64				nr_namespaces;
         *	{ u64				dev, inode; } [nr_namespaces];
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_NAMESPACES                  = 16,

        /*
         * Record ksymbol register/unregister events:
         *
         * struct {
         *	struct perf_event_header	header;
         *	...
         *	struct sample_id		sample_id;
         * };
         */
        PERF_RECORD_KSYMBOL                     = 17,

        PERF_RECORD_MAX,                        /* non-ABI */
};

enum perf_record_ksymbol_type {
        PERF_RECORD_KSYMBOL_TYPE_UNKNOWN        = 0,
        PERF_RECORD_KSYMBOL_TYPE_BPF            = 1,
        PERF_RECORD_KSYMBOL_TYPE_MAX            /* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER    (1 << 0)

#define PERF_MAX_STACK_DEPTH            127
#define PERF_MAX_CONTEXTS_PER_STACK       8

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
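
/*
 * Example (not part of the ABI): in a PERF_SAMPLE_CALLCHAIN body the ips[]
 * entries mix return addresses with the context markers above; a consumer
 * might separate them like this (resolve() is a hypothetical helper):
 *
 *      for (i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX)
 *                      mode = ips[i];          // e.g. PERF_CONTEXT_KERNEL
 *              else
 *                      resolve(mode, ips[i]);  // ordinary return address
 *      }
 */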

/*
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED         0x01    /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE         0x02    /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL           0x04    /* record contains gaps */
#define PERF_AUX_FLAG_COLLISION         0x08    /* sample collided with another */

#define PERF_FLAG_FD_NO_GROUP           (1UL << 0)
#define PERF_FLAG_FD_OUTPUT             (1UL << 1)
#define PERF_FLAG_PID_CGROUP            (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC            (1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_op:5,       /* type of opcode */
                        mem_lvl:14,     /* memory hierarchy level */
                        mem_snoop:5,    /* snoop mode */
                        mem_lock:2,     /* lock instr */
                        mem_dtlb:7,     /* tlb access */
                        mem_lvl_num:4,  /* memory hierarchy level number */
                        mem_remote:1,   /* remote */
                        mem_snoopx:2,   /* snoop mode, ext */
                        mem_rsvd:24;
        };
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_rsvd:24,
                        mem_snoopx:2,   /* snoop mode, ext */
                        mem_remote:1,   /* remote */
                        mem_lvl_num:4,  /* memory hierarchy level number */
                        mem_dtlb:7,     /* tlb access */
                        mem_lock:2,     /* lock instr */
                        mem_snoop:5,    /* snoop mode */
                        mem_lvl:14,     /* memory hierarchy level */
                        mem_op:5;       /* type of opcode */
        };
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA          0x01 /* not available */
#define PERF_MEM_OP_LOAD        0x02 /* load instruction */
#define PERF_MEM_OP_STORE       0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH      0x08 /* prefetch */
#define PERF_MEM_OP_EXEC        0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT       0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA         0x01   /* not available */
#define PERF_MEM_LVL_HIT        0x02   /* hit level */
#define PERF_MEM_LVL_MISS       0x04   /* miss level */
#define PERF_MEM_LVL_L1         0x08   /* L1 */
#define PERF_MEM_LVL_LFB        0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2         0x20   /* L2 */
#define PERF_MEM_LVL_L3         0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM    0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1   0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2   0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1   0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2   0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO         0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC        0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT      5

#define PERF_MEM_REMOTE_REMOTE  0x01 /* Remote */
#define PERF_MEM_REMOTE_SHIFT   37

#define PERF_MEM_LVLNUM_L1      0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2      0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3      0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4      0x04 /* L4 */
/* 5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB     0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM     0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM    0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA      0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT   33

#define PERF_MEM_SNOOP_NA       0x01 /* not available */
#define PERF_MEM_SNOOP_NONE     0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT      0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS     0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM     0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT    19

#define PERF_MEM_SNOOPX_FWD     0x01 /* forward */
#define PERF_MEM_SNOOPX_SHIFT   37

/* locked instruction */
#define PERF_MEM_LOCK_NA        0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED    0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT     24

#define PERF_MEM_TLB_NA         0x01 /* not available */
#define PERF_MEM_TLB_HIT        0x02 /* hit level */
#define PERF_MEM_TLB_MISS       0x04 /* miss level */
#define PERF_MEM_TLB_L1         0x08 /* L1 */
#define PERF_MEM_TLB_L2         0x10 /* L2 */
#define PERF_MEM_TLB_WK         0x20 /* Hardware Walker*/
#define PERF_MEM_TLB_OS         0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT      26

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
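
/*
 * Example (not part of the ABI): PERF_MEM_S() packs one field of the
 * PERF_SAMPLE_DATA_SRC word; fields can be combined and tested the same
 * way. A local L1 load hit, for instance, could be encoded as:
 *
 *      data_src = PERF_MEM_S(OP, LOAD)    |
 *                 PERF_MEM_S(LVL, HIT)    |
 *                 PERF_MEM_S(LVL, L1)     |
 *                 PERF_MEM_S(SNOOP, NONE) |
 *                 PERF_MEM_S(LOCK, NA)    |
 *                 PERF_MEM_S(TLB, NA);
 *
 * and a consumer could test for a miss at any level with:
 *
 *      if (data_src & PERF_MEM_S(LVL, MISS))
 *              ...
 */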

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred:1,      /* target mispredicted */
                predicted:1,    /* target predicted */
                in_tx:1,        /* in transaction */
                abort:1,        /* transaction abort */
                cycles:16,      /* cycle count to last branch */
                type:4,         /* branch type */
                reserved:40;
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */