arch/riscv/include/asm/perf_event.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 SiFive
 * Copyright (C) 2018 Andes Technology Corporation
 *
 */

#ifndef _ASM_RISCV_PERF_EVENT_H
#define _ASM_RISCV_PERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#define RISCV_BASE_COUNTERS	2

/*
 * RISCV_MAX_COUNTERS must be defined by the selected PMU configuration.
 */

#ifdef CONFIG_RISCV_BASE_PMU
#define RISCV_MAX_COUNTERS	2
#endif

#ifndef RISCV_MAX_COUNTERS
#error "Please provide a valid RISCV_MAX_COUNTERS for the PMU."
#endif

/*
 * These are the indices of bits in the counteren register *minus* one,
 * except for cycle. It would be more coherent if they mapped directly
 * to the counteren bit definitions, but there is a *time* register at
 * counteren[1], and the per-cpu structure is a scarce resource here.
 *
 * According to the spec, an implementation can support counters up to
 * mhpmcounter31, but many high-end processors have at most 6 general
 * PMCs, so we only define up to MHPMCOUNTER8 here.
 */
#define RISCV_PMU_CYCLE		0
#define RISCV_PMU_INSTRET	1
#define RISCV_PMU_MHPMCOUNTER3	2
#define RISCV_PMU_MHPMCOUNTER4	3
#define RISCV_PMU_MHPMCOUNTER5	4
#define RISCV_PMU_MHPMCOUNTER6	5
#define RISCV_PMU_MHPMCOUNTER7	6
#define RISCV_PMU_MHPMCOUNTER8	7

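/*
 * Illustrative sketch, not part of the original header: one way the
 * indices above could be mapped back to counter CSR numbers. Because
 * the *time* CSR (0xC01) sits between cycle (0xC00) and instret (0xC02),
 * every index except RISCV_PMU_CYCLE is shifted by one. The helper name
 * riscv_pmu_csr_of() is hypothetical and exists only for this example.
 */
static inline unsigned int riscv_pmu_csr_of(int idx)
{
	/* cycle = 0xC00; instret and the hpmcounters skip over time */
	return idx == RISCV_PMU_CYCLE ? 0xC00 : 0xC01 + idx;
}
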
#define RISCV_OP_UNSUPP		(-EOPNOTSUPP)

struct cpu_hw_events {
	/* number of currently enabled events */
	int			n_events;
	/* currently enabled events */
	struct perf_event	*events[RISCV_MAX_COUNTERS];
	/* vendor-defined PMU data */
	void			*platform;
};

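/*
 * Illustrative sketch, not part of the original header: how a driver
 * might record a newly scheduled event in cpu_hw_events. The bounds
 * check and the helper name are assumptions made for this example only.
 */
static inline int riscv_pmu_track_event(struct cpu_hw_events *cpuc,
					struct perf_event *event, int idx)
{
	if (idx < 0 || idx >= RISCV_MAX_COUNTERS)
		return -ENOSPC;
	cpuc->events[idx] = event;
	cpuc->n_events++;
	return 0;
}
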
struct riscv_pmu {
	struct pmu	*pmu;

	/* generic hw/cache events table */
	const int	*hw_events;
	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_COUNT_HW_CACHE_OP_MAX]
				       [PERF_COUNT_HW_CACHE_RESULT_MAX];
	/* methods used to map hw/cache events */
	int		(*map_hw_event)(u64 config);
	int		(*map_cache_event)(u64 config);

	/* max generic hw events in map */
	int		max_events;
	/* total number of counters: 2 (base) + x (general) */
	int		num_counters;
	/* bit width of the counters */
	int		counter_width;

	/* vendor-defined PMU features */
	void		*platform;

	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	int		irq;
};

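/*
 * Illustrative sketch, not part of the original header: the rough shape
 * of a vendor-provided riscv_pmu description. The table contents, the
 * mapping function, and the 63-bit counter width are all hypothetical;
 * a real backend translates perf's generic config values into its own
 * event encodings. Guarded with #if 0 so it is never compiled.
 */
#if 0
static const int example_hw_event_map[PERF_COUNT_HW_MAX] = {
	/* entries not listed default to 0; a real table would mark
	 * unsupported events with RISCV_OP_UNSUPP instead */
	[PERF_COUNT_HW_CPU_CYCLES]	= RISCV_PMU_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]	= RISCV_PMU_INSTRET,
};

static int example_map_hw_event(u64 config)
{
	if (config >= PERF_COUNT_HW_MAX)
		return RISCV_OP_UNSUPP;
	return example_hw_event_map[config];
}

static struct riscv_pmu example_pmu = {
	.hw_events	= example_hw_event_map,
	.map_hw_event	= example_map_hw_event,
	.max_events	= PERF_COUNT_HW_MAX,
	.num_counters	= RISCV_BASE_COUNTERS,
	.counter_width	= 63,
};
#endif
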
#ifdef CONFIG_PERF_EVENTS
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
#endif

#endif /* _ASM_RISCV_PERF_EVENT_H */