SBI_EXT_PMU_COUNTER_FW_READ,
SBI_EXT_PMU_COUNTER_FW_READ_HI,
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ SBI_EXT_PMU_EVENT_GET_INFO,
};
union sbi_pmu_ctr_info {
...
};

/* Data structure to contain the pmu snapshot data */
struct riscv_pmu_snapshot_data {
...
u64 reserved[447];
};
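+/*
+ * Shared-memory entry for SBI_EXT_PMU_EVENT_GET_INFO: the kernel fills
+ * event_idx (and event_data for raw events) as input; the SBI
+ * implementation sets bit 0 of output when the event is supported.
+ */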
+struct riscv_pmu_event_info {
+ u32 event_idx;
+ u32 output;
+ u64 event_data;
+};
+
+#define RISCV_PMU_EVENT_INFO_OUTPUT_MASK 0x01
+
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
/* SBI v3.0 allows extended hpmeventX width value */
#define RISCV_PMU_RAW_EVENT_V2_MASK GENMASK_ULL(55, 0)

...

},
};
+static int pmu_sbi_check_event_info(void)
+{
+ int num_events = ARRAY_SIZE(pmu_hw_event_map) + PERF_COUNT_HW_CACHE_MAX *
+ PERF_COUNT_HW_CACHE_OP_MAX * PERF_COUNT_HW_CACHE_RESULT_MAX;
+ struct riscv_pmu_event_info *event_info_shmem;
+ phys_addr_t base_addr;
+ int i, j, k, result = 0, count = 0;
+ struct sbiret ret;
+
+ event_info_shmem = kcalloc(num_events, sizeof(*event_info_shmem), GFP_KERNEL);
+ if (!event_info_shmem)
+ return -ENOMEM;
+
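+ /* Collect the event indices of all generic hw and cache events */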
+ for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+ event_info_shmem[count++].event_idx = pmu_hw_event_map[i].event_idx;
+
+ for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+ for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+ event_info_shmem[count++].event_idx =
+ pmu_cache_event_map[i][j][k].event_idx;
+ }
+ }
+
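+ /*
+ * A single SBI call validates every standard event at once; 32-bit
+ * kernels pass the shared-memory physical address as a lo/hi pair.
+ */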
+ base_addr = __pa(event_info_shmem);
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), count, 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, base_addr, 0,
+ count, 0, 0, 0);
+ if (ret.error) {
+ result = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
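+ /* An unset output bit means the event is unsupported; mark it -ENOENT */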
+ for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) {
+ if (!(event_info_shmem[i].output & RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+ pmu_hw_event_map[i].event_idx = -ENOENT;
+ }
+
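+ /* Cache event results follow the hw event entries in the shared buffer */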
+ count = ARRAY_SIZE(pmu_hw_event_map);
+
+ for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+ for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) {
+ if (!(event_info_shmem[count].output &
+ RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+ pmu_cache_event_map[i][j][k].event_idx = -ENOENT;
+ count++;
+ }
+ }
+ }
+
+free_mem:
+ kfree(event_info_shmem);
+
+ return result;
+}
+
static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
{
struct sbiret ret;
...
}

static void pmu_sbi_check_std_events(struct work_struct *work)
{
+ int ret;
+
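+ /*
+ * SBI v3.0 and later: one shared-memory query validates all standard
+ * events instead of probing each one with a config-match call.
+ */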
+ if (sbi_v3_available) {
+ ret = pmu_sbi_check_event_info();
+ if (ret)
+ pr_err("pmu_sbi_check_event_info failed with error %d\n", ret);
+ return;
+ }
+
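+ /* Older SBI: probe each standard event individually */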
for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
pmu_sbi_check_event(&pmu_hw_event_map[i]);