drivers/perf: riscv: Implement PMU event info function
author    Atish Patra <atishp@rivosinc.com>
          Tue, 9 Sep 2025 07:03:23 +0000 (00:03 -0700)
committer Anup Patel <anup@brainfault.org>
          Tue, 16 Sep 2025 06:19:31 +0000 (11:49 +0530)
With the new SBI PMU event info function, we can query the availability
of all the standard SBI PMU events at boot time with a single ecall.
This improves boot time by avoiding a separate SBI call for each
standard PMU event. Since the function is defined only in SBI v3.0,
invoke it only if the underlying SBI implementation is v3.0 or higher.
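
As a minimal sketch (simplified from the patch below; error handling and
the 32-bit high/low address split are omitted, and std_event_idx is a
hypothetical table of the standard event indexes), the batched query
boils down to:

    struct riscv_pmu_event_info *info;
    struct sbiret ret;

    info = kcalloc(num_events, sizeof(*info), GFP_KERNEL);
    for (int i = 0; i < num_events; i++)
            info[i].event_idx = std_event_idx[i];

    /* One ecall for all events; the SBI implementation sets bit 0 of
     * 'output' for every event_idx it supports. */
    ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO,
                    __pa(info), 0, num_events, 0, 0, 0);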

Signed-off-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Acked-by: Paul Walmsley <pjw@kernel.org>
Link: https://lore.kernel.org/r/20250909-pmu_event_info-v6-4-d8f80cacb884@rivosinc.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/sbi.h
drivers/perf/riscv_pmu_sbi.c

index b0c41ef56968fb84cf72035d94957a1a34bd4df3..5ca7cebc13cc831ed48d365eaac685ef9afa25c9 100644 (file)
@@ -136,6 +136,7 @@ enum sbi_ext_pmu_fid {
        SBI_EXT_PMU_COUNTER_FW_READ,
        SBI_EXT_PMU_COUNTER_FW_READ_HI,
        SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+       SBI_EXT_PMU_EVENT_GET_INFO,
 };
 
 union sbi_pmu_ctr_info {
@@ -159,6 +160,14 @@ struct riscv_pmu_snapshot_data {
        u64 reserved[447];
 };
 
+struct riscv_pmu_event_info {
+       u32 event_idx;
+       u32 output;
+       u64 event_data;
+};
+
+#define RISCV_PMU_EVENT_INFO_OUTPUT_MASK 0x01
+
 #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
 #define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
 /* SBI v3.0 allows extended hpmeventX width value */
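
(Aside: bit 0 of 'output', masked by RISCV_PMU_EVENT_INFO_OUTPUT_MASK,
is set by the SBI implementation when the corresponding event_idx is
supported. A hypothetical helper, not part of this patch, makes the
intended use explicit:)

    static inline bool riscv_pmu_event_supported(const struct riscv_pmu_event_info *info)
    {
            return info->output & RISCV_PMU_EVENT_INFO_OUTPUT_MASK;
    }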
index 3644bed4c8aba367bef6349e8ae70b8b9d06c7e2..a6c479f853e1ad8e992428e6b8967ff2dbd5f9c2 100644 (file)
@@ -299,6 +299,66 @@ static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
        },
 };
 
+static int pmu_sbi_check_event_info(void)
+{
+       int num_events = ARRAY_SIZE(pmu_hw_event_map) + PERF_COUNT_HW_CACHE_MAX *
+                        PERF_COUNT_HW_CACHE_OP_MAX * PERF_COUNT_HW_CACHE_RESULT_MAX;
+       struct riscv_pmu_event_info *event_info_shmem;
+       phys_addr_t base_addr;
+       int i, j, k, result = 0, count = 0;
+       struct sbiret ret;
+
+       event_info_shmem = kcalloc(num_events, sizeof(*event_info_shmem), GFP_KERNEL);
+       if (!event_info_shmem)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+               event_info_shmem[count++].event_idx = pmu_hw_event_map[i].event_idx;
+
+       for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+               for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+                       for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+                               event_info_shmem[count++].event_idx =
+                                                       pmu_cache_event_map[i][j][k].event_idx;
+               }
+       }
+
+       base_addr = __pa(event_info_shmem);
+       if (IS_ENABLED(CONFIG_32BIT))
+               ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, lower_32_bits(base_addr),
+                               upper_32_bits(base_addr), count, 0, 0, 0);
+       else
+               ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, base_addr, 0,
+                               count, 0, 0, 0);
+       if (ret.error) {
+               result = -EOPNOTSUPP;
+               goto free_mem;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) {
+               if (!(event_info_shmem[i].output & RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+                       pmu_hw_event_map[i].event_idx = -ENOENT;
+       }
+
+       count = ARRAY_SIZE(pmu_hw_event_map);
+
+       for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+               for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+                       for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) {
+                               if (!(event_info_shmem[count].output &
+                                     RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+                                       pmu_cache_event_map[i][j][k].event_idx = -ENOENT;
+                               count++;
+                       }
+               }
+       }
+
+free_mem:
+       kfree(event_info_shmem);
+
+       return result;
+}
+
 static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
 {
        struct sbiret ret;
@@ -316,6 +376,15 @@ static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
 
 static void pmu_sbi_check_std_events(struct work_struct *work)
 {
+       int ret;
+
+       if (sbi_v3_available) {
+               ret = pmu_sbi_check_event_info();
+               if (ret)
+                       pr_err("pmu_sbi_check_event_info failed with error %d\n", ret);
+               return;
+       }
+
        for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
                pmu_sbi_check_event(&pmu_hw_event_map[i]);
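
Back-of-the-envelope sizing for the shared buffer, assuming
pmu_hw_event_map covers the PERF_COUNT_HW_MAX (= 10) generic events and
the usual perf cache constants (PERF_COUNT_HW_CACHE_MAX = 7,
PERF_COUNT_HW_CACHE_OP_MAX = 3, PERF_COUNT_HW_CACHE_RESULT_MAX = 2):

    num_events = 10 + 7 * 3 * 2 = 52 entries
    52 * sizeof(struct riscv_pmu_event_info) = 52 * 16 bytes = 832 bytes

so the whole probe fits in one sub-page kcalloc() and a single ecall.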