git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: LoongArch: selftests: Add basic PMU event counting test
author: Song Gao <gaosong@loongson.cn>
Thu, 9 Apr 2026 10:56:37 +0000 (18:56 +0800)
committer: Huacai Chen <chenhuacai@loongson.cn>
Thu, 9 Apr 2026 10:56:37 +0000 (18:56 +0800)
Introduce a basic PMU test that verifies hardware event counting for
four performance counters. The test enables the events for CPU cycles,
instructions retired, branch instructions, and branch misses, runs a
fixed number of loops, and checks that the counter values fall within
expected ranges. It also validates that the host supports PMU and that
the VM feature is enabled.

Signed-off-by: Song Gao <gaosong@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
tools/testing/selftests/kvm/Makefile.kvm
tools/testing/selftests/kvm/include/loongarch/pmu.h [new file with mode: 0644]
tools/testing/selftests/kvm/include/loongarch/processor.h
tools/testing/selftests/kvm/lib/loongarch/processor.c
tools/testing/selftests/kvm/loongarch/pmu_test.c [new file with mode: 0644]

index 6471fa214a9f957793e04e0ded8ea569ea4283cf..502c99258bd1f0713baad893c3de66431c08a5b5 100644 (file)
@@ -222,7 +222,8 @@ TEST_GEN_PROGS_riscv += mmu_stress_test
 TEST_GEN_PROGS_riscv += rseq_test
 TEST_GEN_PROGS_riscv += steal_time
 
-TEST_GEN_PROGS_loongarch = arch_timer
+TEST_GEN_PROGS_loongarch = loongarch/pmu_test
+TEST_GEN_PROGS_loongarch += arch_timer
 TEST_GEN_PROGS_loongarch += coalesced_io_test
 TEST_GEN_PROGS_loongarch += demand_paging_test
 TEST_GEN_PROGS_loongarch += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/include/loongarch/pmu.h b/tools/testing/selftests/kvm/include/loongarch/pmu.h
new file mode 100644 (file)
index 0000000..2f734a1
--- /dev/null
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * LoongArch PMU specific interface
+ *
+ * CPUCFG6 capability-field masks and the performance-counter CSR
+ * numbers/control bits used by the KVM PMU selftest.
+ */
+#ifndef SELFTEST_KVM_PMU_H
+#define SELFTEST_KVM_PMU_H
+
+#include "processor.h"
+
+#define LOONGARCH_CPUCFG6                      0x6
+#define  CPUCFG6_PMP                           BIT(0)
+#define  CPUCFG6_PAMVER                                GENMASK(3, 1)
+#define  CPUCFG6_PMNUM                         GENMASK(7, 4)
+#define  CPUCFG6_PMNUM_SHIFT                   4
+#define  CPUCFG6_PMBITS                                GENMASK(13, 8)
+#define  CPUCFG6_PMBITS_SHIFT                  8
+#define  CPUCFG6_UPM                           BIT(14)
+
+/* Performance Counter registers */
+#define LOONGARCH_CSR_PERFCTRL0                        0x200   /* perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0                        0x201   /* perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1                        0x202   /* perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1                        0x203   /* perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2                        0x204   /* perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2                        0x205   /* perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3                        0x206   /* perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3                        0x207   /* perf event 3 count value */
+#define  CSR_PERFCTRL_PLV0                     BIT(16)
+#define  CSR_PERFCTRL_PLV1                     BIT(17)
+#define  CSR_PERFCTRL_PLV2                     BIT(18)
+#define  CSR_PERFCTRL_PLV3                     BIT(19)
+/* Count in every privilege level. NOTE(review): "ENVENT" looks like a typo for "EVENT" — rename needs coordinated updates in pmu_test.c */
+#define PMU_ENVENT_ENABLED     (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
+/* Hardware event codes (from LoongArch perf_event.c) */
+#define LOONGARCH_PMU_EVENT_CYCLES             0x00  /* CPU cycles */
+#define LOONGARCH_PMU_EVENT_INSTR_RETIRED      0x01  /* Instructions retired */
+/* NOTE(review): the two names below shadow uapi perf_event.h PERF_COUNT_HW_* enumerators — confirm intended, LOONGARCH_PMU_EVENT_* would match the prefix above */
+#define PERF_COUNT_HW_BRANCH_INSTRUCTIONS      0x02  /* Branch instructions */
+#define PERF_COUNT_HW_BRANCH_MISSES            0x03  /* Branch misses */
+
+#define NUM_LOOPS                               1000
+#define EXPECTED_INSTR_MIN                      (NUM_LOOPS + 10)  /* Loop + overhead */
+#define EXPECTED_CYCLES_MIN                     NUM_LOOPS       /* At least 1 cycle per iteration */
+#define UPPER_BOUND                            (10 * NUM_LOOPS)
+
+#endif
index 6c1e59484485f4ce4312c4b3a45449e83d9c2d26..916426707c8677a791ef1540ffd531163748f654 100644 (file)
@@ -189,6 +189,7 @@ struct handlers {
        handler_fn exception_handlers[VECTOR_NUM];
 };
 
+void loongarch_vcpu_setup(struct kvm_vcpu *vcpu);
 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler);
 
index 0ad4544517e95873423da01c950f23f82eb6bb23..ee4ad3b1d2a4fb8c2404a97f44883a71b1cf9820 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <asm/kvm.h>
 #include "kvm_util.h"
+#include "pmu.h"
 #include "processor.h"
 #include "ucall_common.h"
 
@@ -275,9 +276,10 @@ static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
        __vcpu_set_reg(vcpu, csrid, val);
 }
 
-static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
+void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
        int width;
+       unsigned int cfg;
        unsigned long val;
        struct kvm_vm *vm = vcpu->vm;
 
@@ -290,6 +292,9 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
                TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }
 
+       cfg = read_cpucfg(LOONGARCH_CPUCFG6);
+       loongarch_set_cpucfg(vcpu, LOONGARCH_CPUCFG6, cfg);
+
        /* kernel mode and page enable mode */
        val = PLV_KERN | CSR_CRMD_PG;
        loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
diff --git a/tools/testing/selftests/kvm/loongarch/pmu_test.c b/tools/testing/selftests/kvm/loongarch/pmu_test.c
new file mode 100644 (file)
index 0000000..c0f2597
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch KVM PMU event counting test
+ *
+ * Test hardware event counting: CPU_CYCLES, INSTR_RETIRED,
+ * BRANCH_INSTRUCTIONS and BRANCH_MISSES.
+ */
+#include <linux/bitops.h>
+#include "kvm_util.h"
+#include "pmu.h"
+#include "loongarch/processor.h"
+
+/* Check PMU support */
+static bool has_pmu_support(void)
+{
+       uint32_t cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+
+       /*
+        * The PMU is usable only when the present bit is set and the
+        * counter-number field reports at least one counter.
+        */
+       return (cfg6 & CPUCFG6_PMP) &&
+              ((cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) != 0;
+}
+
+/* Dump PMU capabilities */
+static void dump_pmu_caps(void)
+{
+       uint32_t cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+       /* Raw fields are decoded with a +1 bias */
+       int nr_counters = ((cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
+       int counter_bits = ((cfg6 & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;
+
+       pr_info("PMU capabilities:\n");
+       pr_info("  Counters present: %s\n", cfg6 & CPUCFG6_PMP ? "yes" : "no");
+       pr_info("  Number of counters: %d\n", nr_counters);
+       pr_info("  Counter width: %d bits\n", counter_bits);
+}
+
+/* Guest test code - runs inside VM */
+static void guest_pmu_base_test(void)
+{
+       int i;
+       uint32_t cfg6, pmnum;
+       uint64_t cnt[4];
+
+       cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+       /* Decode with the shared CPUCFG6 macros instead of magic 0x4/0xf */
+       pmnum = (cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+       GUEST_PRINTF("CPUCFG6 = 0x%x\n", cfg6);
+       GUEST_PRINTF("PMP enabled: %s\n", (cfg6 & CPUCFG6_PMP) ? "YES" : "NO");
+       GUEST_PRINTF("Number of counters (PMNUM): %x\n", pmnum + 1);
+       /* Four counters are programmed below, so insist on exactly four */
+       GUEST_ASSERT(pmnum == 3);
+
+       /* Reset all four counters so the values read back start from zero */
+       GUEST_PRINTF("Clean csr_perfcntr0-3\n");
+       csr_write(0, LOONGARCH_CSR_PERFCNTR0);
+       csr_write(0, LOONGARCH_CSR_PERFCNTR1);
+       csr_write(0, LOONGARCH_CSR_PERFCNTR2);
+       csr_write(0, LOONGARCH_CSR_PERFCNTR3);
+       /* Bind one hardware event per counter, counting in every PLV */
+       GUEST_PRINTF("Set csr_perfctrl0 for cycles event\n");
+       csr_write(PMU_ENVENT_ENABLED |
+               LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0);
+       GUEST_PRINTF("Set csr_perfctrl1 for instr_retired event\n");
+       csr_write(PMU_ENVENT_ENABLED |
+               LOONGARCH_PMU_EVENT_INSTR_RETIRED, LOONGARCH_CSR_PERFCTRL1);
+       GUEST_PRINTF("Set csr_perfctrl2 for branch_instructions event\n");
+       csr_write(PMU_ENVENT_ENABLED |
+               PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LOONGARCH_CSR_PERFCTRL2);
+       GUEST_PRINTF("Set csr_perfctrl3 for branch_misses event\n");
+       csr_write(PMU_ENVENT_ENABLED |
+               PERF_COUNT_HW_BRANCH_MISSES, LOONGARCH_CSR_PERFCTRL3);
+
+       /* Fixed workload that the range checks below are tuned against */
+       for (i = 0; i < NUM_LOOPS; i++)
+               cpu_relax();
+
+       cnt[0] = csr_read(LOONGARCH_CSR_PERFCNTR0);
+       GUEST_PRINTF("csr_perfcntr0 is %lx\n", cnt[0]);
+       cnt[1] = csr_read(LOONGARCH_CSR_PERFCNTR1);
+       GUEST_PRINTF("csr_perfcntr1 is %lx\n", cnt[1]);
+       cnt[2] = csr_read(LOONGARCH_CSR_PERFCNTR2);
+       GUEST_PRINTF("csr_perfcntr2 is %lx\n", cnt[2]);
+       cnt[3] = csr_read(LOONGARCH_CSR_PERFCNTR3);
+       GUEST_PRINTF("csr_perfcntr3 is %lx\n", cnt[3]);
+
+       GUEST_PRINTF("assert csr_perfcntr0 > EXPECTED_CYCLES_MIN && csr_perfcntr0 < UPPER_BOUND\n");
+       GUEST_ASSERT(cnt[0] > EXPECTED_CYCLES_MIN && cnt[0] < UPPER_BOUND);
+       GUEST_PRINTF("assert csr_perfcntr1 > EXPECTED_INSTR_MIN && csr_perfcntr1 < UPPER_BOUND\n");
+       GUEST_ASSERT(cnt[1] > EXPECTED_INSTR_MIN && cnt[1] < UPPER_BOUND);
+       GUEST_PRINTF("assert csr_perfcntr2 > 0 && csr_perfcntr2 < UPPER_BOUND\n");
+       GUEST_ASSERT(cnt[2] > 0 && cnt[2] < UPPER_BOUND);
+       GUEST_PRINTF("assert csr_perfcntr3 > 0 && csr_perfcntr3 < UPPER_BOUND\n");
+       GUEST_ASSERT(cnt[3] > 0 && cnt[3] < UPPER_BOUND);
+}
+
+/* Guest entry point: run the PMU test, then signal completion to the host */
+static void guest_code(void)
+{
+       guest_pmu_base_test();
+
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       int ret = 0;
+       /* Zero-init so KVM_HAS_DEVICE_ATTR never sees stale stack bytes */
+       struct kvm_device_attr attr = {0};
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       struct ucall uc;
+
+       /* Check host KVM PMU support */
+       if (!has_pmu_support()) {
+               print_skip("PMU not supported by host hardware\n");
+               dump_pmu_caps();
+               return KSFT_SKIP;
+       }
+       pr_info("Host support PMU\n");
+
+       /* Dump PMU capabilities */
+       dump_pmu_caps();
+
+       vm = vm_create(VM_MODE_P47V47_16K);
+       vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+       vm_init_descriptor_tables(vm);
+       loongarch_vcpu_setup(vcpu);
+
+       /* Plain statements (the original chained these with comma operators) */
+       attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+       attr.attr = KVM_LOONGARCH_VM_FEAT_PMU;
+
+       ret = ioctl(vm->fd, KVM_HAS_DEVICE_ATTR, &attr);
+       if (ret == 0) {
+               pr_info("PMU is enabled in VM\n");
+       } else {
+               print_skip("PMU not enabled by VM config\n");
+               kvm_vm_free(vm);        /* don't leak the VM on the skip path */
+               return KSFT_SKIP;
+       }
+
+       /* Pump the vCPU until the guest reports DONE or an assert fires */
+       while (1) {
+               vcpu_run(vcpu);
+               switch (get_ucall(vcpu, &uc)) {
+               case UCALL_PRINTF:
+                       printf("%s", (const char *)uc.buffer);
+                       break;
+               case UCALL_DONE:
+                       printf("PMU test PASSED\n");
+                       goto done;
+               case UCALL_ABORT:
+                       printf("PMU test FAILED\n");
+                       ret = -1;
+                       goto done;
+               default:
+                       printf("Unexpected exit\n");
+                       ret = -1;
+                       goto done;
+               }
+       }
+
+done:
+       kvm_vm_free(vm);
+       return ret;
+}