#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
+#define KVM_REQ_PMU KVM_ARCH_REQ(2)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
unsigned long flags;
};
+#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
+ /* Host PMU CSR */
+ u64 perf_ctrl[HOST_MAX_PMNUM];
+ u64 perf_cntr[HOST_MAX_PMNUM];
};
struct kvm_world_switch {
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
#define KVM_LARCH_LBT (0x1 << 3)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 4)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 5)
+#define KVM_LARCH_PMU (0x1 << 4)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
struct kvm_vcpu_arch {
/*
/* CSR state */
struct loongarch_csrs *csr;
+ /* Guest max PMU CSR id */
+ int max_pmu_csrid;
+
/* GPR used as IO source/target */
u32 io_gpr;
return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}
+static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[6] & CPUCFG6_PMP;
+}
+
+static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
+{
+ return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+}
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
sizeof(kvm_vcpu_stats_desc),
};
+static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ context->perf_cntr[0] = read_csr_perfcntr0();
+ context->perf_cntr[1] = read_csr_perfcntr1();
+ context->perf_cntr[2] = read_csr_perfcntr2();
+ context->perf_cntr[3] = read_csr_perfcntr3();
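+	/*
+	 * write_csr_perfctrl*() returns the previous CSR value, so the host
+	 * control registers are saved and the counters stopped in one step.
+	 */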
+ context->perf_ctrl[0] = write_csr_perfctrl0(0);
+ context->perf_ctrl[1] = write_csr_perfctrl1(0);
+ context->perf_ctrl[2] = write_csr_perfctrl2(0);
+ context->perf_ctrl[3] = write_csr_perfctrl3(0);
+}
+
+static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ write_csr_perfcntr0(context->perf_cntr[0]);
+ write_csr_perfcntr1(context->perf_cntr[1]);
+ write_csr_perfcntr2(context->perf_cntr[2]);
+ write_csr_perfcntr3(context->perf_cntr[3]);
+ write_csr_perfctrl0(context->perf_ctrl[0]);
+ write_csr_perfctrl1(context->perf_ctrl[1]);
+ write_csr_perfctrl2(context->perf_ctrl[2]);
+ write_csr_perfctrl3(context->perf_ctrl[3]);
+}
+
+static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
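+/*
+ * Give the hardware PMU to the guest: save the host PMU context, program
+ * GCFG.GPERF with the guest counter number so the guest can access its
+ * PMU CSRs directly, then load the guest PMU context.
+ */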
+static int kvm_own_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+
+ if (!kvm_guest_has_pmu(&vcpu->arch))
+ return -EINVAL;
+
+ kvm_save_host_pmu(vcpu);
+
+ /* Set PM0-PM(num) to guest */
+ val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+ val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+ write_csr_gcfg(val);
+
+ kvm_restore_guest_pmu(vcpu);
+
+ return 0;
+}
+
+static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+ return;
+
+ kvm_save_guest_pmu(vcpu);
+
+ /* Disable pmu access from guest */
+ write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
+
+	/*
+	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
+	 * exiting the guest, so that the PMU context does not need to be
+	 * handled on the next trap into the guest.
+	 */
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+ if (!(val & KVM_PMU_EVENT_ENABLED))
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+
+ kvm_restore_host_pmu(vcpu);
+}
+
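+/*
+ * Called when the vcpu is loaded: the hardware PMU state is not reloaded
+ * here, only KVM_REQ_PMU is raised so that kvm_check_pmu() takes back
+ * ownership of the PMU right before the next VM entry.
+ */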
+static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.aux_inuse & KVM_LARCH_PMU)
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+}
+
+static void kvm_check_pmu(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
+ kvm_own_pmu(vcpu);
+ vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
+ }
+}
+
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
u32 version;
/* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu);
+ kvm_check_pmu(vcpu);
/*
* Called after function kvm_check_vpid()
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
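+	/* Hand the hardware PMU back to the host before irqs are re-enabled */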
+ kvm_lose_pmu(vcpu);
+
guest_timing_exit_irqoff();
guest_state_exit_irqoff();
local_irq_enable();
kvm_write_sw_gcsr(csr, id, val);
+	/*
+	 * After modifying a PMU CSR value of the vcpu, request KVM_REQ_PMU
+	 * if any PMU event is enabled, so that the hardware PMU context is
+	 * reloaded before the next VM entry.
+	 */
+ if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
+ unsigned long val;
+
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+
+ if (val & KVM_PMU_EVENT_ENABLED)
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
+
return ret;
}
case LOONGARCH_CPUCFG5:
*v = GENMASK(31, 0);
return 0;
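+	/*
+	 * CPUCFG6 bits 14:0 describe the PMU (PMP, PMU version, PMNUM,
+	 * PMBITS and UPM); allow them only when the host implements PMP.
+	 */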
+ case LOONGARCH_CPUCFG6:
+ if (cpu_has_pmp)
+ *v = GENMASK(14, 0);
+ else
+ *v = 0;
+ return 0;
case LOONGARCH_CPUCFG16:
*v = GENMASK(16, 0);
return 0;
/* LASX architecturally implies LSX and FP but val does not satisfy that */
return -EINVAL;
return 0;
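+	/*
+	 * A guest CPUCFG6 with PMP set must not claim more than the host
+	 * provides: the counter width (PMBITS) must match, the counter
+	 * number (PMNUM) must not exceed the host's, and user-mode access
+	 * (UPM) is only allowed if the host has it.
+	 */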
+ case LOONGARCH_CPUCFG6:
+ if (val & CPUCFG6_PMP) {
+			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
+
+ if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
+ return -EINVAL;
+ if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
+ return -EINVAL;
+ if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
+ return -EINVAL;
+ }
+ return 0;
default:
/*
* Values for the other CPUCFG IDs are not being further validated
if (ret)
break;
vcpu->arch.cpucfg[id] = (u32)v;
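+	/*
+	 * Each guest counter has a PERFCTRLn and a PERFCNTRn CSR, so the
+	 * highest guest PMU CSR id follows from the counter number written
+	 * to CPUCFG6.
+	 */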
+ if (id == LOONGARCH_CPUCFG6)
+ vcpu->arch.max_pmu_csrid =
+ LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
break;
case KVM_REG_LOONGARCH_LBT:
if (!kvm_guest_has_lbt(&vcpu->arch))
struct kvm_device_attr *attr)
{
switch (attr->attr) {
- case 2:
+ case LOONGARCH_CPUCFG2:
+ case LOONGARCH_CPUCFG6:
return 0;
default:
return -ENXIO;
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ /* Restore hardware PMU CSRs */
+ kvm_restore_pmu(vcpu);
+
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
return 0;