RISC-V: KVM: Use NACL HFENCEs for KVM request based HFENCEs
author    Anup Patel <apatel@ventanamicro.com>
          Sun, 20 Oct 2024 19:47:34 +0000 (01:17 +0530)
committer Anup Patel <anup@brainfault.org>
          Mon, 28 Oct 2024 11:14:08 +0000 (16:44 +0530)
When running under some other hypervisor, use SBI NACL-based HFENCEs
for TLB shoot-down via KVM requests. This makes HFENCEs faster whenever
SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-14-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
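
For context, the dispatch pattern the hunks below add boils down to the
following minimal sketch (the function name example_hfence_vvma_all() is
hypothetical; kvm_riscv_nacl_available(), nacl_shmem() and the
nacl_hfence_*() helpers come from the newly included asm/kvm_nacl.h, and
kvm_riscv_local_hfence_vvma_all() is the pre-existing local fence):

    /*
     * Sketch only: mirrors kvm_riscv_hfence_vvma_all_process() below.
     * When SBI nested acceleration is available, issue the HFENCE via
     * the NACL shared memory; otherwise fall back to a local HFENCE
     * instruction on the current hart.
     */
    static void example_hfence_vvma_all(struct kvm_vcpu *vcpu)
    {
            unsigned long vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);

            if (kvm_riscv_nacl_available())
                    nacl_hfence_vvma_all(nacl_shmem(), vmid);
            else
                    kvm_riscv_local_hfence_vvma_all(vmid);
    }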
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 23c0e82b5103cdd950b2da266258260292c0cea5..2f91ea5f8493253ea6eb68a3e75047c20830e57c 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -14,6 +14,7 @@
 #include <asm/csr.h>
 #include <asm/cpufeature.h>
 #include <asm/insn-def.h>
+#include <asm/kvm_nacl.h>
 
 #define has_svinval()  riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
 
@@ -186,18 +187,24 @@ void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
 
 void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vmid *vmid;
+       struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+       unsigned long vmid = READ_ONCE(v->vmid);
 
-       vmid = &vcpu->kvm->arch.vmid;
-       kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
+       if (kvm_riscv_nacl_available())
+               nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
+       else
+               kvm_riscv_local_hfence_gvma_vmid_all(vmid);
 }
 
 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vmid *vmid;
+       struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+       unsigned long vmid = READ_ONCE(v->vmid);
 
-       vmid = &vcpu->kvm->arch.vmid;
-       kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
+       if (kvm_riscv_nacl_available())
+               nacl_hfence_vvma_all(nacl_shmem(), vmid);
+       else
+               kvm_riscv_local_hfence_vvma_all(vmid);
 }
 
 static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
@@ -251,6 +258,7 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 {
+       unsigned long vmid;
        struct kvm_riscv_hfence d = { 0 };
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
 
@@ -259,26 +267,41 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
                case KVM_RISCV_HFENCE_UNKNOWN:
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
-                       kvm_riscv_local_hfence_gvma_vmid_gpa(
-                                               READ_ONCE(v->vmid),
-                                               d.addr, d.size, d.order);
+                       vmid = READ_ONCE(v->vmid);
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+                                                     d.addr, d.size, d.order);
+                       else
+                               kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
+                                                                    d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-                       kvm_riscv_local_hfence_vvma_asid_gva(
-                                               READ_ONCE(v->vmid), d.asid,
-                                               d.addr, d.size, d.order);
+                       vmid = READ_ONCE(v->vmid);
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+                                                     d.addr, d.size, d.order);
+                       else
+                               kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+                                                                    d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-                       kvm_riscv_local_hfence_vvma_asid_all(
-                                               READ_ONCE(v->vmid), d.asid);
+                       vmid = READ_ONCE(v->vmid);
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+                       else
+                               kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
-                       kvm_riscv_local_hfence_vvma_gva(
-                                               READ_ONCE(v->vmid),
-                                               d.addr, d.size, d.order);
+                       vmid = READ_ONCE(v->vmid);
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_vvma(nacl_shmem(), vmid,
+                                                d.addr, d.size, d.order);
+                       else
+                               kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+                                                               d.size, d.order);
                        break;
                default:
                        break;