]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
RISC-V: KVM: Pass VMID as parameter to kvm_riscv_hfence_xyz() APIs
authorAnup Patel <apatel@ventanamicro.com>
Wed, 18 Jun 2025 11:35:32 +0000 (17:05 +0530)
committerAnup Patel <anup@brainfault.org>
Mon, 28 Jul 2025 16:57:32 +0000 (22:27 +0530)
Currently, all kvm_riscv_hfence_xyz() APIs assume VMID to be the
host VMID of the Guest/VM which restricts use of these APIs only
for host TLB maintenance. Let's allow passing VMID as a parameter
to all kvm_riscv_hfence_xyz() APIs so that they can be re-used
for nested virtualization related TLB maintenance.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Tested-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com>
Link: https://lore.kernel.org/r/20250618113532.471448-13-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_tlb.h
arch/riscv/kvm/gstage.c
arch/riscv/kvm/tlb.c
arch/riscv/kvm/vcpu_sbi_replace.c
arch/riscv/kvm/vcpu_sbi_v01.c

index f67e03edeaec549afb076ecba5699dbc7637a97c..38a2f933ad3abe67244f10885f82941ab9869c63 100644 (file)
 enum kvm_riscv_hfence_type {
        KVM_RISCV_HFENCE_UNKNOWN = 0,
        KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+       KVM_RISCV_HFENCE_GVMA_VMID_ALL,
        KVM_RISCV_HFENCE_VVMA_ASID_GVA,
        KVM_RISCV_HFENCE_VVMA_ASID_ALL,
        KVM_RISCV_HFENCE_VVMA_GVA,
+       KVM_RISCV_HFENCE_VVMA_ALL
 };
 
 struct kvm_riscv_hfence {
@@ -59,21 +61,24 @@ void kvm_riscv_fence_i(struct kvm *kvm,
 void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
-                                   unsigned long order);
+                                   unsigned long order, unsigned long vmid);
 void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-                                   unsigned long hbase, unsigned long hmask);
+                                   unsigned long hbase, unsigned long hmask,
+                                   unsigned long vmid);
 void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
-                                   unsigned long order, unsigned long asid);
+                                   unsigned long order, unsigned long asid,
+                                   unsigned long vmid);
 void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
-                                   unsigned long asid);
+                                   unsigned long asid, unsigned long vmid);
 void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
-                              unsigned long order);
+                              unsigned long order, unsigned long vmid);
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-                              unsigned long hbase, unsigned long hmask);
+                              unsigned long hbase, unsigned long hmask,
+                              unsigned long vmid);
 
 #endif
index 9c7c44f09b05bbfaca131d5bcd996036ff222c2c..24c270d6d0e27457a8b228593a120e506758b09a 100644 (file)
@@ -117,7 +117,8 @@ static void gstage_tlb_flush(struct kvm_gstage *gstage, u32 level, gpa_t addr)
        if (gstage->flags & KVM_GSTAGE_FLAGS_LOCAL)
                kvm_riscv_local_hfence_gvma_vmid_gpa(gstage->vmid, addr, BIT(order), order);
        else
-               kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order);
+               kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order,
+                                              gstage->vmid);
 }
 
 int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
index 349fcfc93f540a3b8206d2408acb598cc6586230..3c5a70a2b9271761d2a2d2bee6db3524cf8d4a3c 100644 (file)
@@ -251,6 +251,12 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
                                kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
                                                                     d.size, d.order);
                        break;
+               case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
+                       else
+                               kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
+                       break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        if (kvm_riscv_nacl_available())
@@ -276,6 +282,13 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
                                kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
                                                                d.size, d.order);
                        break;
+               case KVM_RISCV_HFENCE_VVMA_ALL:
+                       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
+                       else
+                               kvm_riscv_local_hfence_vvma_all(d.vmid);
+                       break;
                default:
                        break;
                }
@@ -328,14 +341,13 @@ void kvm_riscv_fence_i(struct kvm *kvm,
 void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
-                                   unsigned long order)
+                                   unsigned long order, unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
        struct kvm_riscv_hfence data;
 
        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
-       data.vmid = READ_ONCE(v->vmid);
+       data.vmid = vmid;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
@@ -344,23 +356,28 @@ void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
 }
 
 void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-                                   unsigned long hbase, unsigned long hmask)
+                                   unsigned long hbase, unsigned long hmask,
+                                   unsigned long vmid)
 {
-       make_xfence_request(kvm, hbase, hmask, KVM_REQ_TLB_FLUSH,
-                           KVM_REQ_TLB_FLUSH, NULL);
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_TLB_FLUSH, &data);
 }
 
 void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
-                                   unsigned long order, unsigned long asid)
+                                   unsigned long order, unsigned long asid,
+                                   unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
        struct kvm_riscv_hfence data;
 
        data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
        data.asid = asid;
-       data.vmid = READ_ONCE(v->vmid);
+       data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
@@ -370,15 +387,13 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
 
 void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
-                                   unsigned long asid)
+                                   unsigned long asid, unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
-       struct kvm_riscv_hfence data;
+       struct kvm_riscv_hfence data = {0};
 
        data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
        data.asid = asid;
-       data.vmid = READ_ONCE(v->vmid);
-       data.addr = data.size = data.order = 0;
+       data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
@@ -386,14 +401,13 @@ void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
 void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
-                              unsigned long order)
+                              unsigned long order, unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
        struct kvm_riscv_hfence data;
 
        data.type = KVM_RISCV_HFENCE_VVMA_GVA;
        data.asid = 0;
-       data.vmid = READ_ONCE(v->vmid);
+       data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
@@ -402,16 +416,21 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
 }
 
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-                              unsigned long hbase, unsigned long hmask)
+                              unsigned long hbase, unsigned long hmask,
+                              unsigned long vmid)
 {
-       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
-                           KVM_REQ_HFENCE_VVMA_ALL, NULL);
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_HFENCE_VVMA_ALL;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
 
 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
 {
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
                                       gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
-                                      PAGE_SHIFT);
+                                      PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
        return 0;
 }
index b17fad091babdc3446521656db82e958329a1db3..b490ed1428a682db245929836516e60b3be62955 100644 (file)
@@ -96,6 +96,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
        unsigned long hmask = cp->a0;
        unsigned long hbase = cp->a1;
        unsigned long funcid = cp->a6;
+       unsigned long vmid;
 
        switch (funcid) {
        case SBI_EXT_RFENCE_REMOTE_FENCE_I:
@@ -103,22 +104,22 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+               vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
-                       kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
+                       kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
                else
                        kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
-                                                 cp->a2, cp->a3, PAGE_SHIFT);
+                                                 cp->a2, cp->a3, PAGE_SHIFT, vmid);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+               vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
-                       kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
-                                                      hbase, hmask, cp->a4);
+                       kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+                                                      cp->a4, vmid);
                else
-                       kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
-                                                      hbase, hmask,
-                                                      cp->a2, cp->a3,
-                                                      PAGE_SHIFT, cp->a4);
+                       kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
+                                                      cp->a3, PAGE_SHIFT, cp->a4, vmid);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
index 8f4c4fa16227a125d9af80354d44145ee38884f0..368dfddd23d96cdff316416b3797bc75f24b413f 100644 (file)
@@ -23,6 +23,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
        struct kvm *kvm = vcpu->kvm;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        struct kvm_cpu_trap *utrap = retdata->utrap;
+       unsigned long vmid;
 
        switch (cp->a7) {
        case SBI_EXT_0_1_CONSOLE_GETCHAR:
@@ -78,25 +79,21 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
                        kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
                else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
+                       vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                        if (cp->a1 == 0 && cp->a2 == 0)
-                               kvm_riscv_hfence_vvma_all(vcpu->kvm,
-                                                         0, hmask);
+                               kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
                        else
-                               kvm_riscv_hfence_vvma_gva(vcpu->kvm,
-                                                         0, hmask,
-                                                         cp->a1, cp->a2,
-                                                         PAGE_SHIFT);
+                               kvm_riscv_hfence_vvma_gva(vcpu->kvm, 0, hmask, cp->a1,
+                                                         cp->a2, PAGE_SHIFT, vmid);
                } else {
+                       vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                        if (cp->a1 == 0 && cp->a2 == 0)
-                               kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
-                                                              0, hmask,
-                                                              cp->a3);
+                               kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, 0, hmask,
+                                                              cp->a3, vmid);
                        else
-                               kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
-                                                              0, hmask,
-                                                              cp->a1, cp->a2,
-                                                              PAGE_SHIFT,
-                                                              cp->a3);
+                               kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, 0, hmask,
+                                                              cp->a1, cp->a2, PAGE_SHIFT,
+                                                              cp->a3, vmid);
                }
                break;
        default: