RISC-V: KVM: Introduce struct kvm_gstage_mapping
author     Anup Patel <apatel@ventanamicro.com>
           Wed, 18 Jun 2025 11:35:29 +0000 (17:05 +0530)
committer  Anup Patel <anup@brainfault.org>
           Mon, 28 Jul 2025 16:57:25 +0000 (22:27 +0530)
Introduce struct kvm_gstage_mapping, which represents a g-stage
mapping at a particular g-stage page table level. Also, update
kvm_riscv_gstage_map() to return the g-stage mapping upon
success.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Tested-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com>
Link: https://lore.kernel.org/r/20250618113532.471448-10-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_mmu.h
arch/riscv/kvm/mmu.c
arch/riscv/kvm/vcpu_exit.c
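
Below is a minimal usage sketch (not part of this commit) of how a caller might
consume the new out_map parameter; the example_map_and_inspect() function and
the pr_debug() trace are hypothetical and only illustrate the fields filled in
by kvm_riscv_gstage_map() on success:

/* Hypothetical illustration only; not part of this patch. */
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>

static int example_map_and_inspect(struct kvm_vcpu *vcpu,
                                   struct kvm_memory_slot *memslot,
                                   gpa_t gpa, unsigned long hva,
                                   bool is_write)
{
        struct kvm_gstage_mapping map;
        int ret;

        /* On success, kvm_riscv_gstage_map() now fills 'map'. */
        ret = kvm_riscv_gstage_map(vcpu, memslot, gpa, hva, is_write, &map);
        if (ret < 0)
                return ret;

        /*
         * The installed leaf PTE, its guest physical address, and its
         * page table level are now visible to the caller, e.g. for
         * tracing or later reuse.
         */
        pr_debug("g-stage map: addr=0x%llx pte=0x%lx level=%u\n",
                 map.addr, pte_val(map.pte), map.level);

        return 0;
}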

diff --git a/arch/riscv/include/asm/kvm_mmu.h b/arch/riscv/include/asm/kvm_mmu.h
index 4e1654282ee4bd3a4433e15f38d3218e7cf19ca4..91c11e692dc75bcb08a4cfe670fac740728be865 100644
--- a/arch/riscv/include/asm/kvm_mmu.h
+++ b/arch/riscv/include/asm/kvm_mmu.h
@@ -8,6 +8,12 @@
 
 #include <linux/kvm_types.h>
 
+struct kvm_gstage_mapping {
+       gpa_t addr;
+       pte_t pte;
+       u32 level;
+};
+
 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
                             phys_addr_t hpa, unsigned long size,
                             bool writable, bool in_atomic);
@@ -15,7 +21,8 @@ void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
                              unsigned long size);
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
-                        gpa_t gpa, unsigned long hva, bool is_write);
+                        gpa_t gpa, unsigned long hva, bool is_write,
+                        struct kvm_gstage_mapping *out_map);
 int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
 void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index c1a3eb076df31dbf527919f96b4bb25c54445430..806614b3e46d47ca60be84d3c59429e5b6945d43 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -135,18 +135,18 @@ static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
 }
 
-static int gstage_set_pte(struct kvm *kvm, u32 level,
-                          struct kvm_mmu_memory_cache *pcache,
-                          gpa_t addr, const pte_t *new_pte)
+static int gstage_set_pte(struct kvm *kvm,
+                         struct kvm_mmu_memory_cache *pcache,
+                         const struct kvm_gstage_mapping *map)
 {
        u32 current_level = gstage_pgd_levels - 1;
        pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
-       pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+       pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 
-       if (current_level < level)
+       if (current_level < map->level)
                return -EINVAL;
 
-       while (current_level != level) {
+       while (current_level != map->level) {
                if (gstage_pte_leaf(ptep))
                        return -EEXIST;
 
@@ -165,13 +165,13 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
                }
 
                current_level--;
-               ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+               ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
        }
 
-       if (pte_val(*ptep) != pte_val(*new_pte)) {
-               set_pte(ptep, *new_pte);
+       if (pte_val(*ptep) != pte_val(map->pte)) {
+               set_pte(ptep, map->pte);
                if (gstage_pte_leaf(ptep))
-                       gstage_remote_tlb_flush(kvm, current_level, addr);
+                       gstage_remote_tlb_flush(kvm, current_level, map->addr);
        }
 
        return 0;
@@ -181,14 +181,16 @@ static int gstage_map_page(struct kvm *kvm,
                           struct kvm_mmu_memory_cache *pcache,
                           gpa_t gpa, phys_addr_t hpa,
                           unsigned long page_size,
-                          bool page_rdonly, bool page_exec)
+                          bool page_rdonly, bool page_exec,
+                          struct kvm_gstage_mapping *out_map)
 {
-       int ret;
-       u32 level = 0;
-       pte_t new_pte;
        pgprot_t prot;
+       int ret;
 
-       ret = gstage_page_size_to_level(page_size, &level);
+       out_map->addr = gpa;
+       out_map->level = 0;
+
+       ret = gstage_page_size_to_level(page_size, &out_map->level);
        if (ret)
                return ret;
 
@@ -216,10 +218,10 @@ static int gstage_map_page(struct kvm *kvm,
                else
                        prot = PAGE_WRITE;
        }
-       new_pte = pfn_pte(PFN_DOWN(hpa), prot);
-       new_pte = pte_mkdirty(new_pte);
+       out_map->pte = pfn_pte(PFN_DOWN(hpa), prot);
+       out_map->pte = pte_mkdirty(out_map->pte);
 
-       return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
+       return gstage_set_pte(kvm, pcache, out_map);
 }
 
 enum gstage_op {
@@ -352,7 +354,6 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
                             phys_addr_t hpa, unsigned long size,
                             bool writable, bool in_atomic)
 {
-       pte_t pte;
        int ret = 0;
        unsigned long pfn;
        phys_addr_t addr, end;
@@ -360,22 +361,25 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
                .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
                .gfp_zero = __GFP_ZERO,
        };
+       struct kvm_gstage_mapping map;
 
        end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(hpa);
 
        for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-               pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+               map.addr = addr;
+               map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+               map.level = 0;
 
                if (!writable)
-                       pte = pte_wrprotect(pte);
+                       map.pte = pte_wrprotect(map.pte);
 
                ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
                if (ret)
                        goto out;
 
                spin_lock(&kvm->mmu_lock);
-               ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
+               ret = gstage_set_pte(kvm, &pcache, &map);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;
@@ -593,7 +597,8 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
-                        gpa_t gpa, unsigned long hva, bool is_write)
+                        gpa_t gpa, unsigned long hva, bool is_write,
+                        struct kvm_gstage_mapping *out_map)
 {
        int ret;
        kvm_pfn_t hfn;
@@ -608,6 +613,9 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
        unsigned long vma_pagesize, mmu_seq;
        struct page *page;
 
+       /* Setup initial state of output mapping */
+       memset(out_map, 0, sizeof(*out_map));
+
        /* We need minimum second+third level pages */
        ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
        if (ret) {
@@ -677,10 +685,10 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
        if (writable) {
                mark_page_dirty(kvm, gfn);
                ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-                                     vma_pagesize, false, true);
+                                     vma_pagesize, false, true, out_map);
        } else {
                ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-                                     vma_pagesize, true, true);
+                                     vma_pagesize, true, true, out_map);
        }
 
        if (ret)
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 965df528de908ab5da41fb6518f55c296d9bfe6f..6b4694bc07ea73ff9bf8b016c9e6c06e4c5c025e 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -15,6 +15,7 @@
 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
                             struct kvm_cpu_trap *trap)
 {
+       struct kvm_gstage_mapping host_map;
        struct kvm_memory_slot *memslot;
        unsigned long hva, fault_addr;
        bool writable;
@@ -43,7 +44,7 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
        }
 
        ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
-               (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
+               (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false, &host_map);
        if (ret < 0)
                return ret;
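
For reference, the new level field records the g-stage page table level at
which the leaf PTE was installed. A rough sketch of the corresponding mapping
size, assuming riscv64 with 4 KiB base pages and 9 VPN bits per level (the
helper below is hypothetical and not part of this patch):

/* Hypothetical helper for illustration; not part of this patch. */
static unsigned long example_gstage_level_to_size(u32 level)
{
        /* level 0 -> 4 KiB, level 1 -> 2 MiB, level 2 -> 1 GiB, ... */
        return 1UL << (12 + 9 * level);
}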