RISC-V: KVM: Fix guest page fault within HLV* instructions
author     Fangyu Yu <fangyu.yu@linux.alibaba.com>
           Fri, 21 Nov 2025 13:35:43 +0000 (21:35 +0800)
committer  Anup Patel <anup@brainfault.org>
           Mon, 24 Nov 2025 04:25:36 +0000 (09:55 +0530)
When executing HLV* instructions in HS-mode, a guest page fault
may occur if the g-stage page table is migrated between triggering
the virtual instruction exception and executing the HLV* instruction.

This is a corner case, and a simple way to handle it is to
re-execute the instruction at which the virtual instruction exception
occurred; the guest page fault is then handled automatically.
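
To make the retry idea concrete, here is a minimal userspace sketch of
the pattern: returning without advancing the PC simply re-runs the read
once the regular fault handler has populated the mapping. The names
unpriv_read(), handle_gstage_fault() and gstage_mapped are hypothetical
stand-ins for the KVM internals, not kernel APIs; the scause value 21
is the load guest-page fault code per the RISC-V privileged spec.

    #include <stdbool.h>
    #include <stdio.h>

    #define EXC_LOAD_GUEST_PAGE_FAULT 21  /* scause: load guest-page fault */

    static bool gstage_mapped;  /* stands in for the g-stage mapping state */

    /* Stand-in for kvm_riscv_vcpu_unpriv_read(): faults until mapped. */
    static long unpriv_read(unsigned long *scause)
    {
            if (!gstage_mapped) {
                    *scause = EXC_LOAD_GUEST_PAGE_FAULT;
                    return 0;
            }
            *scause = 0;
            return 0x3020;  /* pretend instruction word */
    }

    /* Stand-in for the regular g-stage page fault handler. */
    static void handle_gstage_fault(void)
    {
            gstage_mapped = true;
    }

    int main(void)
    {
            unsigned long scause;
            long insn;

            for (;;) {
                    insn = unpriv_read(&scause);
                    if (scause == EXC_LOAD_GUEST_PAGE_FAULT) {
                            /*
                             * "Return to the guest" without advancing
                             * the PC: the re-executed access faults into
                             * the regular g-stage handler, which
                             * populates the mapping.
                             */
                            handle_gstage_fault();
                            continue;
                    }
                    break;
            }
            printf("insn read succeeded after retry: 0x%lx\n", insn);
            return 0;
    }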

Fixes: b91f0e4cb8a3 ("RISC-V: KVM: Factor-out instruction emulation into separate sources")
Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251121133543.46822-1-fangyu.yu@linux.alibaba.com
Signed-off-by: Anup Patel <anup@brainfault.org>
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index de1f96ea62251ff8ab45eddcf2e976f0e0d1da29..4d89b94128aea87b8bb623b450969b9d9ffd1b5b 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -298,6 +298,22 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
        return (rc <= 0) ? rc : 1;
 }
 
+static bool is_load_guest_page_fault(unsigned long scause)
+{
+       /*
+        * If a g-stage page fault occurs, the direct approach
+        * would be to let the g-stage page fault handler deal
+        * with it naturally; however, calling that handler
+        * from here would be rather strange.
+        * Since this is a corner case, simply return to the
+        * guest and re-execute at the same PC; this triggers
+        * the g-stage page fault again, and the regular
+        * g-stage page fault handler then populates the
+        * g-stage page table.
+        */
+       return (scause == EXC_LOAD_GUEST_PAGE_FAULT);
+}
+
 /**
  * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
  *
@@ -323,6 +339,8 @@ int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                                          ct->sepc,
                                                          &utrap);
                        if (utrap.scause) {
+                               if (is_load_guest_page_fault(utrap.scause))
+                                       return 1;
                                utrap.sepc = ct->sepc;
                                kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
                                return 1;
@@ -378,6 +396,8 @@ int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
                insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
                                                  &utrap);
                if (utrap.scause) {
+                       if (is_load_guest_page_fault(utrap.scause))
+                               return 1;
                        /* Redirect trap if we failed to read instruction */
                        utrap.sepc = ct->sepc;
                        kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
@@ -504,6 +524,8 @@ int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
                insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
                                                  &utrap);
                if (utrap.scause) {
+                       if (is_load_guest_page_fault(utrap.scause))
+                               return 1;
                        /* Redirect trap if we failed to read instruction */
                        utrap.sepc = ct->sepc;
                        kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);