From 974555d6e417974e63444266e495a06d06c23af5 Mon Sep 17 00:00:00 2001
From: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Date: Fri, 21 Nov 2025 21:35:43 +0800
Subject: [PATCH] RISC-V: KVM: Fix guest page fault within HLV* instructions

When executing HLV* instructions in HS mode, a guest page fault may
occur if a g-stage page table migration happens between triggering the
virtual instruction exception and executing the HLV* instruction.

This is a corner case, and a simple way to handle it is to re-execute
the instruction where the virtual instruction exception occurred; the
guest page fault will then be handled automatically.

Fixes: b91f0e4cb8a3 ("RISC-V: KVM: Factor-out instruction emulation into separate sources")
Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251121133543.46822-1-fangyu.yu@linux.alibaba.com
Signed-off-by: Anup Patel <anup@brainfault.org>
---
 arch/riscv/kvm/vcpu_insn.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index de1f96ea62251..4d89b94128aea 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -298,6 +298,22 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	return (rc <= 0) ? rc : 1;
 }
 
+static bool is_load_guest_page_fault(unsigned long scause)
+{
+	/**
+	 * If a g-stage page fault occurs, the direct approach
+	 * is to let the g-stage page fault handler handle it
+	 * naturally, however, calling the g-stage page fault
+	 * handler here seems rather strange.
+	 * Considering this is a corner case, we can directly
+	 * return to the guest and re-execute the same PC, this
+	 * will trigger a g-stage page fault again and then the
+	 * regular g-stage page fault handler will populate
+	 * g-stage page table.
+	 */
+	return (scause == EXC_LOAD_GUEST_PAGE_FAULT);
+}
+
 /**
  * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
  *
@@ -323,6 +339,8 @@ int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
 							  ct->sepc,
 							  &utrap);
 			if (utrap.scause) {
+				if (is_load_guest_page_fault(utrap.scause))
+					return 1;
 				utrap.sepc = ct->sepc;
 				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
 				return 1;
@@ -378,6 +396,8 @@ int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
 						  &utrap);
 		if (utrap.scause) {
+			if (is_load_guest_page_fault(utrap.scause))
+				return 1;
 			/* Redirect trap if we failed to read instruction */
 			utrap.sepc = ct->sepc;
 			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
@@ -504,6 +524,8 @@ int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
 						  &utrap);
 		if (utrap.scause) {
+			if (is_load_guest_page_fault(utrap.scause))
+				return 1;
 			/* Redirect trap if we failed to read instruction */
 			utrap.sepc = ct->sepc;
 			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-- 
2.47.3
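
For readers less familiar with the exit path, below is a minimal, self-contained C
sketch of the retry logic the commit message describes. It is not kernel code: the
names unpriv_read(), gstage_page_fault(), gstage_mapped and the retry loop are
hypothetical stand-ins for kvm_riscv_vcpu_unpriv_read(), the regular g-stage page
fault handler, and the g-stage page table. It only illustrates why resuming the
guest at the same sepc is enough: the re-executed access raises a genuine g-stage
fault, the regular handler repopulates the mapping, and the retried fetch succeeds.

#include <stdbool.h>
#include <stdio.h>

enum trap_cause {
	TRAP_NONE,
	TRAP_LOAD_GUEST_PAGE_FAULT,	/* g-stage mapping missing during HLV* fetch */
};

static bool gstage_mapped;		/* toy stand-in for the g-stage PTE */

/* Toy stand-in for kvm_riscv_vcpu_unpriv_read(): faults while unmapped. */
static enum trap_cause unpriv_read(unsigned long *insn)
{
	if (!gstage_mapped)
		return TRAP_LOAD_GUEST_PAGE_FAULT;
	*insn = 0x73;			/* pretend we fetched the trapped opcode */
	return TRAP_NONE;
}

/* Toy stand-in for the regular g-stage page fault handler. */
static void gstage_page_fault(void)
{
	gstage_mapped = true;		/* repopulate the g-stage mapping */
}

int main(void)
{
	unsigned long insn = 0;
	int retries = 0;

	/*
	 * First attempt: the mapping migrated away, so the fetch faults.
	 * Instead of redirecting the fault to the guest, "return to the
	 * guest" at the same PC; the resulting g-stage fault is handled
	 * by the normal handler and the fetch is retried.
	 */
	while (unpriv_read(&insn) == TRAP_LOAD_GUEST_PAGE_FAULT) {
		retries++;
		gstage_page_fault();
	}

	printf("fetched insn 0x%lx after %d retried fault(s)\n", insn, retries);
	return 0;
}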