git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: riscv: Skip CSR restore if VCPU is reloaded on the same core
author Jinyu Tang <tjytimi@163.com>
Fri, 27 Feb 2026 12:10:08 +0000 (20:10 +0800)
committer Anup Patel <anup@brainfault.org>
Thu, 2 Apr 2026 13:01:51 +0000 (18:31 +0530)
Currently, kvm_arch_vcpu_load() unconditionally restores guest CSRs,
HGATP, and AIA state. However, when a VCPU is loaded back on the same
physical CPU, and no other KVM VCPU has run on this CPU since it was
last put, the hardware CSRs and AIA registers are still valid.

This patch optimizes the vcpu_load path by skipping the expensive CSR
and AIA writes if all the following conditions are met:
1. It is being reloaded on the same CPU (vcpu->arch.last_exit_cpu == cpu).
2. The CSRs are not dirty (!vcpu->arch.csr_dirty).
3. No other VCPU used this CPU (vcpu == __this_cpu_read(kvm_former_vcpu)).

To ensure this fast-path doesn't break corner cases:
- Live migration and VCPU reset are naturally safe. KVM initializes
  last_exit_cpu to -1, which guarantees the fast-path won't trigger.
- The 'csr_dirty' flag tracks runtime userspace interventions. If
  userspace modifies guest configurations (e.g., hedeleg via
  KVM_SET_GUEST_DEBUG, or CSRs including AIA via KVM_SET_ONE_REG),
  the flag is set to skip the fast path.

With the 'csr_dirty' safeguard in place, it is now also safe to move
kvm_riscv_vcpu_aia_load() inside the skip logic.

Signed-off-by: Jinyu Tang <tjytimi@163.com>
Reviewed-by: Nutty Liu <nutty.liu@hotmail.com>
Reviewed-by: Andrew Jones <andrew.jones@oss.qualcomm.com>
Reviewed-by: Radim Krčmář <radim.krcmar@oss.qualcomm.com>
Link: https://lore.kernel.org/r/20260227121008.442241-1-tjytimi@163.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_onereg.c

index 24585304c02b14b410e3df5c77d41a6db6f6b273..7ee47b83c80d5cce402503fc96741af1fa0b1161 100644 (file)
@@ -273,6 +273,9 @@ struct kvm_vcpu_arch {
        /* 'static' configurations which are set only once */
        struct kvm_vcpu_config cfg;
 
+       /* Indicates modified guest CSRs */
+       bool csr_dirty;
+
        /* SBI steal-time accounting */
        struct {
                gpa_t shmem;
index fdd99ac1e714824f5bbd600e2fdd776d12512c52..1d5c777eba80b044cf6ff9dd01c7bcea9fd533e6 100644 (file)
@@ -24,6 +24,8 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_former_vcpu);
+
 const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
@@ -537,6 +539,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT);
        }
 
+       vcpu->arch.csr_dirty = true;
+
        return 0;
 }
 
@@ -581,6 +585,21 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
+       /*
+        * If VCPU is being reloaded on the same physical CPU and no
+        * other KVM VCPU has run on this CPU since it was last put,
+        * we can skip the expensive CSR and HGATP writes.
+        *
+        * Note: If a new CSR is added to this fast-path skip block,
+        * make sure that 'csr_dirty' is set to true in any
+        * ioctl (e.g., KVM_SET_ONE_REG) that modifies it.
+        */
+       if (vcpu != __this_cpu_read(kvm_former_vcpu))
+               __this_cpu_write(kvm_former_vcpu, vcpu);
+       else if (vcpu->arch.last_exit_cpu == cpu && !vcpu->arch.csr_dirty)
+               goto csr_restore_done;
+
+       vcpu->arch.csr_dirty = false;
        if (kvm_riscv_nacl_sync_csr_available()) {
                nsh = nacl_shmem();
                nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
@@ -624,6 +643,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        kvm_riscv_mmu_update_hgatp(vcpu);
 
+       kvm_riscv_vcpu_aia_load(vcpu, cpu);
+
+csr_restore_done:
        kvm_riscv_vcpu_timer_restore(vcpu);
 
        kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
@@ -633,8 +655,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
                                            vcpu->arch.isa);
 
-       kvm_riscv_vcpu_aia_load(vcpu, cpu);
-
        kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
        vcpu->cpu = cpu;
index 45ecc0082e9020bb40b2f693a9b5b1beac9676e8..97fa72ba47c104e117b50ea0db5016d977ba4775 100644 (file)
@@ -670,6 +670,8 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
        if (rc)
                return rc;
 
+       vcpu->arch.csr_dirty = true;
+
        return 0;
 }