RISC-V: KVM: Use SBI sync SRET call when available
author    Anup Patel <apatel@ventanamicro.com>
          Sun, 20 Oct 2024 19:47:32 +0000 (01:17 +0530)
committer Anup Patel <anup@brainfault.org>
          Mon, 28 Oct 2024 11:14:03 +0000 (16:44 +0530)
Implement an optimized KVM world-switch using the SBI sync SRET call
when the SBI nested acceleration extension is available. This improves
KVM world-switch performance when KVM RISC-V is running as a Guest
under some other hypervisor.
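
At a high level, the change adds a second world-switch path selected at
run time. A simplified sketch of its shape (details such as HSTATUS
handling and CSR auto-swap are in the vcpu.c hunk below):

	/*
	 * Simplified shape of the new world-switch selection; the real
	 * code in kvm_riscv_vcpu_enter_exit() below also stages guest
	 * GPRs in the NACL shared memory and swaps HSTATUS.
	 */
	if (kvm_riscv_nacl_sync_sret_available())
		/* Resume the guest via a single SBI call to the outer hypervisor. */
		__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
					   SBI_EXT_NACL_SYNC_SRET);
	else
		/* Existing path: local world-switch with a direct sret. */
		__kvm_riscv_switch_to(&vcpu->arch);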

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-12-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_nacl.h
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_switch.S

diff --git a/arch/riscv/include/asm/kvm_nacl.h b/arch/riscv/include/asm/kvm_nacl.h
index 8f3e3ebf50175dc6a192fd46c98fec7a7ef46a15..4124d5e06a0ff1f6528cc91c23383de544c0fa61 100644
--- a/arch/riscv/include/asm/kvm_nacl.h
+++ b/arch/riscv/include/asm/kvm_nacl.h
@@ -12,6 +12,8 @@
 #include <asm/csr.h>
 #include <asm/sbi.h>
 
+struct kvm_vcpu_arch;
+
 DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
 #define kvm_riscv_nacl_available() \
        static_branch_unlikely(&kvm_riscv_nacl_available)
@@ -43,6 +45,10 @@ void __kvm_riscv_nacl_hfence(void *shmem,
                             unsigned long page_num,
                             unsigned long page_count);
 
+void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
+                               unsigned long sbi_ext_id,
+                               unsigned long sbi_func_id);
+
 int kvm_riscv_nacl_enable(void);
 
 void kvm_riscv_nacl_disable(void);
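
The SBI extension and function IDs are passed down so that the assembly
routine (see the vcpu_switch.S hunk below) can issue the ecall itself
after saving host state. For reference, a minimal sketch of the SBI
register convention that ecall relies on; the sbi_call0() name is
hypothetical, and the real sync SRET call must be made from assembly
because it does not return to the caller in the usual way:

	/*
	 * Sketch of the SBI calling convention: extension ID in a7,
	 * function ID in a6, error code returned in a0. Only meant to
	 * illustrate the register usage of the ecall in
	 * __kvm_riscv_nacl_switch_to.
	 */
	static inline long sbi_call0(unsigned long ext_id, unsigned long func_id)
	{
		register unsigned long a6 asm("a6") = func_id;
		register unsigned long a7 asm("a7") = ext_id;
		register long a0 asm("a0");

		asm volatile("ecall"
			     : "=r" (a0)
			     : "r" (a6), "r" (a7)
			     : "memory", "a1");

		return a0;	/* SBI error code */
	}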
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 1fb2ac9f34cc1b0ef209c0ba626ce8b6f771ade0..113eb8957472421cc3f585044392897402042a87 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -766,19 +766,59 @@ static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *v
  */
 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
+       void *nsh;
        struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;
 
        kvm_riscv_vcpu_swap_in_guest_state(vcpu);
        guest_state_enter_irqoff();
 
-       hcntx->hstatus = ncsr_swap(CSR_HSTATUS, gcntx->hstatus);
+       if (kvm_riscv_nacl_sync_sret_available()) {
+               nsh = nacl_shmem();
 
-       nsync_csr(-1UL);
+               if (kvm_riscv_nacl_autoswap_csr_available()) {
+                       hcntx->hstatus =
+                               nacl_csr_read(nsh, CSR_HSTATUS);
+                       nacl_scratch_write_long(nsh,
+                                               SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+                                               SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
+                                               gcntx->hstatus);
+                       nacl_scratch_write_long(nsh,
+                                               SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+                                               SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
+               } else if (kvm_riscv_nacl_sync_csr_available()) {
+                       hcntx->hstatus = nacl_csr_swap(nsh,
+                                                      CSR_HSTATUS, gcntx->hstatus);
+               } else {
+                       hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+               }
 
-       __kvm_riscv_switch_to(&vcpu->arch);
+               nacl_scratch_write_longs(nsh,
+                                        SBI_NACL_SHMEM_SRET_OFFSET +
+                                        SBI_NACL_SHMEM_SRET_X(1),
+                                        &gcntx->ra,
+                                        SBI_NACL_SHMEM_SRET_X_LAST);
+
+               __kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
+                                          SBI_EXT_NACL_SYNC_SRET);
+
+               if (kvm_riscv_nacl_autoswap_csr_available()) {
+                       nacl_scratch_write_long(nsh,
+                                               SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+                                               0);
+                       gcntx->hstatus = nacl_scratch_read_long(nsh,
+                                                               SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+                                                               SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
+               } else {
+                       gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+               }
+       } else {
+               hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
 
-       gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+               __kvm_riscv_switch_to(&vcpu->arch);
+
+               gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+       }
 
        vcpu->arch.last_exit_cpu = vcpu->cpu;
        guest_state_exit_irqoff();
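
The auto-swap writes above stage the guest HSTATUS next to a flags word
in the NACL scratch space, so the outer SBI implementation swaps it as
part of the sync SRET itself. An illustrative (not authoritative) view
of that area, assuming the offsets used above address a flags word
followed by the HSTATUS slot:

	/*
	 * Illustrative layout of the CSR auto-swap area addressed via
	 * SBI_NACL_SHMEM_AUTOSWAP_OFFSET above; the actual layout is
	 * defined by the SBI NACL extension, this struct is only a
	 * reading aid for the scratch reads/writes in the hunk above.
	 */
	struct nacl_autoswap_area_sketch {
		unsigned long flags;	/* e.g. SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS */
		unsigned long hstatus;	/* at SBI_NACL_SHMEM_AUTOSWAP_HSTATUS; swapped with CSR_HSTATUS */
	};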
diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
index 9f13e5ce6a18fce6bb5542b554efb7433f857ce3..47686bcb21e0a5db3e1e8e57a028650031483e43 100644
--- a/arch/riscv/kvm/vcpu_switch.S
+++ b/arch/riscv/kvm/vcpu_switch.S
@@ -218,6 +218,35 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
        ret
 SYM_FUNC_END(__kvm_riscv_switch_to)
 
+       /*
+        * Parameters:
+        * A0 <= Pointer to struct kvm_vcpu_arch
+        * A1 <= SBI extension ID
+        * A2 <= SBI function ID
+        */
+SYM_FUNC_START(__kvm_riscv_nacl_switch_to)
+       SAVE_HOST_GPRS
+
+       SAVE_HOST_AND_RESTORE_GUEST_CSRS .Lkvm_nacl_switch_return
+
+       /* Resume Guest using SBI nested acceleration */
+       add     a6, a2, zero
+       add     a7, a1, zero
+       ecall
+
+       /* Back to Host */
+       .align 2
+.Lkvm_nacl_switch_return:
+       SAVE_GUEST_GPRS
+
+       SAVE_GUEST_AND_RESTORE_HOST_CSRS
+
+       RESTORE_HOST_GPRS
+
+       /* Return to C code */
+       ret
+SYM_FUNC_END(__kvm_riscv_nacl_switch_to)
+
 SYM_CODE_START(__kvm_riscv_unpriv_trap)
        /*
         * We assume that faulting unpriv load/store instruction is