LoongArch: KVM: Compile switch.S directly into the kernel
author    Xianglai Li <lixianglai@loongson.cn>
          Mon, 4 May 2026 01:00:37 +0000 (09:00 +0800)
committer Huacai Chen <chenhuacai@loongson.cn>
          Mon, 4 May 2026 01:00:37 +0000 (09:00 +0800)
If we compile the switch.S file directly into the kernel, the address of
the kvm_exc_entry function is guaranteed to be within the DMW memory area,
so we no longer need to perform a copy relocation of kvm_exc_entry.

Therefore this patch compiles switch.S directly into the kernel and then
removes the copy relocation logic for the kvm_exc_entry function.

Cc: stable@vger.kernel.org
Signed-off-by: Xianglai Li <lixianglai@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
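
For context, the "DMW" above is LoongArch's Direct Mapped Window: a range of
virtual addresses translated by fixed mapping windows rather than the TLB,
which is where the kernel image itself is linked. The sketch below only
illustrates the property the patch relies on; the in_dmw_cached() helper is
hypothetical, and the exact window layout (CACHE_BASE from <asm/addrspace.h>)
is an assumption, not part of this patch.

        #include <linux/types.h>
        #include <asm/addrspace.h>      /* CACHE_BASE: base of the cached DMW */

        /*
         * Hypothetical check, assuming the usual setup where the cached DMW
         * is selected by the high address bits.  Code running at such an
         * address cannot take a TLB refill fault, which matters here because
         * the PGD register still refers to guest page tables during the
         * world switch.
         */
        static inline bool in_dmw_cached(unsigned long addr)
        {
                return (addr & CACHE_BASE) == CACHE_BASE;
        }

        /* With switch.S linked into vmlinux this holds by construction:
         * in_dmw_cached((unsigned long)kvm_exc_entry) */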
arch/loongarch/Kbuild
arch/loongarch/include/asm/asm-prototypes.h
arch/loongarch/include/asm/kvm_host.h
arch/loongarch/kvm/Makefile
arch/loongarch/kvm/main.c
arch/loongarch/kvm/switch.S

diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
index beb8499dd8ed84330beecbcd61977df0aa3474f8..1c7a0dbe5e72f211f40ec4622d40ac01749d5c53 100644
@@ -3,7 +3,7 @@ obj-y += mm/
 obj-y += net/
 obj-y += vdso/
 
-obj-$(CONFIG_KVM) += kvm/
+obj-$(subst m,y,$(CONFIG_KVM)) += kvm/
 
 # for cleaning
 subdir- += boot
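
Note: $(subst m,y,$(CONFIG_KVM)) maps both y and m to y, so Kbuild now
descends into kvm/ whenever KVM is enabled at all. This is needed because
switch.o (moved to obj-y in the kvm/Makefile hunk below) must be linked into
vmlinux even when the rest of KVM is built as a module.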
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index 704066b4f7368be15be960fadbcd6c2574bbf6c0..de0c17f3f49c2ce3067d7780b6f09be112a61e70 100644
@@ -20,3 +20,23 @@ asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_
                                                                    struct pt_regs *regs,
                                                                    int (*fn)(void *),
                                                                    void *fn_arg);
+
+struct kvm_run;
+struct kvm_vcpu;
+struct loongarch_fpu;
+
+void kvm_exc_entry(void);
+int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+void kvm_save_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fpu(struct loongarch_fpu *fpu);
+
+#ifdef CONFIG_CPU_HAS_LSX
+void kvm_save_lsx(struct loongarch_fpu *fpu);
+void kvm_restore_lsx(struct loongarch_fpu *fpu);
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+void kvm_save_lasx(struct loongarch_fpu *fpu);
+void kvm_restore_lasx(struct loongarch_fpu *fpu);
+#endif
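
These prototypes are new here because the functions are now exported from
assembly (see the EXPORT_SYMBOL_FOR_KVM() lines in switch.S below):
asm/asm-prototypes.h is where the build expects C declarations for symbols
exported from .S files, e.g. so that CONFIG_MODVERSIONS can generate CRCs
for them.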
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 130cedbb6b39b3ecffb0c2b107231c14d6f7fc66..776bc487a705277f69a144937a5e257f94afac3e 100644
@@ -87,7 +87,6 @@ struct kvm_context {
 struct kvm_world_switch {
        int (*exc_entry)(void);
        int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-       unsigned long page_order;
 };
 
 #define MAX_PGTABLE_LEVELS     4
@@ -359,8 +358,6 @@ void kvm_exc_entry(void);
 int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
 extern unsigned long vpid_mask;
-extern const unsigned long kvm_exception_size;
-extern const unsigned long kvm_enter_guest_size;
 extern struct kvm_world_switch *kvm_loongarch_ops;
 
 #define SW_GCSR                (1 << 0)
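
With the page_order field gone, kvm_world_switch is just a pair of entry
points. A hedged sketch of how they are wired up and consumed follows; the
enter_guest call site is paraphrased from the vcpu run path and is an
assumption, not shown in this diff:

        /* Setup (from the main.c hunk below): point at the linked symbols. */
        kvm_loongarch_ops->exc_entry   = (void *)kvm_exc_entry;
        kvm_loongarch_ops->enter_guest = (void *)kvm_enter_guest;

        /* Consumption (sketch): the vcpu run loop enters the guest through
         * the same table, and hardware returns through exc_entry on a VM
         * exit. */
        ret = kvm_loongarch_ops->enter_guest(vcpu->run, vcpu);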
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index ae469edec99c166b018943e06886f41b8ee821b0..a4d044da3aa7c2cba4fde8c773225c3350e8f69a 100644
@@ -7,11 +7,12 @@ include $(srctree)/virt/kvm/Makefile.kvm
 
 obj-$(CONFIG_KVM) += kvm.o
 
+obj-y += switch.o
+
 kvm-y += exit.o
 kvm-y += interrupt.o
 kvm-y += main.o
 kvm-y += mmu.o
-kvm-y += switch.o
 kvm-y += timer.o
 kvm-y += tlb.o
 kvm-y += vcpu.o
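
Moving switch.o from kvm-y (part of the possibly-modular kvm.o) to obj-y
makes it unconditionally built-in. Combined with the Kbuild change above,
the world-switch code always ends up in vmlinux text, and therefore in the
DMW, regardless of whether CONFIG_KVM is y or m.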
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 76ebff2faeddc97a3732eb373823a90ed7ac3ebc..f105a86143f5ba56c57b54d09bd2a1cf14156211 100644
@@ -348,8 +348,7 @@ void kvm_arch_disable_virtualization_cpu(void)
 
 static int kvm_loongarch_env_init(void)
 {
-       int cpu, order, ret;
-       void *addr;
+       int cpu, ret;
        struct kvm_context *context;
 
        vmcs = alloc_percpu(struct kvm_context);
@@ -365,30 +364,8 @@ static int kvm_loongarch_env_init(void)
                return -ENOMEM;
        }
 
-       /*
-        * PGD register is shared between root kernel and kvm hypervisor.
-        * So world switch entry should be in DMW area rather than TLB area
-        * to avoid page fault reenter.
-        *
-        * In future if hardware pagetable walking is supported, we won't
-        * need to copy world switch code to DMW area.
-        */
-       order = get_order(kvm_exception_size + kvm_enter_guest_size);
-       addr = (void *)__get_free_pages(GFP_KERNEL, order);
-       if (!addr) {
-               free_percpu(vmcs);
-               vmcs = NULL;
-               kfree(kvm_loongarch_ops);
-               kvm_loongarch_ops = NULL;
-               return -ENOMEM;
-       }
-
-       memcpy(addr, kvm_exc_entry, kvm_exception_size);
-       memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
-       flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
-       kvm_loongarch_ops->exc_entry = addr;
-       kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
-       kvm_loongarch_ops->page_order = order;
+       kvm_loongarch_ops->exc_entry = (void *)kvm_exc_entry;
+       kvm_loongarch_ops->enter_guest = (void *)kvm_enter_guest;
 
        vpid_mask = read_csr_gstat();
        vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
@@ -428,16 +405,10 @@ static int kvm_loongarch_env_init(void)
 
 static void kvm_loongarch_env_exit(void)
 {
-       unsigned long addr;
-
        if (vmcs)
                free_percpu(vmcs);
 
        if (kvm_loongarch_ops) {
-               if (kvm_loongarch_ops->exc_entry) {
-                       addr = (unsigned long)kvm_loongarch_ops->exc_entry;
-                       free_pages(addr, kvm_loongarch_ops->page_order);
-               }
                kfree(kvm_loongarch_ops);
        }
 
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index f1768b7a619497323bd7f96cddf8280938ca594b..1d3ba7190154dc82c660dff80d6176dad8ff36c6 100644
@@ -4,9 +4,11 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/kvm_types.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/loongarch.h>
+#include <asm/page.h>
 #include <asm/regdef.h>
 #include <asm/unwind_hints.h>
 
         *  -        is still in guest mode, such as pgd table/vmid registers etc,
         *  -        will fix with hw page walk enabled in future
         * load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
+        *
+        * PGD register is shared between root kernel and kvm hypervisor.
+        * So world switch entry should be in DMW area rather than TLB area
+        * to avoid page fault re-enter.
         */
        .text
+       .p2align PAGE_SHIFT
        .cfi_sections   .debug_frame
 SYM_CODE_START(kvm_exc_entry)
        UNWIND_HINT_UNDEFINED
@@ -190,8 +197,8 @@ ret_to_host:
        kvm_restore_host_gpr    a2
        jr      ra
 
-SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
 SYM_CODE_END(kvm_exc_entry)
+EXPORT_SYMBOL_FOR_KVM(kvm_exc_entry)
 
 /*
  * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -215,8 +222,8 @@ SYM_FUNC_START(kvm_enter_guest)
        /* Save kvm_vcpu to kscratch */
        csrwr   a1, KVM_VCPU_KS
        kvm_switch_to_guest
-SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
 SYM_FUNC_END(kvm_enter_guest)
+EXPORT_SYMBOL_FOR_KVM(kvm_enter_guest)
 
 SYM_FUNC_START(kvm_save_fpu)
        fpu_save_csr    a0 t1
@@ -224,6 +231,7 @@ SYM_FUNC_START(kvm_save_fpu)
        fpu_save_cc     a0 t1 t2
        jr              ra
 SYM_FUNC_END(kvm_save_fpu)
+EXPORT_SYMBOL_FOR_KVM(kvm_save_fpu)
 
 SYM_FUNC_START(kvm_restore_fpu)
        fpu_restore_double a0 t1
@@ -231,6 +239,7 @@ SYM_FUNC_START(kvm_restore_fpu)
        fpu_restore_cc     a0 t1 t2
        jr                 ra
 SYM_FUNC_END(kvm_restore_fpu)
+EXPORT_SYMBOL_FOR_KVM(kvm_restore_fpu)
 
 #ifdef CONFIG_CPU_HAS_LSX
 SYM_FUNC_START(kvm_save_lsx)
@@ -239,6 +248,7 @@ SYM_FUNC_START(kvm_save_lsx)
        lsx_save_data   a0 t1
        jr              ra
 SYM_FUNC_END(kvm_save_lsx)
+EXPORT_SYMBOL_FOR_KVM(kvm_save_lsx)
 
 SYM_FUNC_START(kvm_restore_lsx)
        lsx_restore_data a0 t1
@@ -246,6 +256,7 @@ SYM_FUNC_START(kvm_restore_lsx)
        fpu_restore_csr  a0 t1 t2
        jr               ra
 SYM_FUNC_END(kvm_restore_lsx)
+EXPORT_SYMBOL_FOR_KVM(kvm_restore_lsx)
 #endif
 
 #ifdef CONFIG_CPU_HAS_LASX
@@ -255,6 +266,7 @@ SYM_FUNC_START(kvm_save_lasx)
        lasx_save_data  a0 t1
        jr              ra
 SYM_FUNC_END(kvm_save_lasx)
+EXPORT_SYMBOL_FOR_KVM(kvm_save_lasx)
 
 SYM_FUNC_START(kvm_restore_lasx)
        lasx_restore_data a0 t1
@@ -262,10 +274,8 @@ SYM_FUNC_START(kvm_restore_lasx)
        fpu_restore_csr   a0 t1 t2
        jr                ra
 SYM_FUNC_END(kvm_restore_lasx)
+EXPORT_SYMBOL_FOR_KVM(kvm_restore_lasx)
 #endif
-       .section ".rodata"
-SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
-SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
 
 #ifdef CONFIG_CPU_HAS_LBT
 STACK_FRAME_NON_STANDARD kvm_restore_fpu
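
Two switch.S details worth noting. First, the new .p2align PAGE_SHIFT aligns
kvm_exc_entry to a page boundary; the assumption here is that the CSR
receiving the exception entry address requires a page-aligned base, which
the old page allocation guaranteed implicitly. Second, EXPORT_SYMBOL_FOR_KVM()
replaces the removed size symbols: with nothing copying the code,
kvm_exception_size and kvm_enter_guest_size are dead, but the now
possibly-modular kvm.o still needs the built-in symbols exported. A plausible
shape for that macro, stated as an assumption rather than quoted from this
tree:

        /* Assumption: a KVM-scoped export wrapper, roughly equivalent to */
        #define EXPORT_SYMBOL_FOR_KVM(sym) \
                EXPORT_SYMBOL_GPL_FOR_MODULES(sym, "kvm")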