LoongArch: KVM: Add FPU/LBT delay load support
author     Bibo Mao <maobibo@loongson.cn>
           Fri, 6 Feb 2026 01:28:00 +0000 (09:28 +0800)
committer  Huacai Chen <chenhuacai@loongson.cn>
           Fri, 6 Feb 2026 01:28:00 +0000 (09:28 +0800)
FPU/LBT are lazily enabled under the KVM hypervisor. Even after FPU/LBT have
been enabled and their context loaded, the vCPU can still be preempted and the
context lost again, which causes unnecessary FPU/LBT exceptions and extra
save/restore work. Delay the FPU/LBT load until guest entry: the exit handlers
now only record which unit to load and raise KVM_REQ_AUX_LOAD, and the actual
kvm_own_*() call is made right before entering the guest, on the
non-preemptible entry path, so the preempt_disable()/preempt_enable() pairs in
kvm_own_*() can be dropped.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/include/asm/kvm_host.h
arch/loongarch/kvm/exit.c
arch/loongarch/kvm/vcpu.c
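
A minimal user-space sketch of the deferral flow, purely illustrative (none of
the names below are the kernel API): the trap handler only records which unit
to load and raises a request; the entry path consumes the request once the
loaded context can no longer be lost to preemption.

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's request bit and unit types. */
	#define REQ_AUX_LOAD	(1u << 0)

	enum aux_unit { AUX_NONE, AUX_FPU, AUX_LSX, AUX_LASX, AUX_LBT };

	struct toy_vcpu {
		unsigned int requests;		/* pending request bitmap */
		enum aux_unit aux_ldtype;	/* unit the last trap asked for */
		unsigned int aux_inuse;		/* units currently loaded */
	};

	/* Exit-handler side: record the unit and raise the request, load nothing yet. */
	static void defer_aux_load(struct toy_vcpu *vcpu, enum aux_unit unit)
	{
		vcpu->aux_ldtype = unit;
		vcpu->requests |= REQ_AUX_LOAD;
	}

	/* Guest-entry side: consume the request and do the real load. */
	static void late_check_requests(struct toy_vcpu *vcpu)
	{
		if (!(vcpu->requests & REQ_AUX_LOAD))
			return;
		vcpu->requests &= ~REQ_AUX_LOAD;

		vcpu->aux_inuse |= 1u << vcpu->aux_ldtype;	/* stands in for kvm_own_*() */
		vcpu->aux_ldtype = AUX_NONE;
	}

	int main(void)
	{
		struct toy_vcpu vcpu = { 0 };

		defer_aux_load(&vcpu, AUX_FPU);	/* FPU-disabled trap */
		/* ... the vCPU may be preempted here without losing anything ... */
		late_check_requests(&vcpu);	/* just before guest entry */
		printf("aux_inuse = %#x\n", vcpu.aux_inuse);
		return 0;
	}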

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index bced2d60784921e3dba018c7b2ac0030c4626a65..4a7e816fae84f2af763e1c28b34220577d3a8c8a 100644
@@ -37,6 +37,7 @@
 #define KVM_REQ_TLB_FLUSH_GPA          KVM_ARCH_REQ(0)
 #define KVM_REQ_STEAL_UPDATE           KVM_ARCH_REQ(1)
 #define KVM_REQ_PMU                    KVM_ARCH_REQ(2)
+#define KVM_REQ_AUX_LOAD               KVM_ARCH_REQ(3)
 
 #define KVM_GUESTDBG_SW_BP_MASK                \
        (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -200,6 +201,7 @@ struct kvm_vcpu_arch {
 
        /* Which auxiliary state is loaded (KVM_LARCH_*) */
        unsigned int aux_inuse;
+       unsigned int aux_ldtype;
 
        /* FPU state */
        struct loongarch_fpu fpu FPU_ALIGN;
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index 65ec10a7245a96011957716ea0c186254ad9021b..da0ad89f2eb7467ad6ba4597713e93f9cbc36641 100644
@@ -754,7 +754,8 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
                return RESUME_HOST;
        }
 
-       kvm_own_fpu(vcpu);
+       vcpu->arch.aux_ldtype = KVM_LARCH_FPU;
+       kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
 
        return RESUME_GUEST;
 }
@@ -794,8 +795,10 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
 {
        if (!kvm_guest_has_lsx(&vcpu->arch))
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
-       else
-               kvm_own_lsx(vcpu);
+       else {
+               vcpu->arch.aux_ldtype = KVM_LARCH_LSX;
+               kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
+       }
 
        return RESUME_GUEST;
 }
@@ -812,8 +815,10 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
 {
        if (!kvm_guest_has_lasx(&vcpu->arch))
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
-       else
-               kvm_own_lasx(vcpu);
+       else {
+               vcpu->arch.aux_ldtype = KVM_LARCH_LASX;
+               kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
+       }
 
        return RESUME_GUEST;
 }
@@ -822,8 +827,10 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
 {
        if (!kvm_guest_has_lbt(&vcpu->arch))
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
-       else
-               kvm_own_lbt(vcpu);
+       else {
+               vcpu->arch.aux_ldtype = KVM_LARCH_LBT;
+               kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
+       }
 
        return RESUME_GUEST;
 }
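
The FPU, LSX, LASX and LBT handlers above all repeat the same two steps. A
hypothetical helper (not part of this patch; kvm_defer_aux_load() is made up
here purely for illustration) could factor that pattern out:

	/* Hypothetical, for illustration only -- not part of this patch. */
	static int kvm_defer_aux_load(struct kvm_vcpu *vcpu, unsigned int ldtype)
	{
		/* Record which unit to load and let the guest-entry path do the work. */
		vcpu->arch.aux_ldtype = ldtype;
		kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);

		return RESUME_GUEST;
	}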
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 07c427a5e15661a09d0ec4fc0916e0a529480e05..4f0d10f52b99bdaab34139ef6e8105f71c36da67 100644
@@ -232,6 +232,27 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
                        kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
                        vcpu->arch.flush_gpa = INVALID_GPA;
                }
+
+       if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
+               switch (vcpu->arch.aux_ldtype) {
+               case KVM_LARCH_FPU:
+                       kvm_own_fpu(vcpu);
+                       break;
+               case KVM_LARCH_LSX:
+                       kvm_own_lsx(vcpu);
+                       break;
+               case KVM_LARCH_LASX:
+                       kvm_own_lasx(vcpu);
+                       break;
+               case KVM_LARCH_LBT:
+                       kvm_own_lbt(vcpu);
+                       break;
+               default:
+                       break;
+               }
+
+               vcpu->arch.aux_ldtype = 0;
+       }
 }
 
 /*
@@ -1304,13 +1325,11 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 #ifdef CONFIG_CPU_HAS_LBT
 int kvm_own_lbt(struct kvm_vcpu *vcpu)
 {
-       preempt_disable();
        if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
                set_csr_euen(CSR_EUEN_LBTEN);
                _restore_lbt(&vcpu->arch.lbt);
                vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
        }
-       preempt_enable();
 
        return 0;
 }
@@ -1353,8 +1372,6 @@ static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
 /* Enable FPU and restore context */
 void kvm_own_fpu(struct kvm_vcpu *vcpu)
 {
-       preempt_disable();
-
        /*
         * Enable FPU for guest
         * Set FR and FRE according to guest context
@@ -1365,16 +1382,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
        kvm_restore_fpu(&vcpu->arch.fpu);
        vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
-
-       preempt_enable();
 }
 
 #ifdef CONFIG_CPU_HAS_LSX
 /* Enable LSX and restore context */
 int kvm_own_lsx(struct kvm_vcpu *vcpu)
 {
-       preempt_disable();
-
        /* Enable LSX for guest */
        kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
        set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
@@ -1396,7 +1409,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
 
        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
        vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
-       preempt_enable();
 
        return 0;
 }
@@ -1406,8 +1418,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
 /* Enable LASX and restore context */
 int kvm_own_lasx(struct kvm_vcpu *vcpu)
 {
-       preempt_disable();
-
        kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
        set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
        switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
@@ -1429,7 +1439,6 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
 
        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
        vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
-       preempt_enable();
 
        return 0;
 }