#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
#define KVM_REQ_PMU KVM_ARCH_REQ(2)
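+/* Reload auxiliary guest context (FPU/LSX/LASX/LBT) before guest entry */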
+#define KVM_REQ_AUX_LOAD KVM_ARCH_REQ(3)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
/* Which auxiliary state is loaded (KVM_LARCH_*) */
unsigned int aux_inuse;
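+ /* Which auxiliary state to load on KVM_REQ_AUX_LOAD (one KVM_LARCH_* value) */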
+ unsigned int aux_ldtype;
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
return RESUME_HOST;
}
- kvm_own_fpu(vcpu);
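+ /* Record the wanted context; the load happens in the request path */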
+ vcpu->arch.aux_ldtype = KVM_LARCH_FPU;
+ kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
return RESUME_GUEST;
}
{
- if (!kvm_guest_has_lsx(&vcpu->arch))
+ if (!kvm_guest_has_lsx(&vcpu->arch)) {
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
- else
- kvm_own_lsx(vcpu);
+ } else {
+ vcpu->arch.aux_ldtype = KVM_LARCH_LSX;
+ kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
+ }
return RESUME_GUEST;
}
{
- if (!kvm_guest_has_lasx(&vcpu->arch))
+ if (!kvm_guest_has_lasx(&vcpu->arch)) {
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
- else
- kvm_own_lasx(vcpu);
+ } else {
+ vcpu->arch.aux_ldtype = KVM_LARCH_LASX;
+ kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
+ }
return RESUME_GUEST;
}
{
- if (!kvm_guest_has_lbt(&vcpu->arch))
+ if (!kvm_guest_has_lbt(&vcpu->arch)) {
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
- else
- kvm_own_lbt(vcpu);
+ } else {
+ vcpu->arch.aux_ldtype = KVM_LARCH_LBT;
+ kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
+ }
return RESUME_GUEST;
}
kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
vcpu->arch.flush_gpa = INVALID_GPA;
}
+
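+ /*
+ * Load whichever auxiliary context the exit handlers requested.
+ * This path runs with preemption disabled right before guest
+ * entry, which is why the kvm_own_*() helpers below can drop
+ * their own preempt_disable()/preempt_enable() pairs.
+ */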
+ if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
+ switch (vcpu->arch.aux_ldtype) {
+ case KVM_LARCH_FPU:
+ kvm_own_fpu(vcpu);
+ break;
+ case KVM_LARCH_LSX:
+ kvm_own_lsx(vcpu);
+ break;
+ case KVM_LARCH_LASX:
+ kvm_own_lasx(vcpu);
+ break;
+ case KVM_LARCH_LBT:
+ kvm_own_lbt(vcpu);
+ break;
+ default:
+ break;
+ }
+
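+ /* The type is only meaningful while the request is pending */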
+ vcpu->arch.aux_ldtype = 0;
+ }
}
/*
#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu)
{
- preempt_disable();
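+ /* Callers must now run with preemption disabled */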
if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
set_csr_euen(CSR_EUEN_LBTEN);
_restore_lbt(&vcpu->arch.lbt);
vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
}
- preempt_enable();
return 0;
}
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
- preempt_disable();
-
/*
* Enable FPU for guest
* Set FR and FRE according to guest context
kvm_restore_fpu(&vcpu->arch.fpu);
vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
-
- preempt_enable();
}
#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
- preempt_disable();
-
/* Enable LSX for guest */
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
- preempt_enable();
return 0;
}
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
- preempt_disable();
-
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
- preempt_enable();
return 0;
}