{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/*
	 * Lazily switch S-mode CSRs to the guest's values on VCPU entry,
	 * stashing the host values so the host-state twin below can restore
	 * them on exit. (Function header is above this chunk; presumably
	 * kvm_riscv_vcpu_swap_in_guest_state — confirm.)
	 */
	vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren);
	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);

	/*
	 * sstateen0 is tied to the Smstateen extension, hence the extension
	 * check before touching the CSR.
	 *
	 * NOTE(review): this span contained leftover "+/-" diff markers;
	 * resolved by taking the "+" side, which drops the additional
	 * (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0) guard (and the cfg local)
	 * present on the "-" side — verify against the intended patch.
	 */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0, smcsr->sstateen0);
}
static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
- struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren);
csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
- if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
- (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
- smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
- vcpu->arch.host_sstateen0);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+ smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0, vcpu->arch.host_sstateen0);
}
/*