#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
+
+#define __vcpu_assign_sys_reg(v, r, val)				\
+	do {								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 __v = (val);					\
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
+									\
+		ctxt_sys_reg(ctxt, (r)) = __v;				\
+	} while (0)
+
#define __vcpu_sys_reg(v,r)						\
	(*({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
		__r;							\
	}))
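Aside, not part of the patch: both the new assignment helper and the existing read accessor funnel nested-virt sysreg accesses through kvm_vcpu_apply_reg_masks(), which applies the per-register res0/res1 sanitisation masks before a value is stored or observed. A minimal standalone sketch of that masking pattern, with invented mask values standing in for the per-VM masks the kernel looks up:

#include <stdint.h>
#include <stdio.h>

/* Invented per-register sanitisation masks: res0 bits must store as 0,
 * res1 bits must store as 1. */
struct reg_masks {
	uint64_t res0;
	uint64_t res1;
};

/* Write-side sanitisation: clear the RES0 bits, force the RES1 bits. */
static uint64_t apply_reg_masks(const struct reg_masks *m, uint64_t v)
{
	return (v & ~m->res0) | m->res1;
}

int main(void)
{
	struct reg_masks m = { .res0 = 0xff00, .res1 = 0x0003 };

	/* 0xfffc goes in; 0x00ff comes out: top byte cleared, low bits forced. */
	printf("%#llx\n", (unsigned long long)apply_reg_masks(&m, 0xfffc));
	return 0;
}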
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
- __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+ __vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
break;
case TIMER_PTIMER:
- __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+ __vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
break;
case TIMER_HVTIMER:
- __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+ __vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
break;
case TIMER_HPTIMER:
- __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+ __vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
break;
default:
WARN_ON(1);
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
- __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+ __vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
break;
case TIMER_PTIMER:
- __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+ __vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
break;
case TIMER_HVTIMER:
- __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+ __vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
break;
case TIMER_HPTIMER:
- __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+ __vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
break;
default:
WARN_ON(1);
if (unlikely(vcpu_has_nv(vcpu)))
vcpu_write_sys_reg(vcpu, val, reg);
else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
- __vcpu_sys_reg(vcpu, reg) = val;
+ __vcpu_assign_sys_reg(vcpu, reg, val);
}
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
} else if (has_vhe()) {
write_sysreg_el1(val, SYS_SPSR);
} else {
- __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
+ __vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
}
}
if (!vcpu_el1_is_32bit(vcpu))
return;
- __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
+ __vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}
static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
*/
if (vcpu_has_sve(vcpu)) {
zcr_el1 = read_sysreg_el1(SYS_ZCR);
- __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+ __vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
/*
* The guest's state is always saved using the guest's max VL.
vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);
- __vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
- __vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
+ __vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
+ __vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));
if (has_vhe() || kvm_debug_regs_in_use(vcpu))
- __vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
+ __vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
}
static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
{
- __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+ __vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
/*
* On saving/restoring guest sve state, always use the maximum VL for
* the guest. The layout of the data when saving the sve state depends
has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
if (has_fpmr)
- __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+ __vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));
if (system_supports_sve())
__hyp_sve_restore_host();
*/
val = read_sysreg_el0(SYS_CNTP_CVAL);
if (map.direct_ptimer == vcpu_ptimer(vcpu))
- __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+ __vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
if (map.direct_ptimer == vcpu_hptimer(vcpu))
- __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+ __vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);
offset = read_sysreg_s(SYS_CNTPOFF_EL2);
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
/* These registers are common with EL1 */
- __vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
- __vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
-
- __vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
- __vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
- __vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
- __vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
- __vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
- __vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
- __vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
- __vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
+ __vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
+ __vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
+
+ __vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
+ __vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
+ __vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
+ __vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
+ __vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
+ __vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
+ __vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
+ __vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));
/*
* In VHE mode those registers are compatible between EL1 and EL2,
* are always trapped, ensuring that the in-memory
* copy is always up-to-date. A small blessing...
*/
- __vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
- __vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
- __vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
- __vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
+ __vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
+ __vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
+ __vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
+ __vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));
if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
- __vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
+ __vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));
if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
- __vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
- __vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
+ __vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
+ __vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
}
if (ctxt_has_s1poe(&vcpu->arch.ctxt))
- __vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
+ __vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
}
/*
__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
}
- __vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
- __vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
- __vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
+ __vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
+ __vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
+ __vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
}
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
		val = lower_32_bits(val);
}
- __vcpu_sys_reg(vcpu, reg) = val;
+ __vcpu_assign_sys_reg(vcpu, reg, val);
/* Recreate the perf event to reflect the updated sample_period */
kvm_pmu_create_perf_event(pmc);
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
- __vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+ __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}
reg = counter_index_to_reg(pmc->idx);
- __vcpu_sys_reg(vcpu, reg) = val;
+ __vcpu_assign_sys_reg(vcpu, reg, val);
kvm_pmu_release_perf_event(pmc);
}
reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
if (!kvm_pmc_is_64bit(pmc))
reg = lower_32_bits(reg);
- __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
+ __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);
		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
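Aside: the overflow test in the increment path is easier to see with numbers. The counter has just been stored truncated to its width, so overflow shows up as a wrap to zero at the width chosen by kvm_pmc_has_64bit_overflow(). A self-contained toy model (invented helper, not kernel code) in which storage width and overflow width can differ, as with PMUv3p5 long counters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented toy model: increment a counter whose storage width and
 * overflow width may differ. */
static bool counter_overflowed(uint64_t *ctr, bool is_64bit, bool ovf_64bit)
{
	*ctr += 1;
	if (!is_64bit)
		*ctr = (uint32_t)*ctr;	/* only 32 bits are stored */

	/* Overflow shows up as a wrap to zero at the overflow width. */
	return (ovf_64bit ? *ctr : (uint32_t)*ctr) == 0;
}

int main(void)
{
	uint64_t c = 0xffffffff;

	/* 64-bit storage, 32-bit overflow: low half wraps, upper half keeps counting. */
	printf("%d %#llx\n", counter_overflowed(&c, true, false),
	       (unsigned long long)c);	/* prints: 1 0x100000000 */
	return 0;
}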
/* The reset bits don't indicate any state, and shouldn't be saved. */
- __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+ __vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));
if (val & ARMV8_PMU_PMCR_C)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
u64 reg;
reg = counter_index_to_evtreg(pmc->idx);
- __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
+ __vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));
kvm_pmu_create_perf_event(pmc);
}
u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
val &= ~MDCR_EL2_HPMN;
val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
- __vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+ __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
}
}
}
* to reverse-translate virtual EL2 system registers for a
* non-VHE guest hypervisor.
*/
- __vcpu_sys_reg(vcpu, reg) = val;
+ __vcpu_assign_sys_reg(vcpu, reg, val);
switch (reg) {
case CNTHCTL_EL2:
return;
memory_write:
- __vcpu_sys_reg(vcpu, reg) = val;
+ __vcpu_assign_sys_reg(vcpu, reg, val);
}
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
return -EINVAL;
- __vcpu_sys_reg(vcpu, rd->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, rd->reg, val);
return 0;
}
* The value of PMCR.N field is included when the
* vCPU register is read via kvm_vcpu_read_pmcr().
*/
- __vcpu_sys_reg(vcpu, r->reg) = pmcr;
+ __vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
return __vcpu_sys_reg(vcpu, r->reg);
}
return false;
if (p->is_write)
- __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
else
/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & PMSELR_EL0_SEL_MASK;
{
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
- __vcpu_sys_reg(vcpu, r->reg) = val & mask;
+ __vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
return 0;
if (!vcpu_mode_priv(vcpu))
return undef_access(vcpu, p, r);
- __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
- p->regval & ARMV8_PMU_USERENR_MASK;
+ __vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
+ (p->regval & ARMV8_PMU_USERENR_MASK));
} else {
p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
& ARMV8_PMU_USERENR_MASK;
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
- __vcpu_sys_reg(vcpu, r->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
return 0;
if (kvm_has_mte(vcpu->kvm))
clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
- __vcpu_sys_reg(vcpu, r->reg) = clidr;
+ __vcpu_assign_sys_reg(vcpu, r->reg, clidr);
return __vcpu_sys_reg(vcpu, r->reg);
}
if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
return -EINVAL;
- __vcpu_sys_reg(vcpu, rd->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, rd->reg, val);
return 0;
}
const struct sys_reg_desc *r)
{
if (p->is_write)
- __vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
else
p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
const struct sys_reg_desc *r)
{
if (p->is_write)
- __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
else
p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
const struct sys_reg_desc *r)
{
if (p->is_write)
- __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
else
p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
val |= HCR_E2H;
- return __vcpu_sys_reg(vcpu, r->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
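Aside: since the new helper is a statement rather than an expression, this reset hook can no longer return the assignment directly; it now stores and reads back, so the caller sees the value as it lands in the register file (after any sanitisation) rather than the raw input. The same shape in toy, self-contained form (names and mask invented):

#include <stdint.h>
#include <stdio.h>

/* Toy single-register file; the RES0 mask value is invented. */
static uint64_t regfile;
static const uint64_t RES0 = 0xf0;

static void assign_reg(uint64_t v)
{
	regfile = v & ~RES0;	/* sanitise on the write side */
}

static uint64_t read_reg(void)
{
	return regfile;
}

/* Mirrors the converted reset hook: store, then return the read-back
 * value, which may differ from the raw input once masks are applied. */
static uint64_t reset_reg(uint64_t v)
{
	assign_reg(v);
	return read_reg();
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)reset_reg(0xff)); /* 0xf */
	return 0;
}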
static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
		val = u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
}
- __vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+ __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
/*
* Request a reload of the PMU to enable/disable the counters
static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
- __vcpu_sys_reg(vcpu, r->reg) = vcpu->kvm->arch.nr_pmu_counters;
+ __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
return vcpu->kvm->arch.nr_pmu_counters;
}
if (r->set_user) {
ret = (r->set_user)(vcpu, r, val);
} else {
- __vcpu_sys_reg(vcpu, r->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
ret = 0;
}
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= NR_SYS_REGS);
- __vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+ __vcpu_assign_sys_reg(vcpu, r->reg, 0x1de7ec7edbadc0deULL);
return __vcpu_sys_reg(vcpu, r->reg);
}
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= NR_SYS_REGS);
- __vcpu_sys_reg(vcpu, r->reg) = r->val;
+ __vcpu_assign_sys_reg(vcpu, r->reg, r->val);
return __vcpu_sys_reg(vcpu, r->reg);
}
val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
val &= ~ICH_HCR_EL2_EOIcount_MASK;
val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
- __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
- __vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
+ __vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
+ __vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);
for (i = 0; i < 4; i++) {
- __vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i];
- __vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i];
+ __vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
+ __vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
}
	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		u64 val = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		val &= ~ICH_LR_STATE;
val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
- __vcpu_sys_reg(vcpu, ICH_LRN(i)) = val;
+ __vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
s_cpu_if->vgic_lr[i] = 0;
}