#define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDX_IDREG(idx) sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
-#define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];
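/*
 * Worked example (editorial sketch, not part of the patch): ID_AA64PFR0_EL1
 * is encoded as sys_reg(3, 0, 0, 4, 0), so IDREG_IDX() maps it to
 * ((4 - 1) << 3) | 0 == 24, and IDX_IDREG(24) recovers the same encoding.
 * With CRm spanning 1..7 and Op2 spanning 0..7, KVM_ARM_ID_REG_NUM is 56.
 */
static_assert(IDREG_IDX(sys_reg(3, 0, 0, 4, 0)) == 24);
static_assert(KVM_ARM_ID_REG_NUM == 56);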
#define kvm_read_vm_id_reg(kvm, reg) \
({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
+void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
+
#define __expand_field_sign_unsigned(id, fld, val) \
((u64)SYS_FIELD_VALUE(id, fld, val))
}
for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
- kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
- kvm->arch.id_regs[i]);
+ kvm_set_vm_id_reg(kvm, IDX_IDREG(i), limit_nv_id_reg(IDX_IDREG(i),
+ kvm->arch.id_regs[i]));
/* VTTBR_EL2 */
res0 = res1 = 0;
ret = arm64_check_features(vcpu, rd, val);
if (!ret)
- IDREG(vcpu->kvm, id) = val;
+ kvm_set_vm_id_reg(vcpu->kvm, id, val);
mutex_unlock(&vcpu->kvm->arch.config_lock);
return ret;
}
+void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
+{
+ u64 *p = __vm_id_reg(&kvm->arch, reg);
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
+ return;
+
+ *p = val;
+}
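/*
 * Illustrative caller (editorial sketch, not from the patch; the helper name
 * and the chosen register are made up for the example): writers are expected
 * to hold the config lock and to update VM-wide ID registers only before the
 * first vCPU has run, otherwise the KVM_BUG_ON() above fires.
 */
static void example_set_pfr0(struct kvm *kvm, u64 val)
{
	mutex_lock(&kvm->arch.config_lock);
	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
	mutex_unlock(&kvm->arch.config_lock);
}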
+
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 *val)
{
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
return;
- lockdep_assert_held(&kvm->arch.config_lock);
- IDREG(kvm, id) = reg->reset(vcpu, reg);
+ kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
}
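/*
 * Editorial note: the lockdep_assert_held() removed above is not lost;
 * kvm_set_vm_id_reg() performs the same assertion before writing.
 */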
static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,