return __nested_vmcb_check_controls(vcpu, ctl);
}
+/*
+ * If a feature is not advertised to L1, clear the corresponding vmcb12
+ * intercept.  @fname is the X86_FEATURE_* suffix and @iname the
+ * INTERCEPT_* suffix, for features whose CPUID flag and intercept are
+ * named differently (e.g. XSAVE vs. XSETBV).
+ */
+#define __nested_svm_sanitize_intercept(__vcpu, __control, fname, iname) \
+do { \
+ if (!guest_cpu_cap_has(__vcpu, X86_FEATURE_##fname)) \
+ vmcb12_clr_intercept(__control, INTERCEPT_##iname); \
+} while (0)
+
+/* Common case: the feature flag and the intercept share the same name. */
+#define nested_svm_sanitize_intercept(__vcpu, __control, name) \
+ __nested_svm_sanitize_intercept(__vcpu, __control, name, name)
+
static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
struct vmcb_ctrl_area_cached *to,
for (i = 0; i < MAX_INTERCEPT; i++)
to->intercepts[i] = from->intercepts[i];
+ __nested_svm_sanitize_intercept(vcpu, to, XSAVE, XSETBV);
+ nested_svm_sanitize_intercept(vcpu, to, INVPCID);
+ nested_svm_sanitize_intercept(vcpu, to, RDTSCP);
+ nested_svm_sanitize_intercept(vcpu, to, SKINIT);
+ nested_svm_sanitize_intercept(vcpu, to, RDPRU);
+
to->iopm_base_pa = from->iopm_base_pa;
to->msrpm_base_pa = from->msrpm_base_pa;
to->tsc_offset = from->tsc_offset;
*/
#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)
-static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
+/* Set @bit in a raw intercepts bitmap (bounds-checked against MAX_INTERCEPT). */
+static inline void __vmcb_set_intercept(unsigned long *intercepts, u32 bit)
{
WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
- __set_bit(bit, (unsigned long *)&control->intercepts);
+ __set_bit(bit, intercepts);
}
-static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
+/* Clear @bit in a raw intercepts bitmap (bounds-checked against MAX_INTERCEPT). */
+static inline void __vmcb_clr_intercept(unsigned long *intercepts, u32 bit)
{
WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
- __clear_bit(bit, (unsigned long *)&control->intercepts);
+ __clear_bit(bit, intercepts);
}
-static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
+/* Test @bit in a raw intercepts bitmap (bounds-checked against MAX_INTERCEPT). */
+static inline bool __vmcb_is_intercept(unsigned long *intercepts, u32 bit)
{
WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
- return test_bit(bit, (unsigned long *)&control->intercepts);
+ return test_bit(bit, intercepts);
+}
+
+/* Set an intercept in a struct vmcb_control_area. */
+static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
+{
+ __vmcb_set_intercept((unsigned long *)&control->intercepts, bit);
+}
+
+/* Clear an intercept in a struct vmcb_control_area. */
+static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
+{
+ __vmcb_clr_intercept((unsigned long *)&control->intercepts, bit);
+}
+
+/* Test an intercept in a struct vmcb_control_area. */
+static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
+{
+ return __vmcb_is_intercept((unsigned long *)&control->intercepts, bit);
+}
+
+/* Clear an intercept in the cached copy of the vmcb12 control area. */
+static inline void vmcb12_clr_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
+{
+ __vmcb_clr_intercept((unsigned long *)&control->intercepts, bit);
}
static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
- WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
- return test_bit(bit, (unsigned long *)&control->intercepts);
+ /* Bounds check now lives in the common __vmcb_is_intercept() helper. */
+ return __vmcb_is_intercept((unsigned long *)&control->intercepts, bit);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)