return svm_test_msr_bitmap_write(msrpm, msr);
}
-void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
{
struct vcpu_svm *svm = to_svm(vcpu);
void *msrpm = svm->msrpm;
/* Don't disable interception for MSRs userspace wants to handle. */
if (type & MSR_TYPE_R) {
- if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
svm_clear_msr_bitmap_read(msrpm, msr);
else
svm_set_msr_bitmap_read(msrpm, msr);
}
if (type & MSR_TYPE_W) {
- if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
svm_clear_msr_bitmap_write(msrpm, msr);
else
svm_set_msr_bitmap_write(msrpm, msr);
	}

	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
	svm->nested.force_msr_bitmap_recalc = true;
}
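A minimal usage sketch of the merged helper, assuming a hypothetical caller and capability flag (example_recalc_spec_ctrl_intercept and guest_has_spec_ctrl are illustrative, not part of this patch): passing set == true forces interception, while set == false only drops interception if userspace's MSR filter does not claim the MSR.

static void example_recalc_spec_ctrl_intercept(struct kvm_vcpu *vcpu,
					       bool guest_has_spec_ctrl)
{
	/*
	 * Intercept SPEC_CTRL unless the guest may use it; even with
	 * set == false the MSR stays intercepted if userspace's MSR
	 * filter wants to handle it.
	 */
	svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
				  !guest_has_spec_ctrl);
}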
-void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- void *msrpm = svm->msrpm;
-
- if (type & MSR_TYPE_R)
- svm_set_msr_bitmap_read(msrpm, msr);
-
- if (type & MSR_TYPE_W)
- svm_set_msr_bitmap_write(msrpm, msr);
-
- svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
- svm->nested.force_msr_bitmap_recalc = true;
-}
-
void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
{
unsigned int order = get_order(size);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vec);
-void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
-void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
-static inline void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
- int type, bool enable_intercept)
+static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
{
- if (enable_intercept)
- svm_enable_intercept_for_msr(vcpu, msr, type);
- else
- svm_disable_intercept_for_msr(vcpu, msr, type);
+ svm_set_intercept_for_msr(vcpu, msr, type, false);
+}
+
+static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ svm_set_intercept_for_msr(vcpu, msr, type, true);
}
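The inline wrappers keep existing call sites source-compatible; a sketch with an arbitrary MSR (MSR_STAR and the function name are only examples) shows the equivalence:

static void example_passthrough_star(struct kvm_vcpu *vcpu)
{
	/* Old-style call, still available via the inline wrapper ... */
	svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);

	/* ... which now expands to the merged helper with set == false. */
	svm_set_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW, false);
}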
/* nested.c */
vmx->nested.force_msr_bitmap_recalc = true;
}
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;

	if (!cpu_has_vmx_msr_bitmap())
		return;

	vmx_msr_bitmap_l01_changed(vmx);
if (type & MSR_TYPE_R) {
- if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
vmx_clear_msr_bitmap_read(msr_bitmap, msr);
else
vmx_set_msr_bitmap_read(msr_bitmap, msr);
}
if (type & MSR_TYPE_W) {
- if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
vmx_clear_msr_bitmap_write(msr_bitmap, msr);
else
vmx_set_msr_bitmap_write(msr_bitmap, msr);
}
}
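As on the SVM side, disabling interception through the merged VMX helper is a request rather than a guarantee; a sketch, with MSR_TSC_AUX chosen purely for illustration:

static void example_vmx_passthrough_tsc_aux(struct kvm_vcpu *vcpu)
{
	/*
	 * Request passthrough; if userspace's MSR filter wants to intercept
	 * reads or writes of MSR_TSC_AUX, those bitmap bits remain set.
	 */
	vmx_set_intercept_for_msr(vcpu, MSR_TSC_AUX, MSR_TYPE_RW, false);
}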
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- vmx_msr_bitmap_l01_changed(vmx);
-
- if (type & MSR_TYPE_R)
- vmx_set_msr_bitmap_read(msr_bitmap, msr);
-
- if (type & MSR_TYPE_W)
- vmx_set_msr_bitmap_write(msr_bitmap, msr);
-}
-
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
{
/*
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
+
+static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ vmx_set_intercept_for_msr(vcpu, msr, type, false);
+}
+
+static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ vmx_set_intercept_for_msr(vcpu, msr, type, true);
+}
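With the boolean form exported, call sites that previously open-coded an if/else over the enable/disable pair can pass the condition straight through; a sketch, with the Processor Trace status MSR used only as an example:

static void example_pt_status_intercept(struct kvm_vcpu *vcpu, bool intercept)
{
	/* One call replaces the enable/disable branch at the call site. */
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW,
				  intercept);
}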
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
-static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
- int type, bool value)
-{
- if (value)
- vmx_enable_intercept_for_msr(vcpu, msr, type);
- else
- vmx_disable_intercept_for_msr(vcpu, msr, type);
-}
-
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);