        int i, j;

        for (i = 0; i < ARRAY_SIZE(merge_msrs); i++) {
-               u32 bit_nr = svm_msrpm_bit_nr(merge_msrs[i]);
+               int bit_nr = svm_msrpm_bit_nr(merge_msrs[i]);
                u32 offset;

-               if (WARN_ON(bit_nr == MSR_INVALID))
+               if (WARN_ON(bit_nr < 0))
                        return -EIO;

                /*
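For orientation, here is a minimal userspace sketch of what the merge loop is doing once svm_msrpm_bit_nr() has produced a valid bit number. The kernel merges in larger chunks read from guest memory; this byte-granular version, with hypothetical names (merge_msrpm_byte() and the l0/l1 parameters are not KVM code), only shows the core idea: an MSR access is intercepted if either L0 or L1 intercepts it, so the permission bytes are OR'ed together.

#include <stddef.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* Merge the L0 and L1 permission bytes that cover bit_nr into dst. */
static void merge_msrpm_byte(uint8_t *dst, const uint8_t *l0,
                             const uint8_t *l1, int bit_nr)
{
        size_t byte_nr = bit_nr / BITS_PER_BYTE;

        /* Intercept if either level wants the intercept. */
        dst[byte_nr] = l0[byte_nr] | l1[byte_nr];
}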
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        gpa_t base = svm->nested.ctl.msrpm_base_pa;
-       u32 msr, bit_nr;
+       int write, bit_nr;
        u8 value, mask;
-       int write;
+       u32 msr;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        bit_nr = svm_msrpm_bit_nr(msr);
        write = svm->vmcb->control.exit_info_1 & 1;

-       if (bit_nr == MSR_INVALID)
+       if (bit_nr < 0)
                return NESTED_EXIT_DONE;

        if (kvm_vcpu_read_guest(&svm->vcpu, base + bit_nr / BITS_PER_BYTE,
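Once the byte containing bit_nr has been read from L1's bitmap, deciding whether to reflect the exit is a single mask test. The MSRPM gives each MSR two adjacent bits (read intercept first, write intercept directly above it, matching the bit_nr + bit_rw arithmetic in the helper macro further down), so a sketch of the check looks like this (test_msrpm_byte() is illustrative, not KVM code):

#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/*
 * bit_nr indexes the MSR's read-intercept bit; the write-intercept bit
 * sits directly above it, so bit_nr + write selects the bit relevant
 * to this exit (write is 0 for reads, 1 for writes).
 */
static bool test_msrpm_byte(uint8_t value, int bit_nr, int write)
{
        uint8_t mask = 1u << ((bit_nr + write) % BITS_PER_BYTE);

        return value & mask;    /* true: L1 intercepts, reflect the exit */
}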
static_assert(SVM_MSRS_PER_RANGE == 8192);
#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)

-#define MSR_INVALID 0xffffffffU
-
-static __always_inline u32 svm_msrpm_bit_nr(u32 msr)
+static __always_inline int svm_msrpm_bit_nr(u32 msr)
{
        int range_nr;

                range_nr = 2;
                break;
        default:
-               return MSR_INVALID;
+               return -EINVAL;
        }

        return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
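For readers without the full header at hand, here is a self-contained sketch of the computation this function performs after the change. The constants mirror the AMD MSRPM layout (three ranges of 8192 MSRs based at 0x0, 0xc0000000 and 0xc0010000, two permission bits per MSR), and the error value matches the -EINVAL the patch returns; all names are local to the sketch, not the kernel's.

#include <errno.h>
#include <stdint.h>

#define BITS_PER_BYTE   8
#define MSRS_PER_RANGE  8192
#define OFFSET_MASK     (MSRS_PER_RANGE - 1)
#define BITS_PER_MSR    2
#define BYTES_PER_RANGE (MSRS_PER_RANGE * BITS_PER_MSR / BITS_PER_BYTE)

static int msrpm_bit_nr(uint32_t msr)
{
        int range_nr;

        switch (msr & ~OFFSET_MASK) {
        case 0x00000000:        /* MSRs 0x0 - 0x1fff */
                range_nr = 0;
                break;
        case 0xc0000000:        /* MSRs 0xc0000000 - 0xc0001fff */
                range_nr = 1;
                break;
        case 0xc0010000:        /* MSRs 0xc0010000 - 0xc0011fff */
                range_nr = 2;
                break;
        default:
                return -EINVAL; /* negative, so "int" can signal failure */
        }

        return range_nr * BYTES_PER_RANGE * BITS_PER_BYTE +
               (msr & OFFSET_MASK) * BITS_PER_MSR;
}

The largest possible bit number is tiny compared to INT_MAX (three 2 KiB ranges, at most 49150), so narrowing the return type from u32 to int costs nothing while freeing the negative space for error codes; the old scheme had to burn the in-band sentinel 0xffffffff instead.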
static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap, \
                                                       u32 msr)               \
{                                                                             \
-       u32 bit_nr;                                                           \
+       int bit_nr;                                                           \
                                                                              \
        bit_nr = svm_msrpm_bit_nr(msr);                                       \
-       if (bit_nr == MSR_INVALID)                                            \
+       if (bit_nr < 0)                                                       \
                return (rtype)true;                                           \
                                                                              \
        return bitop##_bit(bit_nr + bit_rw, bitmap);                          \
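To see what the builder generates, assume an instantiation like BUILD_SVM_MSR_BITMAP_HELPER(bool, test, test, read, 0); the actual instantiation list lives outside this excerpt. Expanding the macro by hand gives roughly:

static inline bool svm_test_msr_bitmap_read(unsigned long *bitmap, u32 msr)
{
        int bit_nr;

        bit_nr = svm_msrpm_bit_nr(msr);
        if (bit_nr < 0)
                return (bool)true;      /* fail safe: report "intercepted" */

        return test_bit(bit_nr + 0, bitmap);
}

Note the fail-safe on the error path: for an out-of-range MSR the test helpers report "intercepted" rather than "passed through", which is the conservative answer, and the (rtype) cast is what lets the same body serve the void-returning set/clear variants as well.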