KVM_MR_FLAGS_ONLY,
};
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region2 *mem);
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region2 *mem)
-{
- int r;
-
- mutex_lock(&kvm->slots_lock);
- r = __kvm_set_memory_region(kvm, mem);
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-EXPORT_SYMBOL_GPL(kvm_set_memory_region);
-
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region2 *mem)
{
if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
- return kvm_set_memory_region(kvm, mem);
+ guard(mutex)(&kvm->slots_lock);
+ return __kvm_set_memory_region(kvm, mem);
}
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
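
A note on the replacement pattern: guard(mutex)(&kvm->slots_lock) comes from the kernel's scope-based cleanup helpers in include/linux/cleanup.h (the mutex guard itself is declared with DEFINE_GUARD in include/linux/mutex.h). The mutex is acquired at the guard statement and released automatically on every exit path of the enclosing scope, which is what makes the mutex_lock()/mutex_unlock() wrapper kvm_set_memory_region() safe to delete: its sole caller now takes slots_lock directly. Below is a minimal userspace sketch of the same idea using the GCC/Clang cleanup attribute and pthreads; guard_mutex, mutex_unlocker, slots_lock, and set_region_locked are illustrative names for this sketch, not kernel code.

#include <errno.h>
#include <pthread.h>

/* Called automatically when the guard variable goes out of scope. */
static void mutex_unlocker(pthread_mutex_t **m)
{
	if (*m)
		pthread_mutex_unlock(*m);
}

/*
 * Take the lock on entry; the cleanup attribute guarantees the unlock
 * runs on every exit path (early return, goto, end of block).
 */
#define guard_mutex(lock)						\
	pthread_mutex_t *guard_var					\
	__attribute__((cleanup(mutex_unlocker))) =			\
		(pthread_mutex_lock(lock), (lock))

static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_region_locked(int slot)
{
	guard_mutex(&slots_lock);

	if (slot < 0)
		return -EINVAL;	/* early return still drops the lock */

	/* ... critical section protected by slots_lock ... */
	return 0;
}

One caveat to the sketch: guard_var is a fixed identifier, so guard_mutex() can appear only once per scope; the kernel's guard() avoids this by generating a unique variable name, and it also provides scoped_guard() for guarding an explicit block.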