Merge tag 'kvm-riscv-6.8-1' of https://github.com/kvm-riscv/linux into HEAD
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index a18db6a7b3cf474fe48cacf16bd962123f977e42..4c8a05cbc20c72e9f30c2925efa86d7f2a320880 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -44,7 +44,7 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
 typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 
 struct userspace_mem_region {
-       struct kvm_userspace_memory_region region;
+       struct kvm_userspace_memory_region2 region;
        struct sparsebit *unused_phy_pages;
        int fd;
        off_t offset;
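
For orientation, a minimal sketch of how the region2 struct might be filled for a guest_memfd-backed slot, assuming the 6.8 uapi layout; the slot, GPA, size, host_mem and gmem_fd values are purely illustrative:

        struct kvm_userspace_memory_region2 region = {
                .slot = 0,
                .flags = KVM_MEM_GUEST_MEMFD,
                .guest_phys_addr = 0x10000000,
                .memory_size = 0x200000,
                .userspace_addr = (uint64_t)host_mem,   /* hypothetical host mapping */
                .guest_memfd = gmem_fd,                 /* fd from KVM_CREATE_GUEST_MEMFD */
                .guest_memfd_offset = 0,
        };

        vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region);
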
@@ -129,6 +129,7 @@ struct vcpu_reg_sublist {
        const char *name;
        long capability;
        int feature;
+       int feature_type;
        bool finalize;
        __u64 *regs;
        __u64 regs_n;
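
A hypothetical sublist entry showing where the new field sits; the name, register id and the zero feature/feature_type values are placeholders, not taken from an existing get-reg-list configuration:

        static __u64 example_regs[] = { 0 /* placeholder KVM_GET_ONE_REG id */ };

        static struct vcpu_reg_sublist example_sublist = {
                .name           = "example",
                .feature        = 0,    /* placeholder feature bit */
                .feature_type   = 0,    /* placeholder feature-type space */
                .regs           = example_regs,
                .regs_n         = ARRAY_SIZE(example_regs),
        };
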
@@ -188,6 +189,23 @@ enum vm_guest_mode {
        NUM_VM_MODES,
 };
 
+struct vm_shape {
+       enum vm_guest_mode mode;
+       unsigned int type;
+};
+
+#define VM_TYPE_DEFAULT                        0
+
+#define VM_SHAPE(__mode)                       \
+({                                             \
+       struct vm_shape shape = {               \
+               .mode = (__mode),               \
+               .type = VM_TYPE_DEFAULT         \
+       };                                      \
+                                               \
+       shape;                                  \
+})
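
Mode-only callers keep the one-liner via VM_SHAPE(), while a non-default VM type takes a designated initializer. A rough sketch (the x86 type constant is only meaningful on architectures that define it):

        /* Default type, explicit mode: equivalent to today's mode-only calls. */
        struct kvm_vm *vm = __vm_create(VM_SHAPE(VM_MODE_DEFAULT), 1, 0);

        /* Non-default type via a designated initializer. */
        struct vm_shape shape = {
                .mode = VM_MODE_DEFAULT,
                .type = KVM_X86_SW_PROTECTED_VM,        /* x86-only example */
        };

        vm = __vm_create(shape, 1, 0);
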
+
 #if defined(__aarch64__)
 
 extern enum vm_guest_mode vm_mode_default;
@@ -220,6 +238,8 @@ extern enum vm_guest_mode vm_mode_default;
 
 #endif
 
+#define VM_SHAPE_DEFAULT       VM_SHAPE(VM_MODE_DEFAULT)
+
 #define MIN_PAGE_SIZE          (1U << MIN_PAGE_SHIFT)
 #define PTES_PER_MIN_PAGE      ptes_per_page(MIN_PAGE_SIZE)
 
@@ -248,6 +268,13 @@ static inline bool kvm_has_cap(long cap)
 #define __KVM_SYSCALL_ERROR(_name, _ret) \
        "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
 
+/*
+ * Use the "inner", double-underscore macro when reporting errors from within
+ * other macros so that the name of the ioctl() and not its literal numeric value
+ * is printed on error.  The "outer" macro is strongly preferred when reporting
+ * errors "directly", i.e. without an additional layer of macros, as it reduces
+ * the probability of passing in the wrong string.
+ */
 #define __KVM_IOCTL_ERROR(_name, _ret) __KVM_SYSCALL_ERROR(_name, _ret)
 #define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
 
@@ -260,17 +287,13 @@ static inline bool kvm_has_cap(long cap)
 #define __kvm_ioctl(kvm_fd, cmd, arg)                          \
        kvm_do_ioctl(kvm_fd, cmd, arg)
 
-
-#define _kvm_ioctl(kvm_fd, cmd, name, arg)                     \
+#define kvm_ioctl(kvm_fd, cmd, arg)                            \
 ({                                                             \
        int ret = __kvm_ioctl(kvm_fd, cmd, arg);                \
                                                                \
-       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
+       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));        \
 })
 
-#define kvm_ioctl(kvm_fd, cmd, arg) \
-       _kvm_ioctl(kvm_fd, cmd, #cmd, arg)
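
The folded kvm_ioctl() asserts a zero return and stringifies the command itself; __kvm_ioctl() remains for callers that interpret the return value. Roughly, with kvm_fd being the /dev/kvm fd and the CPUID call an x86-flavored illustration that assumes cpuid points at a struct kvm_cpuid2 sized by the caller:

        /* Raw variant: KVM_CHECK_EXTENSION legitimately returns 0 or 1. */
        int ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY);

        /* Asserting variant: any non-zero return fails the test and names
         * the ioctl in the report. */
        kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
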
-
 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
 
 #define __vm_ioctl(vm, cmd, arg)                               \
@@ -279,17 +302,42 @@ static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
        kvm_do_ioctl((vm)->fd, cmd, arg);                       \
 })
 
-#define _vm_ioctl(vm, cmd, name, arg)                          \
+/*
+ * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
+ * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
+ * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
+ * selftests existed and (b) should never outright fail, i.e. is supposed to
+ * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
+ * VM and its vCPUs, including KVM_CHECK_EXTENSION.
+ */
+#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)                               \
+do {                                                                                   \
+       int __errno = errno;                                                            \
+                                                                                       \
+       static_assert_is_vm(vm);                                                        \
+                                                                                       \
+       if (cond)                                                                       \
+               break;                                                                  \
+                                                                                       \
+       if (errno == EIO &&                                                             \
+           __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {     \
+               TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");     \
+               TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");  \
+       }                                                                               \
+       errno = __errno;                                                                \
+       TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));                                \
+} while (0)
+
+#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)          \
+       __TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
+
+#define vm_ioctl(vm, cmd, arg)                                 \
 ({                                                             \
        int ret = __vm_ioctl(vm, cmd, arg);                     \
                                                                \
-       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
+       __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);               \
 })
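
Wrappers whose success condition is not simply a zero return use the assert directly; e.g. an fd-returning ioctl, mirroring the stats-fd helpers updated later in this diff:

        int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

        /* Assert on "fd >= 0" rather than "!ret", still with dead-VM detection. */
        TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
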
 
-#define vm_ioctl(vm, cmd, arg)                                 \
-       _vm_ioctl(vm, cmd, #cmd, arg)
-
-
 static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
 
 #define __vcpu_ioctl(vcpu, cmd, arg)                           \
@@ -298,16 +346,13 @@ static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
        kvm_do_ioctl((vcpu)->fd, cmd, arg);                     \
 })
 
-#define _vcpu_ioctl(vcpu, cmd, name, arg)                      \
+#define vcpu_ioctl(vcpu, cmd, arg)                             \
 ({                                                             \
        int ret = __vcpu_ioctl(vcpu, cmd, arg);                 \
                                                                \
-       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
+       __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);       \
 })
 
-#define vcpu_ioctl(vcpu, cmd, arg)                             \
-       _vcpu_ioctl(vcpu, cmd, #cmd, arg)
-
 /*
  * Looks up and returns the value corresponding to the capability
  * (KVM_CAP_*) given by cap.
@@ -316,7 +361,7 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
 {
        int ret =  __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);
 
-       TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
+       TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
        return ret;
 }
 
@@ -333,6 +378,54 @@ static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
        vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }
 
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
+                                           uint64_t size, uint64_t attributes)
+{
+       struct kvm_memory_attributes attr = {
+               .attributes = attributes,
+               .address = gpa,
+               .size = size,
+               .flags = 0,
+       };
+
+       /*
+        * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
+        * need significant enhancements to support multiple attributes.
+        */
+       TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
+                   "Update me to support multiple attributes!");
+
+       vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
+}
+
+
+static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
+                                     uint64_t size)
+{
+       vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
+}
+
+static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
+                                    uint64_t size)
+{
+       vm_set_memory_attributes(vm, gpa, size, 0);
+}
+
+void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+                           bool punch_hole);
+
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
+                                          uint64_t size)
+{
+       vm_guest_mem_fallocate(vm, gpa, size, true);
+}
+
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
+                                        uint64_t size)
+{
+       vm_guest_mem_fallocate(vm, gpa, size, false);
+}
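
A typical private/shared conversion flow in a test might look like the sketch below; the GPA and size are illustrative:

        uint64_t gpa = 0x10000000, size = 0x200000;     /* illustrative range */

        vm_mem_set_private(vm, gpa, size);      /* guest faults now resolve to guest_memfd */
        /* ... run the guest, touch the range as private memory ... */
        vm_mem_set_shared(vm, gpa, size);       /* convert back to the shared mapping */
        vm_guest_mem_punch_hole(vm, gpa, size); /* discard the private backing pages */
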
+
 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
 const char *vm_guest_mode_string(uint32_t i);
 
@@ -375,7 +468,7 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
 {
        int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
 
-       TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
+       TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
        return fd;
 }
 
@@ -431,14 +524,44 @@ static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
 
 void vm_create_irqchip(struct kvm_vm *vm);
 
+static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
+                                       uint64_t flags)
+{
+       struct kvm_create_guest_memfd guest_memfd = {
+               .size = size,
+               .flags = flags,
+       };
+
+       return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
+}
+
+static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
+                                       uint64_t flags)
+{
+       int fd = __vm_create_guest_memfd(vm, size, flags);
+
+       TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
+       return fd;
+}
+
 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                               uint64_t gpa, uint64_t size, void *hva);
 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                                uint64_t gpa, uint64_t size, void *hva);
+void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                               uint64_t gpa, uint64_t size, void *hva,
+                               uint32_t guest_memfd, uint64_t guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                                uint64_t gpa, uint64_t size, void *hva,
+                                uint32_t guest_memfd, uint64_t guest_memfd_offset);
+
 void vm_userspace_mem_region_add(struct kvm_vm *vm,
        enum vm_mem_backing_src_type src_type,
        uint64_t guest_paddr, uint32_t slot, uint64_t npages,
        uint32_t flags);
+void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
+               uint64_t guest_paddr, uint32_t slot, uint64_t npages,
+               uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
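
Taken together, the new helpers let a test wire up a guest_memfd-backed slot end to end. A rough sketch with placeholder slot/gpa/size/hva/npages values, assuming the KVM_MEM_GUEST_MEMFD flag from the 6.8 uapi and that vm_mem_add() creates a memfd itself when passed -1:

        int gmem_fd = vm_create_guest_memfd(vm, size, 0);

        /* Explicit variant: caller owns the guest_memfd and its offset. */
        vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD,
                                   gpa, size, hva, gmem_fd, 0);

        /* Library-managed variant: backing source, tracking and (optionally)
         * the guest_memfd are handled by the lib. */
        vm_mem_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, slot, npages,
                   KVM_MEM_GUEST_MEMFD, -1, 0);
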
 
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
@@ -587,7 +710,7 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
 {
        int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);
 
-       TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
+       TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
        return fd;
 }
 
@@ -713,21 +836,33 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
  * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
-struct kvm_vm *____vm_create(enum vm_guest_mode mode);
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *____vm_create(struct vm_shape shape);
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
                           uint64_t nr_extra_pages);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
-       return ____vm_create(VM_MODE_DEFAULT);
+       return ____vm_create(VM_SHAPE_DEFAULT);
 }
 
+#ifdef __x86_64__
+static inline struct kvm_vm *vm_create_barebones_protected_vm(void)
+{
+       const struct vm_shape shape = {
+               .mode = VM_MODE_DEFAULT,
+               .type = KVM_X86_SW_PROTECTED_VM,
+       };
+
+       return ____vm_create(shape);
+}
+#endif
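
On x86, callers would typically gate the protected-VM variant on KVM actually advertising the type; a minimal sketch, assuming KVM_CAP_VM_TYPES reports supported VM types as a bitmask:

        /* x86-only: KVM_X86_SW_PROTECTED_VM and KVM_CAP_VM_TYPES. */
        TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) &
                     BIT(KVM_X86_SW_PROTECTED_VM));

        struct kvm_vm *vm = vm_create_barebones_protected_vm();
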
+
 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 {
-       return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
+       return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      void *guest_code, struct kvm_vcpu *vcpus[]);
 
@@ -735,17 +870,27 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
                                                  void *guest_code,
                                                  struct kvm_vcpu *vcpus[])
 {
-       return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
+       return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
                                      guest_code, vcpus);
 }
 
+
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                              struct kvm_vcpu **vcpu,
+                                              uint64_t extra_mem_pages,
+                                              void *guest_code);
+
 /*
  * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
  * additional pages of guest memory.  Returns the VM and vCPU (via out param).
  */
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-                                        uint64_t extra_mem_pages,
-                                        void *guest_code);
+static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+                                                      uint64_t extra_mem_pages,
+                                                      void *guest_code)
+{
+       return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
+                                              extra_mem_pages, guest_code);
+}
 
 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                                     void *guest_code)
@@ -753,6 +898,13 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
        return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
 }
 
+static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                                          struct kvm_vcpu **vcpu,
+                                                          void *guest_code)
+{
+       return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
+}
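
Type-aware tests are expected to go through the shape-based creators; for example, with guest_main as a placeholder guest function:

        static void guest_main(void)
        {
                GUEST_DONE();
        }

        /* ... in the test's setup path ... */
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        vm = vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, guest_main);
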
+
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
 void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
@@ -776,10 +928,6 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
        return n;
 }
 
-struct kvm_userspace_memory_region *
-kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
-                                uint64_t end);
-
 #define sync_global_to_guest(vm, g) ({                         \
        typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));     \
        memcpy(_p, &(g), sizeof(g));                            \