* the guest's code, stack, and page tables, and low memory contains
* the PCI hole and other MMIO regions that need to be avoided.
*/
- const u64 gpa = SZ_4G;
+ const gpa_t gpa = SZ_4G;
const int slot = 1;
struct kvm_vcpu *vcpu;
gpa_t ucall_mmio_addr;
gva_t handlers;
u32 dirty_ring_size;
- u64 gpa_tag_mask;
+ gpa_t gpa_tag_mask;
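Throughout the conversion, gpa_t and gva_t are taken to be the selftests' own address typedefs. A minimal sketch of what the series presumes (the real definitions live in the shared selftests headers, not in this excerpt; treat the exact placement as an assumption):

/* Assumed typedefs; illustrative only, not part of this patch. */
typedef uint64_t gpa_t;	/* guest physical address */
typedef uint64_t gva_t;	/* guest virtual address */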
/*
* "mmu" is the guest's stage-1, with a short name because the vast
vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vm_set_memory_attributes(struct kvm_vm *vm, u64 gpa,
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa,
u64 size, u64 attributes)
{
struct kvm_memory_attributes attr = {
}
-static inline void vm_mem_set_private(struct kvm_vm *vm, u64 gpa,
+static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa,
u64 size)
{
vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}
-static inline void vm_mem_set_shared(struct kvm_vm *vm, u64 gpa,
+static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa,
u64 size)
{
vm_set_memory_attributes(vm, gpa, size, 0);
}
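For context, a hedged sketch of how the converted attribute helpers pair up in a test; the wrapper name and call site are hypothetical:

/* Illustrative only: flip a range private, then back to shared. */
static void toggle_range(struct kvm_vm *vm, gpa_t gpa, u64 size)
{
	vm_mem_set_private(vm, gpa, size);	/* sets KVM_MEMORY_ATTRIBUTE_PRIVATE */
	vm_mem_set_shared(vm, gpa, size);	/* clears the attributes again */
}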
-void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 gpa, u64 size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size,
bool punch_hole);
-static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, u64 gpa,
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa,
u64 size)
{
vm_guest_mem_fallocate(vm, gpa, size, true);
}
-static inline void vm_guest_mem_allocate(struct kvm_vm *vm, u64 gpa,
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa,
u64 size)
{
vm_guest_mem_fallocate(vm, gpa, size, false);
}
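Likewise, a hedged sketch of the two fallocate() wrappers back to back; the function name is made up:

/* Illustrative only: discard guest_memfd backing, then re-reserve it. */
static void recycle_backing(struct kvm_vm *vm, gpa_t gpa, u64 size)
{
	vm_guest_mem_punch_hole(vm, gpa, size);	/* FALLOC_FL_PUNCH_HOLE */
	vm_guest_mem_allocate(vm, gpa, size);	/* plain fallocate() */
}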
void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva);
+ gpa_t gpa, u64 size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva);
+ gpa_t gpa, u64 size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva,
+ gpa_t gpa, u64 size, void *hva,
u32 guest_memfd, u64 guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva,
+ gpa_t gpa, u64 size, void *hva,
u32 guest_memfd, u64 guest_memfd_offset);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
- u64 gpa, u32 slot, u64 npages, u32 flags);
+ gpa_t gpa, u32 slot, u64 npages, u32 flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- u64 gpa, u32 slot, u64 npages, u32 flags,
+ gpa_t gpa, u32 slot, u64 npages, u32 flags,
int guest_memfd_fd, u64 guest_memfd_offset);
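As a usage sketch of the converted memslot API, a guest_memfd-backed slot might be registered as below; the slot number and sizes are placeholder values, and vm_create_guest_memfd() is assumed to be the usual selftests helper:

/* Illustrative only: back slot 1 with a fresh guest_memfd at offset 0. */
int memfd = vm_create_guest_memfd(vm, size, 0);

vm_mem_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, 1 /* slot */,
	   size / vm->page_size, KVM_MEM_GUEST_MEMFD, memfd, 0);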
#ifndef vm_arch_has_protected_memory
#define MEMSTRESS_MEM_SLOT_INDEX 1
struct memstress_vcpu_args {
- u64 gpa;
+ gpa_t gpa;
gva_t gva;
u64 pages;
struct memstress_args {
struct kvm_vm *vm;
/* The starting address and size of the guest test region. */
- u64 gpa;
+ gpa_t gpa;
u64 size;
u64 guest_page_size;
u32 random_seed;
u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
void xen_hypercall(u64 nr, u64 a0, void *a1);
-static inline u64 __kvm_hypercall_map_gpa_range(u64 gpa, u64 size, u64 flags)
+static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}
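Note that the size is converted to a page count before it reaches KVM_HC_MAP_GPA_RANGE. A hedged guest-side sketch; the flags value is a placeholder:

/* Illustrative only: map one 2MiB region and check the raw result. */
u64 ret = __kvm_hypercall_map_gpa_range(gpa, SZ_2M, 0 /* placeholder flags */);

GUEST_ASSERT(!ret);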
-static inline void kvm_hypercall_map_gpa_range(u64 gpa, u64 size, u64 flags)
+static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva)
+ gpa_t gpa, u64 size, void *hva)
{
struct kvm_userspace_memory_region region = {
.slot = slot,
}
void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva)
+ gpa_t gpa, u64 size, void *hva)
{
int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
"KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)")
int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva,
+ gpa_t gpa, u64 size, void *hva,
u32 guest_memfd, u64 guest_memfd_offset)
{
struct kvm_userspace_memory_region2 region = {
}
void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
- u64 gpa, u64 size, void *hva,
+ gpa_t gpa, u64 size, void *hva,
u32 guest_memfd, u64 guest_memfd_offset)
{
int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- u64 gpa, u32 slot, u64 npages, u32 flags,
+ gpa_t gpa, u32 slot, u64 npages, u32 flags,
int guest_memfd, u64 guest_memfd_offset)
{
int ret;
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
- u64 gpa, u32 slot, u64 npages, u32 flags)
+ gpa_t gpa, u32 slot, u64 npages, u32 flags)
{
vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
}
const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
struct userspace_mem_region *region;
u64 end = base + size;
- u64 gpa, len;
+ gpa_t gpa;
+ u64 len;
off_t fd_offset;
int ret;
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
int ri, idx;
u64 *entry;
u64 nr_modifications)
{
u64 pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
- u64 gpa;
+ gpa_t gpa;
int i;
/*
"sem_timedwait() failed: %d", errno);
}
-static void *vm_gpa2hva(struct vm_data *data, u64 gpa, u64 *rempages)
+static void *vm_gpa2hva(struct vm_data *data, gpa_t gpa, u64 *rempages)
{
- u64 gpage, pgoffs;
+ gpa_t gpage;
+ u64 pgoffs;
u32 slot, slotoffs;
void *base;
u32 guest_page_size = data->vm->page_size;
for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
u64 npages;
- u64 gpa;
+ gpa_t gpa;
npages = data->pages_per_slot;
if (slot == data->nslots)
static void test_memslot_do_unmap(struct vm_data *data,
u64 offsp, u64 count)
{
- u64 gpa, ctr;
+ gpa_t gpa;
+ u64 ctr;
u32 guest_page_size = data->vm->page_size;
for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
static void test_memslot_map_unmap_check(struct vm_data *data,
u64 offsp, u64 valexp)
{
- u64 gpa;
+ gpa_t gpa;
u64 *val;
u32 guest_page_size = data->vm->page_size;
static void guest_code(u64 start_gpa, u64 end_gpa, u64 stride)
{
- u64 gpa;
+ gpa_t gpa;
int i;
for (i = 0; i < 2; i++) {
u64 start_gpa, u64 end_gpa)
{
struct vcpu_info *info;
- u64 gpa, nr_bytes;
+ gpa_t gpa;
+ u64 nr_bytes;
pthread_t *threads;
int i;
struct slot_worker_data {
struct kvm_vm *vm;
- u64 gpa;
+ gpa_t gpa;
u32 flags;
bool worker_ready;
bool prefault_ready;
static void __test_pre_fault_memory(unsigned long vm_type, bool private)
{
- u64 gpa, gva, alignment, guest_page_size;
+ gpa_t gpa;
+ gva_t gva;
+ u64 alignment, guest_page_size;
const struct vm_shape shape = {
.mode = VM_MODE_DEFAULT,
.type = vm_type,
}
/* calculate host virtual addr from guest physical addr */
-static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
+static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, gpa_t gpa)
{
return (void *)(self->base_hva - self->base_gpa + gpa);
}
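The identity-offset translation is worth spelling out; a worked example with made-up bases:

/*
 * With base_gpa = 0x100000 and base_hva = 0x7f0000100000, gpa 0x101000
 * yields 0x7f0000100000 - 0x100000 + 0x101000 = 0x7f0000101000, i.e.
 * hva = base_hva + (gpa - base_gpa).
 */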
{
struct kvm_vm *vm;
u64 *hva;
- u64 gpa;
+ gpa_t gpa;
vm = vm_create_with_one_vcpu(vcpu, guest_code);
pattern, i, gpa + i, mem[i]); \
} while (0)
-static void memcmp_h(u8 *mem, u64 gpa, u8 pattern, size_t size)
+static void memcmp_h(u8 *mem, gpa_t gpa, u8 pattern, size_t size)
{
size_t i;
SYNC_PRIVATE,
};
-static void guest_sync_shared(u64 gpa, u64 size,
+static void guest_sync_shared(gpa_t gpa, u64 size,
u8 current_pattern, u8 new_pattern)
{
GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern);
}
-static void guest_sync_private(u64 gpa, u64 size, u8 pattern)
+static void guest_sync_private(gpa_t gpa, u64 size, u8 pattern)
{
GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern);
}
#define MAP_GPA_SHARED BIT(1)
#define MAP_GPA_DO_FALLOCATE BIT(2)
-static void guest_map_mem(u64 gpa, u64 size, bool map_shared,
+static void guest_map_mem(gpa_t gpa, u64 size, bool map_shared,
bool do_fallocate)
{
u64 flags = MAP_GPA_SET_ATTRIBUTES;
kvm_hypercall_map_gpa_range(gpa, size, flags);
}
-static void guest_map_shared(u64 gpa, u64 size, bool do_fallocate)
+static void guest_map_shared(gpa_t gpa, u64 size, bool do_fallocate)
{
guest_map_mem(gpa, size, true, do_fallocate);
}
-static void guest_map_private(u64 gpa, u64 size, bool do_fallocate)
+static void guest_map_private(gpa_t gpa, u64 size, bool do_fallocate)
{
guest_map_mem(gpa, size, false, do_fallocate);
}
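A hedged sketch of how a conversion test might drive these wrappers; it assumes the range is identity-mapped in the guest, as the conversion tests do:

/* Illustrative only: convert to shared, dirty the range, convert back. */
guest_map_shared(gpa, size, /*do_fallocate=*/true);
memset((void *)gpa, 0xaa, size);
guest_map_private(gpa, size, /*do_fallocate=*/true);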
memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE);
for (i = 0; i < ARRAY_SIZE(test_ranges); i++) {
- u64 gpa = base_gpa + test_ranges[i].offset;
+ gpa_t gpa = base_gpa + test_ranges[i].offset;
u64 size = test_ranges[i].size;
u8 p1 = 0x11;
u8 p2 = 0x22;
}
}
-static void guest_punch_hole(u64 gpa, u64 size)
+static void guest_punch_hole(gpa_t gpa, u64 size)
{
/* "Mapping" memory shared via fallocate() is done via PUNCH_HOLE. */
u64 flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE;
guest_map_private(base_gpa, PER_CPU_DATA_SIZE, false);
for (i = 0; i < ARRAY_SIZE(test_ranges); i++) {
- u64 gpa = base_gpa + test_ranges[i].offset;
+ gpa_t gpa = base_gpa + test_ranges[i].offset;
u64 size = test_ranges[i].size;
/*
static void handle_exit_hypercall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- u64 gpa = run->hypercall.args[0];
+ gpa_t gpa = run->hypercall.args[0];
u64 size = run->hypercall.args[1] * PAGE_SIZE;
bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES;
bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
case UCALL_SYNC: {
- u64 gpa = uc.args[1];
+ gpa_t gpa = uc.args[1];
size_t size = uc.args[2];
size_t i;
KVM_MEM_GUEST_MEMFD, memfd, slot_size * i);
for (i = 0; i < nr_vcpus; i++) {
- u64 gpa = BASE_DATA_GPA + i * per_cpu_size;
+ gpa_t gpa = BASE_DATA_GPA + i * per_cpu_size;
vcpu_args_set(vcpus[i], 1, gpa);
struct kvm_vm *vm;
struct ucall uc;
u64 *hva;
- u64 gpa;
+ gpa_t gpa;
int rc;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
vmcall();
}
-static void l1_vmrun(struct svm_test_data *svm, u64 gpa)
+static void l1_vmrun(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
-static void l1_vmload(struct svm_test_data *svm, u64 gpa)
+static void l1_vmload(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
-static void l1_vmsave(struct svm_test_data *svm, u64 gpa)
+static void l1_vmsave(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
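A hedged sketch of an L1 call site; it assumes svm->vmcb_gpa is the vmcb's guest physical address as populated by the usual SVM setup helpers:

/* Illustrative only: VMRUN with the vmcb's real GPA. */
l1_vmrun(svm, svm->vmcb_gpa);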
-static void l1_vmexit(struct svm_test_data *svm, u64 gpa)
+static void l1_vmexit(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);