case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
break;
default:
bad_intf_version(tid, layout, arrghs, status, flags,
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
break;
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, first_domain);
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, max_domains);
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, buffer);
+ break;
default:
VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
"%"PRIx32" not implemented yet\n",
case 0x0000000a:
case 0x0000000b:
case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
break;
default:
bad_intf_version(tid, layout, arrghs, status, flags,
break;
case VKI_XEN_DOMCTL_gethvmcontext_partial:
- __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
- __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
- __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, type);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, instance);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, buffer);
+ /* NOTE(review): interface versions >= 0x0000000e insert a bufsz field
+ * before buffer (vki_xen_domctl_hvmcontext_partial_0000000e), so these
+ * 00000007-layout reads would use wrong offsets for newer layouts —
+ * confirm the interface version is checked before reaching this case. */
- switch (domctl->u.hvmcontext_partial.type) {
+ switch (domctl->u.hvmcontext_partial_00000007.type) {
case VKI_HVM_SAVE_CODE(CPU):
- if ( domctl->u.hvmcontext_partial.buffer.p )
+ if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
- (Addr)domctl->u.hvmcontext_partial.buffer.p,
+ (Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
VKI_HVM_SAVE_LENGTH(CPU));
break;
case VKI_HVM_SAVE_CODE(MTRR):
- if ( domctl->u.hvmcontext_partial.buffer.p )
+ if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
- (Addr)domctl->u.hvmcontext_partial.buffer.p,
+ (Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
VKI_HVM_SAVE_LENGTH(MTRR));
break;
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_domctl_gethvmcontext_partial type",
- domctl->u.hvmcontext_partial.type);
+ domctl->u.hvmcontext_partial_00000007.type);
break;
}
break;
__PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_00000007, machine_sbdf);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, dev);
__PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, flag);
switch (domctl->u.assign_device_0000000b.dev) {
__PRE_XEN_DOMCTL_READ(assign_device, assign_device_00000007, machine_sbdf);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, dev);
__PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, flag);
switch (domctl->u.assign_device_0000000b.dev) {
__PRE_XEN_DOMCTL_READ(deassign_device, assign_device_00000007, machine_sbdf);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, dev);
__PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, flag);
switch (domctl->u.assign_device_0000000b.dev) {
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.elapsed_nsec);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, tsc_mode);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, gtsc_khz);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, incarnation);
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
break;
case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
__PRE_XEN_DOMCTL_READ(
domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
break;
case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
break;
case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
break;
break;
case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
__PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
__PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, mode);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
__PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, op);
__PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, mode);
break;
+ case 0x00000012:
+ __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, op);
+ __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, mode);
+ __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, u.enable);
+ break;
}
break;
case VKI_XEN_DOMCTL_monitor_op:
switch (domctl->interface_version) {
case 0x000000b:
+ case 0x000000c:
+ case 0x000000d:
+ case 0x000000e:
+ case 0x000000f:
+ case 0x0000010:
if (domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
switch (domctl->u.monitor_op_0000000b.event) {
}
}
+ break;
+ case 0x0000011:
+ case 0x0000012:
+ if (domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
+ domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
+ switch (domctl->u.monitor_op_00000011.event) {
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+ __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.mov_to_cr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
+ __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.mov_to_msr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+ __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.guest_request);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
+ break;
+ }
+ }
+
break;
}
break;
PRE_XEN_HVMOP_READ(inject_trap, cr2);
break;
+ case VKI_XEN_HVMOP_altp2m: {
+ /* Validate the fixed header of the altp2m op, then the per-command
+ * payload inside the union. */
+ vki_xen_hvm_altp2m_op_t *altp2m_op = (vki_xen_hvm_altp2m_op_t *)arg;
+
+ PRE_XEN_HVMOP_READ(altp2m_op, version);
+ PRE_XEN_HVMOP_READ(altp2m_op, cmd);
+ PRE_XEN_HVMOP_READ(altp2m_op, domain);
+ PRE_XEN_HVMOP_READ(altp2m_op, pad1);
+ PRE_XEN_HVMOP_READ(altp2m_op, pad2);
+
+ switch (altp2m_op->cmd) {
+ case VKI_XEN_HVMOP_altp2m_get_domain_state:
+ case VKI_XEN_HVMOP_altp2m_set_domain_state:
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.domain_state.state), sizeof(vki_uint8_t));
+ break;
+ case VKI_XEN_HVMOP_altp2m_create_p2m:
+ case VKI_XEN_HVMOP_altp2m_destroy_p2m:
+ case VKI_XEN_HVMOP_altp2m_switch_p2m:
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.view.view), sizeof(vki_uint16_t));
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.view.hvmmem_default_access), sizeof(vki_uint16_t));
+ break;
+ case VKI_XEN_HVMOP_altp2m_change_gfn:
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.view), sizeof(vki_uint16_t));
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.pad1), sizeof(vki_uint16_t));
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.pad2), sizeof(vki_uint32_t));
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.old_gfn), sizeof(vki_uint64_t));
+ PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.new_gfn), sizeof(vki_uint64_t));
+ break;
+ default:
+ /* Unknown sub-command: only the common header was checked above. */
+ break;
+ }
+
+ break;
+ }
+
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_hvm_op", op);
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
break;
default:
return;
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
* sysctl->u.getdomaininfolist_0000000a.num_domains);
break;
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
+ POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000010, num_domains);
+ POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000010.buffer.p,
+ sizeof(*sysctl->u.getdomaininfolist_00000010.buffer.p)
+ * sysctl->u.getdomaininfolist_00000010.num_domains);
+ break;
}
break;
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
break;
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, threads_per_core);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, cores_per_socket);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, nr_cpus);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_cpu_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, nr_nodes);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, cpu_khz);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, capabilities);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, total_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, free_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, scrub_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, outstanding_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_mfn);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, hw_cap[8]);
}
break;
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
break;
default:
return;
sizeof(vki_xen_guest_tsc_info_t));
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
__POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, tsc_mode);
__POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, gtsc_khz);
__POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, incarnation);
break;
case VKI_XEN_DOMCTL_gethvmcontext_partial:
- switch (domctl->u.hvmcontext_partial.type) {
+ switch (domctl->u.hvmcontext_partial_00000007.type) {
case VKI_HVM_SAVE_CODE(CPU):
- if ( domctl->u.hvmcontext_partial.buffer.p )
- POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
+ if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
+ POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
VKI_HVM_SAVE_LENGTH(CPU));
break;
}
domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
break;
case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
POST_MEM_WRITE(
(Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
break;
case 0x00000009:
case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
+ case 0x00000012:
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
__POST_XEN_DOMCTL_WRITE(mem_event_op, mem_event_op_00000007, port);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
+ case 0x00000011:
__POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_0000000b, port);
break;
+ case 0x00000012:
+ __POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_00000012, u.enable.port);
+ break;
}
break;
}
}
+ break;
+ case 0x0000011:
+ if (domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
+ switch(domctl->u.monitor_op_00000011.event) {
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.mov_to_cr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.mov_to_msr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.guest_request);
+ break;
+ }
+ }
+
break;
}
break;
typedef struct vki_xen_domctl_getdomaininfo_00000009 vki_xen_domctl_getdomaininfo_00000009_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000009_t);
+// x86 version only for now
+struct vki_xen_arch_domainconfig_00000010 {
+ vki_uint32_t emulation_flags;
+};
+
+/* getdomaininfo layout introduced with interface version 0x00000010:
+ * adds the per-arch domainconfig member at the end.
+ * NOTE(review): the POST handler still writes via the 00000009 layout
+ * for these versions — confirm that is intended. */
+struct vki_xen_domctl_getdomaininfo_00000010 {
+ /* OUT variables. */
+ vki_xen_domid_t domain;
+ vki_uint32_t flags;
+ vki_xen_uint64_aligned_t tot_pages;
+ vki_xen_uint64_aligned_t max_pages;
+ vki_xen_uint64_aligned_t outstanding_pages;
+ vki_xen_uint64_aligned_t shr_pages;
+ vki_xen_uint64_aligned_t paged_pages;
+ vki_xen_uint64_aligned_t shared_info_frame;
+ vki_xen_uint64_aligned_t cpu_time;
+ vki_uint32_t nr_online_vcpus;
+ vki_uint32_t max_vcpu_id;
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t cpupool;
+ struct vki_xen_arch_domainconfig_00000010 arch;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000010 vki_xen_domctl_getdomaininfo_00000010_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000010_t);
+
/* vki_xen_domctl_getdomaininfo_0000000a is the same as 00000009 */
/* Get/set the NUMA node(s) with which the guest has affinity with. */
typedef struct vki_xen_domctl_hvmcontext vki_xen_domctl_hvmcontext_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_t);
-struct vki_xen_domctl_hvmcontext_partial {
+/* Layout used by interface versions before 0x0000000e. */
+struct vki_xen_domctl_hvmcontext_partial_00000007 {
vki_uint32_t type; /* IN */
vki_uint32_t instance; /* IN */
VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* IN/OUT buffer */
};
-typedef struct vki_xen_domctl_hvmcontext_partial vki_xen_domctl_hvmcontext_partial_t;
-DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_t);
+typedef struct vki_xen_domctl_hvmcontext_partial_00000007 vki_xen_domctl_hvmcontext_partial_00000007_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_00000007_t);
+/* From interface version 0x0000000e: an explicit bufsz field is inserted
+ * before buffer, and buffer becomes OUT-only. */
+struct vki_xen_domctl_hvmcontext_partial_0000000e {
+ vki_uint32_t type; /* IN */
+ vki_uint32_t instance; /* IN */
+ vki_xen_uint64_aligned_t bufsz; /* IN */
+ VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* OUT buffer */
+};
+typedef struct vki_xen_domctl_hvmcontext_partial_0000000e vki_xen_domctl_hvmcontext_partial_0000000e_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_0000000e_t);
struct vki_xen_domctl_pin_mem_cacheattr {
vki_xen_uint64_aligned_t start, end; /* IN */
/* only a name change in 4.6 */
typedef struct vki_xen_domctl_mem_event_op_00000007 vki_xen_domctl_vm_event_op_0000000b;
+/* vm_event op layout for interface version 0x00000012: the OUT port moved
+ * into u.enable and the union gained a version field. */
+struct vki_xen_domctl_vm_event_op_00000012 {
+ vki_uint32_t op; /* IN */
+ vki_uint32_t mode; /* IN */
+
+ union {
+ struct {
+ vki_uint32_t port; /* OUT */
+ } enable;
+
+ vki_uint32_t version;
+ } u;
+};
+
struct vki_xen_domctl_set_access_required {
vki_uint8_t access_required; /* IN */
};
#define VKI_XEN_DOMCTL_MONITOR_OP_ENABLE 0
#define VKI_XEN_DOMCTL_MONITOR_OP_DISABLE 1
#define VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES 2
+#define VKI_XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP 3
#define VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG 0
#define VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR 1
#define VKI_XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP 2
#define VKI_XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT 3
#define VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST 4
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION 5
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_CPUID 6
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_INTERRUPT 8
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS 9
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED 10
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT 11
struct vki_xen_domctl_monitor_op_0000000b {
vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
} u;
};
+/* Monitor-op layout used by interface versions 0x00000011 and later. */
+struct vki_xen_domctl_monitor_op_00000011 {
+ vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
-struct vki_xen_domctl_monitor_op {
- vki_uint32_t op;
-#define VKI_XEN_DOMCTL_MONITOR_OP_ENABLE 0
-#define VKI_XEN_DOMCTL_MONITOR_OP_DISABLE 1
-#define VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES 2
-#define VKI_XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP 3
+ /*
+ * When used with ENABLE/DISABLE this has to be set to
+ * the requested vki_xen_DOMCTL_MONITOR_EVENT_* value.
+ * With GET_CAPABILITIES this field returns a bitmap of
+ * events supported by the platform, in the format
+ * (1 << vki_xen_DOMCTL_MONITOR_EVENT_*).
+ */
vki_uint32_t event;
+
+ /*
+ * Further options when issuing vki_xen_DOMCTL_MONITOR_OP_ENABLE.
+ */
union {
struct {
+ /* Which control register */
vki_uint8_t index;
+ /* Pause vCPU until response */
vki_uint8_t sync;
+ /* Send event only on a change of value */
vki_uint8_t onchangeonly;
+ /* Alignment padding */
+ vki_uint8_t pad1;
+ vki_uint32_t pad2;
+ /*
+ * Send event only if the changed bit in the control register
+ * is not masked.
+ */
+ vki_xen_uint64_aligned_t bitmask;
} mov_to_cr;
+
struct {
- vki_uint8_t extended_capture;
+ vki_uint32_t msr;
+ /* Send event only on a change of value */
+ vki_uint8_t onchangeonly;
} mov_to_msr;
+
struct {
+ /* Pause vCPU until response */
vki_uint8_t sync;
+ vki_uint8_t allow_userspace;
} guest_request;
+
+ struct {
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ } debug_exception;
} u;
};
struct vki_xen_domctl_tsc_info_0000000b tsc_info_0000000b;
//struct vki_xen_domctl_real_mode_area real_mode_area;
struct vki_xen_domctl_hvmcontext hvmcontext;
- struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
+ /* One member per layout: versions before 0x0000000e lack the bufsz
+ * field, so each member must use the struct matching its version
+ * suffix — otherwise the PRE/POST handlers read wrong offsets. */
+ struct vki_xen_domctl_hvmcontext_partial_00000007 hvmcontext_partial_00000007;
+ struct vki_xen_domctl_hvmcontext_partial_0000000e hvmcontext_partial_0000000e;
struct vki_xen_domctl_address_size address_size;
//struct vki_xen_domctl_sendtrigger sendtrigger;
//struct vki_xen_domctl_get_device_group get_device_group;
struct vki_xen_domctl_debug_op debug_op;
struct vki_xen_domctl_mem_event_op_00000007 mem_event_op_00000007;
vki_xen_domctl_vm_event_op_0000000b vm_event_op_0000000b;
+ struct vki_xen_domctl_vm_event_op_00000012 vm_event_op_00000012;
//struct vki_xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct vki_xen_domctl_cpuid cpuid;
//struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
//struct vki_xen_domctl_gdbsx_domstatus gdbsx_domstatus;
struct vki_xen_domctl_monitor_op_0000000b monitor_op_0000000b;
+ struct vki_xen_domctl_monitor_op_00000011 monitor_op_00000011;
vki_uint8_t pad[128];
} u;
};