}
case VKI_XENMEM_increase_reservation:
case VKI_XENMEM_decrease_reservation:
- case VKI_XENMEM_populate_physmap: {
+ case VKI_XENMEM_populate_physmap:
+ case VKI_XENMEM_claim_pages: {
struct xen_memory_reservation *memory_reservation =
(struct xen_memory_reservation *)ARG2;
const HChar *which;
(Addr)memory_reservation->extent_start.p,
sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
break;
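+      /* XENMEM_claim_pages passes the same struct xen_memory_reservation
+       * as the subops above but does not use extent_start, so no
+       * further argument memory needs checking here. */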
+ case VKI_XENMEM_claim_pages:
+ which = "XENMEM_claim_pages";
+ break;
default:
which = "XENMEM_unknown";
break;
{
case 0x00000008:
case 0x00000009:
+ case 0x0000000a:
break;
default:
VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
{
case 0x00000007:
case 0x00000008:
+ case 0x00000009:
break;
default:
VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
(Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
- domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+ domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
+ break;
+
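+    /* For getnodeaffinity only nodemap.nr_bits is an input; the bitmap
+     * itself is written by the hypervisor and is marked in POST below. */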
+ case VKI_XEN_DOMCTL_getnodeaffinity:
+ __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
+ break;
+ case VKI_XEN_DOMCTL_setnodeaffinity:
+ __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
+      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.nodemap.bitmap",
+ (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
+ domctl->u.nodeaffinity.nodemap.nr_bits / 8);
break;
case VKI_XEN_DOMCTL_getvcpucontext:
switch (ARG1) {
case VKI_XENMEM_set_memory_map:
case VKI_XENMEM_decrease_reservation:
+ case VKI_XENMEM_claim_pages:
/* No outputs */
break;
case VKI_XENMEM_increase_reservation:
{
case 0x00000008:
case 0x00000009:
+ case 0x0000000a:
break;
default:
return;
break;
case VKI_XEN_SYSCTL_physinfo:
- POST_XEN_SYSCTL_WRITE(physinfo, threads_per_core);
- POST_XEN_SYSCTL_WRITE(physinfo, cores_per_socket);
- POST_XEN_SYSCTL_WRITE(physinfo, nr_cpus);
- POST_XEN_SYSCTL_WRITE(physinfo, max_cpu_id);
- POST_XEN_SYSCTL_WRITE(physinfo, nr_nodes);
- POST_XEN_SYSCTL_WRITE(physinfo, max_node_id);
- POST_XEN_SYSCTL_WRITE(physinfo, cpu_khz);
- POST_XEN_SYSCTL_WRITE(physinfo, total_pages);
- POST_XEN_SYSCTL_WRITE(physinfo, free_pages);
- POST_XEN_SYSCTL_WRITE(physinfo, scrub_pages);
- POST_XEN_SYSCTL_WRITE(physinfo, hw_cap[8]);
- POST_XEN_SYSCTL_WRITE(physinfo, capabilities);
+ switch (sysctl->interface_version)
+ {
+ case 0x00000008:
+ case 0x00000009: /* Unchanged from version 8 */
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
+ break;
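+    /* Version 0xa adds outstanding_pages after scrub_pages; see
+     * struct vki_xen_sysctl_physinfo_0000000a. */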
+ case 0x0000000a:
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
+ break;
+ }
break;
case VKI_XEN_SYSCTL_topologyinfo:
switch (domctl->interface_version) {
case 0x00000007:
case 0x00000008:
+ case 0x00000009:
break;
default:
return;
case VKI_XEN_DOMCTL_hypercall_init:
case VKI_XEN_DOMCTL_setvcpuaffinity:
case VKI_XEN_DOMCTL_setvcpucontext:
+ case VKI_XEN_DOMCTL_setnodeaffinity:
case VKI_XEN_DOMCTL_set_cpuid:
case VKI_XEN_DOMCTL_unpausedomain:
/* No output fields */
case VKI_XEN_DOMCTL_getvcpuaffinity:
POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
- domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+ domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
+ break;
+
+ case VKI_XEN_DOMCTL_getnodeaffinity:
+ POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
+ domctl->u.nodeaffinity.nodemap.nr_bits / 8);
break;
case VKI_XEN_DOMCTL_getdomaininfo:
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
break;
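+    /* Version 9 adds outstanding_pages; see
+     * struct vki_xen_domctl_getdomaininfo_00000009. */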
+ case 0x00000009:
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
+ break;
}
break;
case VKI_XEN_DOMCTL_getvcpucontext:
*
* - 00000007: Xen 4.1
* - 00000008: Xen 4.2
+ * - 00000009: Xen 4.3
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
#define VKI_XEN_DOMCTL_pin_mem_cacheattr 41
#define VKI_XEN_DOMCTL_set_ext_vcpucontext 42
#define VKI_XEN_DOMCTL_get_ext_vcpucontext 43
-#define VKI_XEN_DOMCTL_set_opt_feature 44
+#define VKI_XEN_DOMCTL_set_opt_feature           44 /* Obsolete IA64 only */
#define VKI_XEN_DOMCTL_test_assign_device 45
#define VKI_XEN_DOMCTL_set_target 46
#define VKI_XEN_DOMCTL_deassign_device 47
#define VKI_XEN_DOMCTL_set_access_required 64
#define VKI_XEN_DOMCTL_audit_p2m 65
#define VKI_XEN_DOMCTL_set_virq_handler 66
+#define VKI_XEN_DOMCTL_set_broken_page_p2m 67
+#define VKI_XEN_DOMCTL_setnodeaffinity 68
+#define VKI_XEN_DOMCTL_getnodeaffinity 69
#define VKI_XEN_DOMCTL_gdbsx_guestmemio 1000
#define VKI_XEN_DOMCTL_gdbsx_pausevcpu 1001
#define VKI_XEN_DOMCTL_gdbsx_unpausevcpu 1002
typedef struct vki_xen_domctl_getdomaininfo_00000008 vki_xen_domctl_getdomaininfo_00000008_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000008_t);
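+
+/* Interface version 9 (Xen 4.3) adds outstanding_pages; the remaining
+ * fields are unchanged from the 00000008 layout. */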
+struct vki_xen_domctl_getdomaininfo_00000009 {
+ /* OUT variables. */
+ vki_xen_domid_t domain;
+ vki_uint32_t flags;
+ vki_xen_uint64_aligned_t tot_pages;
+ vki_xen_uint64_aligned_t max_pages;
+ vki_xen_uint64_aligned_t outstanding_pages;
+ vki_xen_uint64_aligned_t shr_pages;
+ vki_xen_uint64_aligned_t paged_pages;
+ vki_xen_uint64_aligned_t shared_info_frame;
+ vki_xen_uint64_aligned_t cpu_time;
+ vki_uint32_t nr_online_vcpus;
+ vki_uint32_t max_vcpu_id;
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t cpupool;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000009 vki_xen_domctl_getdomaininfo_00000009_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000009_t);
+
+/* Get/set the NUMA node(s) with which the guest has affinity. */
+/* XEN_DOMCTL_setnodeaffinity */
+/* XEN_DOMCTL_getnodeaffinity */
+struct vki_xen_domctl_nodeaffinity {
+    struct vki_xenctl_bitmap nodemap; /* IN */
+};
+typedef struct vki_xen_domctl_nodeaffinity vki_xen_domctl_nodeaffinity_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_nodeaffinity_t);
+
+
struct vki_xen_domctl_vcpuaffinity {
vki_uint32_t vcpu; /* IN */
- struct vki_xenctl_cpumap cpumap; /* IN/OUT */
+ struct vki_xenctl_bitmap cpumap; /* IN/OUT */
};
struct vki_xen_domctl_max_mem {
struct vki_xen_domctl_createdomain createdomain;
struct vki_xen_domctl_getdomaininfo_00000007 getdomaininfo_00000007;
struct vki_xen_domctl_getdomaininfo_00000008 getdomaininfo_00000008;
+ struct vki_xen_domctl_getdomaininfo_00000009 getdomaininfo_00000009;
//struct vki_xen_domctl_getmemlist getmemlist;
//struct vki_xen_domctl_getpageframeinfo getpageframeinfo;
//struct vki_xen_domctl_getpageframeinfo2 getpageframeinfo2;
//struct vki_xen_domctl_getpageframeinfo3 getpageframeinfo3;
+ struct vki_xen_domctl_nodeaffinity nodeaffinity;
struct vki_xen_domctl_vcpuaffinity vcpuaffinity;
//struct vki_xen_domctl_shadow_op shadow_op;
struct vki_xen_domctl_max_mem max_mem;
//struct vki_xen_domctl_ioport_mapping ioport_mapping;
//struct vki_xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
//struct vki_xen_domctl_ext_vcpucontext ext_vcpucontext;
- //struct vki_xen_domctl_set_opt_feature set_opt_feature;
//struct vki_xen_domctl_set_target set_target;
//struct vki_xen_domctl_subscribe subscribe;
//struct vki_xen_domctl_debug_op debug_op;
//struct vki_xen_domctl_audit_p2m audit_p2m;
//struct vki_xen_domctl_set_virq_handler set_virq_handler;
//struct vki_xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ //struct vki_xen_domctl_set_broken_page_p2m set_broken_page_p2m;
//struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
//struct vki_xen_domctl_gdbsx_domstatus gdbsx_domstatus;
vki_uint8_t pad[128];
*
* - 00000008: Xen 4.1
* - 00000009: Xen 4.2
+ * - 0000000a: Xen 4.3
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
#define VKI_XEN_SYSCTL_numainfo 17
#define VKI_XEN_SYSCTL_cpupool_op 18
#define VKI_XEN_SYSCTL_scheduler_op 19
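+/* New in sysctl interface version 0xa (Xen 4.3): */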
+#define VKI_XEN_SYSCTL_coverage_op 20
struct vki_xen_sysctl_getdomaininfolist_00000008 {
/* IN variables. */
vki_uint32_t domid; /* IN: M */
vki_uint32_t cpu; /* IN: AR */
vki_uint32_t n_dom; /* OUT: I */
- struct vki_xenctl_cpumap cpumap; /* OUT: IF */
+ struct vki_xenctl_bitmap cpumap; /* OUT: IF */
};
struct vki_xen_sysctl_topologyinfo {
VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memfree;
VKI_XEN_GUEST_HANDLE_64(vki_uint32) node_to_node_distance;
};
-struct vki_xen_sysctl_physinfo {
+struct vki_xen_sysctl_physinfo_00000008 {
vki_uint32_t threads_per_core;
vki_uint32_t cores_per_socket;
vki_uint32_t nr_cpus; /* # CPUs currently online */
vki_uint32_t capabilities;
};
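+
+/* Interface version 0xa (Xen 4.3) adds outstanding_pages; otherwise the
+ * layout matches physinfo_00000008 (versions 8 and 9 are handled
+ * identically in syswrap-xen.c). */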
+struct vki_xen_sysctl_physinfo_0000000a {
+ vki_uint32_t threads_per_core;
+ vki_uint32_t cores_per_socket;
+ vki_uint32_t nr_cpus; /* # CPUs currently online */
+ vki_uint32_t max_cpu_id; /* Largest possible CPU ID on this host */
+ vki_uint32_t nr_nodes; /* # nodes currently online */
+ vki_uint32_t max_node_id; /* Largest possible node ID on this host */
+ vki_uint32_t cpu_khz;
+ vki_xen_uint64_aligned_t total_pages;
+ vki_xen_uint64_aligned_t free_pages;
+ vki_xen_uint64_aligned_t scrub_pages;
+ vki_xen_uint64_aligned_t outstanding_pages;
+ vki_uint32_t hw_cap[8];
+
+ vki_uint32_t capabilities;
+};
+
struct vki_xen_sysctl {
vki_uint32_t cmd;
vki_uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
//struct vki_xen_sysctl_readconsole readconsole;
//struct vki_xen_sysctl_tbuf_op tbuf_op;
- struct vki_xen_sysctl_physinfo physinfo;
+ struct vki_xen_sysctl_physinfo_00000008 physinfo_00000008;
+ struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
struct vki_xen_sysctl_topologyinfo topologyinfo;
struct vki_xen_sysctl_numainfo numainfo;
//struct vki_xen_sysctl_sched_id sched_id;
//struct vki_xen_sysctl_lockprof_op lockprof_op;
struct vki_xen_sysctl_cpupool_op cpupool_op;
//struct vki_xen_sysctl_scheduler_op scheduler_op;
+ //struct vki_xen_sysctl_coverage_op coverage_op;
+
vki_uint8_t pad[128];
} u;
};