PRE_XEN_DOMCTL_READ(createdomain, flags);
break;
+ case VKI_XEN_DOMCTL_gethvmcontext:
+ /* Xen unconditionally reads the 'buffer' pointer */
+ __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
+ /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
+ * buffer is a request for the required size. */
+ if ( domctl->u.hvmcontext.buffer.p )
+ __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
+ break;
+
case VKI_XEN_DOMCTL_max_mem:
PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
break;
POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
break;
+ case VKI_XEN_DOMCTL_gethvmcontext:
+ /* Xen unconditionally writes size... */
+ __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
+ /* ...but only writes to the buffer if it was non NULL */
+ if ( domctl->u.hvmcontext.buffer.p )
+ POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
+ sizeof(*domctl->u.hvmcontext.buffer.p)
+ * domctl->u.hvmcontext.size);
+ break;
+
case VKI_XEN_DOMCTL_scheduler_op:
if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
switch(domctl->u.scheduler_op.sched_id) {
typedef struct vki_xen_guest_tsc_info vki_xen_guest_tsc_info_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_guest_tsc_info_t);
+struct vki_xen_domctl_hvmcontext {
+    vki_uint32_t size; /* IN/OUT: buffer size in bytes; on get, OUT is bytes used */
+    VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* IN/OUT; NULL on get queries required size */
+};
+typedef struct vki_xen_domctl_hvmcontext vki_xen_domctl_hvmcontext_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_t);
+
struct vki_xen_domctl_tsc_info {
VKI_XEN_GUEST_HANDLE_64(vki_xen_guest_tsc_info_t) out_info; /* OUT */
vki_xen_guest_tsc_info_t info; /* IN */
//struct vki_xen_domctl_disable_migrate disable_migrate;
struct vki_xen_domctl_tsc_info tsc_info;
//struct vki_xen_domctl_real_mode_area real_mode_area;
- //struct vki_xen_domctl_hvmcontext hvmcontext;
+ struct vki_xen_domctl_hvmcontext hvmcontext;
//struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
struct vki_xen_domctl_address_size address_size;
//struct vki_xen_domctl_sendtrigger sendtrigger;