PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
(Addr)&arg->gpfn, sizeof(arg->gpfn));
break;
- };
+ }
case VKI_XENMEM_remove_from_physmap: {
struct vki_xen_remove_from_physmap *arg =
(struct vki_xen_remove_from_physmap *)ARG2;
PRE_MEM_READ("XENMEM_remove_from_physmap domid",
(Addr)&arg->domid, sizeof(arg->domid));
PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
(Addr)&arg->gpfn, sizeof(arg->gpfn));
+ break;
}
case VKI_XENMEM_get_sharing_freed_pages:
case VKI_XENMEM_get_sharing_shared_pages:
break;
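+/* XENMEM_access_op: domain, op and gfn must be initialised on entry,
+ * so check exactly those three fields. */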
+ case VKI_XENMEM_access_op: {
+ struct vki_xen_mem_event_op *arg =
+ (struct vki_xen_mem_event_op *)ARG2;
+ PRE_MEM_READ("XENMEM_access_op domid",
+ (Addr)&arg->domain, sizeof(arg->domain));
+ PRE_MEM_READ("XENMEM_access_op op",
+ (Addr)&arg->op, sizeof(arg->op));
+ PRE_MEM_READ("XENMEM_access_op gfn",
+ (Addr)&arg->gfn, sizeof(arg->gfn));
+ break;
+ }
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_memory_op", ARG1);
domctl->u.hvmcontext.size);
break;
+ case VKI_XEN_DOMCTL_gethvmcontext_partial:
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);
+
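+/* Only the CPU save record is understood: for VKI_HVM_SAVE_CODE(CPU)
+ * the hypervisor writes one struct vki_hvm_hw_cpu into the caller's
+ * buffer, so mark exactly VKI_HVM_SAVE_LENGTH(CPU) bytes as written. */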
+ switch (domctl->u.hvmcontext_partial.type) {
+ case VKI_HVM_SAVE_CODE(CPU):
+ if ( domctl->u.hvmcontext_partial.buffer.p )
+ PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
+ (Addr)domctl->u.hvmcontext_partial.buffer.p,
+ VKI_HVM_SAVE_LENGTH(CPU));
+ break;
+ default:
+ bad_subop(tid, layout, arrghs, status, flags,
+ "__HYPERVISOR_domctl_gethvmcontext_partial type",
+ domctl->u.hvmcontext_partial.type);
+ break;
+ }
+ break;
+
case VKI_XEN_DOMCTL_max_mem:
PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
break;
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
break;
+ case VKI_XEN_DOMCTL_ioport_permission:
+ PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
+ PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
+ PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
+ break;
+
case VKI_XEN_DOMCTL_hypercall_init:
PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
break;
PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
break;
+ case VKI_XEN_DOMCTL_set_access_required:
+ PRE_XEN_DOMCTL_READ(access_required, access_required);
+ break;
+
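+/* mem_event_op: op and mode are inputs; the port Xen hands back is
+ * marked defined in the corresponding POST wrapper. */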
+ case VKI_XEN_DOMCTL_mem_event_op:
+ PRE_XEN_DOMCTL_READ(mem_event_op, op);
+ PRE_XEN_DOMCTL_READ(mem_event_op, mode);
+ break;
+
+ case VKI_XEN_DOMCTL_debug_op:
+ PRE_XEN_DOMCTL_READ(debug_op, op);
+ PRE_XEN_DOMCTL_READ(debug_op, vcpu);
+ break;
+
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_domctl", domctl->cmd);
PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
break;
+ case VKI_XEN_HVMOP_set_mem_access:
+ PRE_XEN_HVMOP_READ(set_mem_access, domid);
+ PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
+ PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
+ /* nr is not read when setting the default access (first_pfn == ~0ULL) */
+ if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
+ PRE_XEN_HVMOP_READ(set_mem_access, nr);
+ break;
+
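+/* For get_mem_access, hvmmem_access is an output: mark it addressable
+ * here; the POST wrapper marks it defined. */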
+ case VKI_XEN_HVMOP_get_mem_access:
+ PRE_XEN_HVMOP_READ(get_mem_access, domid);
+ PRE_XEN_HVMOP_READ(get_mem_access, pfn);
+
+ PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
+ (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
+ sizeof(vki_uint16_t));
+ break;
+
+ case VKI_XEN_HVMOP_inject_trap:
+ PRE_XEN_HVMOP_READ(inject_trap, domid);
+ PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
+ PRE_XEN_HVMOP_READ(inject_trap, vector);
+ PRE_XEN_HVMOP_READ(inject_trap, type);
+ PRE_XEN_HVMOP_READ(inject_trap, error_code);
+ PRE_XEN_HVMOP_READ(inject_trap, insn_len);
+ PRE_XEN_HVMOP_READ(inject_trap, cr2);
+ break;
+
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_hvm_op", op);
case VKI_XENMEM_claim_pages:
case VKI_XENMEM_maximum_gpfn:
case VKI_XENMEM_remove_from_physmap:
+ case VKI_XENMEM_access_op:
/* No outputs */
break;
case VKI_XENMEM_increase_reservation:
case VKI_XEN_DOMCTL_max_mem:
case VKI_XEN_DOMCTL_set_address_size:
case VKI_XEN_DOMCTL_settscinfo:
+ case VKI_XEN_DOMCTL_ioport_permission:
case VKI_XEN_DOMCTL_hypercall_init:
case VKI_XEN_DOMCTL_setvcpuaffinity:
case VKI_XEN_DOMCTL_setvcpucontext:
case VKI_XEN_DOMCTL_set_cpuid:
case VKI_XEN_DOMCTL_unpausedomain:
case VKI_XEN_DOMCTL_sethvmcontext:
+ case VKI_XEN_DOMCTL_debug_op:
case VKI_XEN_DOMCTL_set_max_evtchn:
case VKI_XEN_DOMCTL_cacheflush:
case VKI_XEN_DOMCTL_resumedomain:
+ case VKI_XEN_DOMCTL_set_access_required:
/* No output fields */
break;
* domctl->u.hvmcontext.size);
break;
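+/* Mirror of the PRE wrapper: only the CPU record type is recognised,
+ * so only VKI_HVM_SAVE_LENGTH(CPU) bytes of the buffer become defined. */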
+ case VKI_XEN_DOMCTL_gethvmcontext_partial:
+ switch (domctl->u.hvmcontext_partial.type) {
+ case VKI_HVM_SAVE_CODE(CPU):
+ if ( domctl->u.hvmcontext_partial.buffer.p )
+ POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
+ VKI_HVM_SAVE_LENGTH(CPU));
+ break;
+ }
+ break;
+
case VKI_XEN_DOMCTL_scheduler_op:
if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
switch(domctl->u.scheduler_op.sched_id) {
default:
break;
}
+ break;
+ case VKI_XEN_DOMCTL_mem_event_op:
+ POST_XEN_DOMCTL_WRITE(mem_event_op, port);
break;
}
#undef POST_XEN_DOMCTL_WRITE
POST_MEM_WRITE((Addr)&((_type*)arg)->_field, \
sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
- __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
+ __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
switch (op) {
case VKI_XEN_HVMOP_set_param:
case VKI_XEN_HVMOP_set_isa_irq_level:
case VKI_XEN_HVMOP_set_pci_link_route:
case VKI_XEN_HVMOP_set_mem_type:
+ case VKI_XEN_HVMOP_set_mem_access:
+ case VKI_XEN_HVMOP_inject_trap:
/* No output parameters */
break;
case VKI_XEN_HVMOP_get_param:
__POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
break;
+
+ case VKI_XEN_HVMOP_get_mem_access:
+ POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
+ break;
}
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
vki_uint32_t max; /* maximum number of vcpus */
};
+struct vki_xen_domctl_ioport_permission {
+ vki_uint32_t first_port; /* IN */
+ vki_uint32_t nr_ports; /* IN */
+ vki_uint8_t allow_access; /* IN */
+};
+
struct vki_xen_domctl_hypercall_init {
vki_xen_uint64_aligned_t gmfn; /* GMFN to be initialised */
};
typedef struct vki_xen_domctl_hvmcontext vki_xen_domctl_hvmcontext_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_t);
+struct vki_xen_domctl_hvmcontext_partial {
+ vki_uint32_t type; /* IN */
+ vki_uint32_t instance; /* IN */
+ VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* IN/OUT buffer */
+};
+typedef struct vki_xen_domctl_hvmcontext_partial vki_xen_domctl_hvmcontext_partial_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_t);
+
struct vki_xen_domctl_tsc_info {
VKI_XEN_GUEST_HANDLE_64(vki_xen_guest_tsc_info_t) out_info; /* OUT */
vki_xen_guest_tsc_info_t info; /* IN */
vki_uint32_t size;
};
+struct vki_xen_domctl_debug_op {
+ vki_uint32_t op; /* IN */
+ vki_uint32_t vcpu; /* IN */
+};
+typedef struct vki_xen_domctl_debug_op vki_xen_domctl_debug_op_t;
+
+struct vki_xen_domctl_mem_event_op {
+ vki_uint32_t op; /* IN */
+ vki_uint32_t mode; /* IN */
+ vki_uint32_t port; /* OUT */
+};
+
+struct vki_xen_domctl_set_access_required {
+ vki_uint8_t access_required; /* IN */
+};
+
struct vki_xen_domctl_set_max_evtchn {
vki_uint32_t max_port;
};
//struct vki_xen_domctl_setdebugging setdebugging;
//struct vki_xen_domctl_irq_permission irq_permission;
//struct vki_xen_domctl_iomem_permission iomem_permission;
- //struct vki_xen_domctl_ioport_permission ioport_permission;
+ struct vki_xen_domctl_ioport_permission ioport_permission;
struct vki_xen_domctl_hypercall_init hypercall_init;
//struct vki_xen_domctl_arch_setup arch_setup;
struct vki_xen_domctl_settimeoffset settimeoffset;
struct vki_xen_domctl_tsc_info tsc_info;
//struct vki_xen_domctl_real_mode_area real_mode_area;
struct vki_xen_domctl_hvmcontext hvmcontext;
- //struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
+ struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
struct vki_xen_domctl_address_size address_size;
//struct vki_xen_domctl_sendtrigger sendtrigger;
//struct vki_xen_domctl_get_device_group get_device_group;
//struct vki_xen_domctl_ext_vcpucontext ext_vcpucontext;
//struct vki_xen_domctl_set_target set_target;
//struct vki_xen_domctl_subscribe subscribe;
- //struct vki_xen_domctl_debug_op debug_op;
- //struct vki_xen_domctl_mem_event_op mem_event_op;
+ struct vki_xen_domctl_debug_op debug_op;
+ struct vki_xen_domctl_mem_event_op mem_event_op;
//struct vki_xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct vki_xen_domctl_cpuid cpuid;
struct vki_xen_domctl_vcpuextstate vcpuextstate;
#endif
- //struct vki_xen_domctl_set_access_required access_required;
+ struct vki_xen_domctl_set_access_required access_required;
//struct vki_xen_domctl_audit_p2m audit_p2m;
//struct vki_xen_domctl_set_virq_handler set_virq_handler;
struct vki_xen_domctl_set_max_evtchn set_max_evtchn;
typedef struct vki_xen_vcpu_guest_context vki_xen_vcpu_guest_context_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_vcpu_guest_context_t);
+
+/* HVM_SAVE types and declarations for getcontext_partial */
+#define VKI_DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
+ struct __VKI_HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}
+
+#define VKI_HVM_SAVE_TYPE(_x) typeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->t)
+#define VKI_HVM_SAVE_LENGTH(_x) (sizeof (VKI_HVM_SAVE_TYPE(_x)))
+#define VKI_HVM_SAVE_CODE(_x) (sizeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->c))
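+
+/* The record layout travels in 't', the numeric save code in the size of
+ * 'c', and 'cpt' mirrors Xen's HVM_SAVE_HAS_COMPAT convention, so each is
+ * recoverable with sizeof() in a constant expression. */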
+
+struct vki_hvm_hw_cpu {
+ vki_uint8_t fpu_regs[512];
+
+ vki_uint64_t rax;
+ vki_uint64_t rbx;
+ vki_uint64_t rcx;
+ vki_uint64_t rdx;
+ vki_uint64_t rbp;
+ vki_uint64_t rsi;
+ vki_uint64_t rdi;
+ vki_uint64_t rsp;
+ vki_uint64_t r8;
+ vki_uint64_t r9;
+ vki_uint64_t r10;
+ vki_uint64_t r11;
+ vki_uint64_t r12;
+ vki_uint64_t r13;
+ vki_uint64_t r14;
+ vki_uint64_t r15;
+
+ vki_uint64_t rip;
+ vki_uint64_t rflags;
+
+ vki_uint64_t cr0;
+ vki_uint64_t cr2;
+ vki_uint64_t cr3;
+ vki_uint64_t cr4;
+
+ vki_uint64_t dr0;
+ vki_uint64_t dr1;
+ vki_uint64_t dr2;
+ vki_uint64_t dr3;
+ vki_uint64_t dr6;
+ vki_uint64_t dr7;
+
+ vki_uint32_t cs_sel;
+ vki_uint32_t ds_sel;
+ vki_uint32_t es_sel;
+ vki_uint32_t fs_sel;
+ vki_uint32_t gs_sel;
+ vki_uint32_t ss_sel;
+ vki_uint32_t tr_sel;
+ vki_uint32_t ldtr_sel;
+
+ vki_uint32_t cs_limit;
+ vki_uint32_t ds_limit;
+ vki_uint32_t es_limit;
+ vki_uint32_t fs_limit;
+ vki_uint32_t gs_limit;
+ vki_uint32_t ss_limit;
+ vki_uint32_t tr_limit;
+ vki_uint32_t ldtr_limit;
+ vki_uint32_t idtr_limit;
+ vki_uint32_t gdtr_limit;
+
+ vki_uint64_t cs_base;
+ vki_uint64_t ds_base;
+ vki_uint64_t es_base;
+ vki_uint64_t fs_base;
+ vki_uint64_t gs_base;
+ vki_uint64_t ss_base;
+ vki_uint64_t tr_base;
+ vki_uint64_t ldtr_base;
+ vki_uint64_t idtr_base;
+ vki_uint64_t gdtr_base;
+
+ vki_uint32_t cs_arbytes;
+ vki_uint32_t ds_arbytes;
+ vki_uint32_t es_arbytes;
+ vki_uint32_t fs_arbytes;
+ vki_uint32_t gs_arbytes;
+ vki_uint32_t ss_arbytes;
+ vki_uint32_t tr_arbytes;
+ vki_uint32_t ldtr_arbytes;
+
+ vki_uint64_t sysenter_cs;
+ vki_uint64_t sysenter_esp;
+ vki_uint64_t sysenter_eip;
+
+ /* msr for em64t */
+ vki_uint64_t shadow_gs;
+
+ /* msr content saved/restored. */
+ vki_uint64_t msr_flags;
+ vki_uint64_t msr_lstar;
+ vki_uint64_t msr_star;
+ vki_uint64_t msr_cstar;
+ vki_uint64_t msr_syscall_mask;
+ vki_uint64_t msr_efer;
+ vki_uint64_t msr_tsc_aux;
+
+ /* guest's idea of what rdtsc() would return */
+ vki_uint64_t tsc;
+
+ /* pending event, if any */
+ union {
+ vki_uint32_t pending_event;
+ struct {
+ vki_uint8_t pending_vector:8;
+ vki_uint8_t pending_type:3;
+ vki_uint8_t pending_error_valid:1;
+ vki_uint32_t pending_reserved:19;
+ vki_uint8_t pending_valid:1;
+ };
+ };
+ /* error code for pending event */
+ vki_uint32_t error_code;
+};
+
+VKI_DECLARE_HVM_SAVE_TYPE(CPU, 2, struct vki_hvm_hw_cpu);
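+
+/* Worked expansion of the declaration above:
+ *   VKI_HVM_SAVE_CODE(CPU)   == sizeof(char[2])               == 2
+ *   VKI_HVM_SAVE_LENGTH(CPU) == sizeof(struct vki_hvm_hw_cpu)
+ * exactly the constants consumed by the gethvmcontext_partial wrappers. */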
+
#endif // __VKI_XEN_H
/*--------------------------------------------------------------------*/