git.ipfire.org Git - thirdparty/valgrind.git/commitdiff
Bug 407376 - Update Xen support to 4.12 (4.13, actually) and add more coverage.
author: Julian Seward <jseward@acm.org>
Wed, 22 Jan 2020 09:55:33 +0000 (10:55 +0100)
committer: Julian Seward <jseward@acm.org>
Wed, 22 Jan 2020 09:55:33 +0000 (10:55 +0100)
Patch from Tamas K Lengyel (tamas@tklengyel.com).

coregrind/m_syswrap/syswrap-xen.c
include/vki/vki-xen-domctl.h
include/vki/vki-xen-hvm.h
include/vki/vki-xen-sysctl.h

index f464da567b6e845bd3c9edc84f78244051a34826..8cb91b57d1a7e835d2cec45e03f1d3541bcdefd1 100644 (file)
@@ -582,6 +582,13 @@ PRE(sysctl) {
    case 0x00000009:
    case 0x0000000a:
    case 0x0000000b:
+   case 0x0000000c:
+   case 0x0000000d:
+   case 0x0000000e:
+   case 0x0000000f:
+   case 0x00000010:
+   case 0x00000011:
+   case 0x00000012:
           break;
    default:
       bad_intf_version(tid, layout, arrghs, status, flags,
@@ -624,10 +631,21 @@ PRE(sysctl) {
         break;
       case 0x0000000a:
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
         break;
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
+        PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, first_domain);
+        PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, max_domains);
+        PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, buffer);
+        break;
       default:
           VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                     "%"PRIx32" not implemented yet\n",
@@ -728,6 +746,11 @@ PRE(domctl)
    case 0x0000000a:
    case 0x0000000b:
    case 0x0000000c:
+   case 0x0000000d:
+   case 0x0000000f:
+   case 0x00000010:
+   case 0x00000011:
+   case 0x00000012:
           break;
    default:
       bad_intf_version(tid, layout, arrghs, status, flags,
@@ -778,27 +801,27 @@ PRE(domctl)
        break;
 
    case VKI_XEN_DOMCTL_gethvmcontext_partial:
-       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
-       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
-       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);
+       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, type);
+       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, instance);
+       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, buffer);
 
-       switch (domctl->u.hvmcontext_partial.type) {
+       switch (domctl->u.hvmcontext_partial_00000007.type) {
        case VKI_HVM_SAVE_CODE(CPU):
-           if ( domctl->u.hvmcontext_partial.buffer.p )
+           if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
                 PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
-                   (Addr)domctl->u.hvmcontext_partial.buffer.p,
+                   (Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
                    VKI_HVM_SAVE_LENGTH(CPU));
            break;
        case VKI_HVM_SAVE_CODE(MTRR):
-           if ( domctl->u.hvmcontext_partial.buffer.p )
+           if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
                PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
-                  (Addr)domctl->u.hvmcontext_partial.buffer.p,
+                  (Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
                   VKI_HVM_SAVE_LENGTH(MTRR));
            break;
        default:
            bad_subop(tid, layout, arrghs, status, flags,
                          "__HYPERVISOR_domctl_gethvmcontext_partial type",
-                         domctl->u.hvmcontext_partial.type);
+                         domctl->u.hvmcontext_partial_00000007.type);
            break;
        }
        break;
@@ -820,6 +843,13 @@ PRE(domctl)
          __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_00000007, machine_sbdf);
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, dev);
          __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, flag);
          switch (domctl->u.assign_device_0000000b.dev) {
@@ -850,6 +880,13 @@ PRE(domctl)
          __PRE_XEN_DOMCTL_READ(assign_device, assign_device_00000007, machine_sbdf);
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, dev);
          __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, flag);
          switch (domctl->u.assign_device_0000000b.dev) {
@@ -880,6 +917,13 @@ PRE(domctl)
          __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_00000007, machine_sbdf);
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, dev);
          __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, flag);
          switch (domctl->u.assign_device_0000000b.dev) {
@@ -914,6 +958,13 @@ PRE(domctl)
          __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.elapsed_nsec);
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, tsc_mode);
          __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, gtsc_khz);
          __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, incarnation);
@@ -989,6 +1040,14 @@ PRE(domctl)
          __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
          break;
       case 0x0000000a:
+      case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
          if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
             __PRE_XEN_DOMCTL_READ(
@@ -1012,6 +1071,14 @@ PRE(domctl)
                       domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
          break;
       case 0x0000000a:
+      case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
          __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
          if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
@@ -1068,6 +1135,15 @@ PRE(domctl)
          break;
 
       case 0x00000009:
+      case 0x0000000a:
+      case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
          break;
 
@@ -1108,6 +1184,15 @@ PRE(domctl)
            break;
 
        case 0x00000009:
+       case 0x0000000a:
+       case 0x0000000b:
+       case 0x0000000c:
+       case 0x0000000d:
+       case 0x0000000e:
+       case 0x0000000f:
+       case 0x00000010:
+       case 0x00000011:
+       case 0x00000012:
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
 #if defined(__i386__) || defined(__x86_64__)
@@ -1246,9 +1331,20 @@ PRE(domctl)
          __PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, mode);
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
          __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, op);
          __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, mode);
          break;
+      case 0x00000012:
+         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, op);
+         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, mode);
+         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, u.enable);
+         break;
       }
       break;
 
@@ -1276,6 +1372,11 @@ PRE(domctl)
    case VKI_XEN_DOMCTL_monitor_op:
       switch (domctl->interface_version) {
       case 0x000000b:
+      case 0x000000c:
+      case 0x000000d:
+      case 0x000000e:
+      case 0x000000f:
+      case 0x0000010:
           if (domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
               domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
              switch (domctl->u.monitor_op_0000000b.event) {
@@ -1293,6 +1394,26 @@ PRE(domctl)
              }
           }
 
+         break;
+      case 0x0000011:
+      case 0x0000012:
+          if (domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
+              domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
+             switch (domctl->u.monitor_op_00000011.event) {
+             case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+                __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.mov_to_cr);
+                break;
+             case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
+                __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.mov_to_msr);
+                break;
+             case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+                __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.guest_request);
+                break;
+             case VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
+                break;
+             }
+          }
+
          break;
       }
       break;
@@ -1399,6 +1520,38 @@ PRE(hvm_op)
       PRE_XEN_HVMOP_READ(inject_trap, cr2);
       break;
 
+   case VKI_XEN_HVMOP_altp2m: {
+      vki_xen_hvm_altp2m_op_t *altp2m_op = (vki_xen_hvm_altp2m_op_t *)arg;
+
+      PRE_XEN_HVMOP_READ(altp2m_op, version);
+      PRE_XEN_HVMOP_READ(altp2m_op, cmd);
+      PRE_XEN_HVMOP_READ(altp2m_op, domain);
+      PRE_XEN_HVMOP_READ(altp2m_op, pad1);
+      PRE_XEN_HVMOP_READ(altp2m_op, pad2);
+
+      switch (altp2m_op->cmd) {
+      case VKI_XEN_HVMOP_altp2m_get_domain_state:
+      case VKI_XEN_HVMOP_altp2m_set_domain_state:
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.domain_state.state), sizeof(vki_uint8_t));
+        break;
+      case VKI_XEN_HVMOP_altp2m_create_p2m:
+      case VKI_XEN_HVMOP_altp2m_destroy_p2m:
+      case VKI_XEN_HVMOP_altp2m_switch_p2m:
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.view.view), sizeof(vki_uint16_t));
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.view.hvmmem_default_access), sizeof(vki_uint16_t));
+        break;
+      case VKI_XEN_HVMOP_altp2m_change_gfn:
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.view), sizeof(vki_uint16_t));
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.pad1), sizeof(vki_uint16_t));
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.pad2), sizeof(vki_uint32_t));
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.old_gfn), sizeof(vki_uint64_t));
+        PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.new_gfn), sizeof(vki_uint64_t));
+        break;
+      };
+
+      break;
+   }
+
    default:
       bad_subop(tid, layout, arrghs, status, flags,
                 "__HYPERVISOR_hvm_op", op);
@@ -1670,6 +1823,13 @@ POST(sysctl)
    case 0x00000009:
    case 0x0000000a:
    case 0x0000000b:
+   case 0x0000000c:
+   case 0x0000000d:
+   case 0x0000000e:
+   case 0x0000000f:
+   case 0x00000010:
+   case 0x00000011:
+   case 0x00000012:
           break;
    default:
       return;
@@ -1704,11 +1864,23 @@ POST(sysctl)
         break;
       case 0x0000000a:
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
+        POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000010, num_domains);
+        POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000010.buffer.p,
+                       sizeof(*sysctl->u.getdomaininfolist_00000010.buffer.p)
+                       * sysctl->u.getdomaininfolist_00000010.num_domains);
+        break;
       }
       break;
 
@@ -1749,6 +1921,10 @@ POST(sysctl)
          break;
       case 0x0000000a:
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
          POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
          POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
          POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
@@ -1763,6 +1939,23 @@ POST(sysctl)
          POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
          POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
          break;
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, threads_per_core);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, cores_per_socket);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, nr_cpus);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_cpu_id);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, nr_nodes);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_node_id);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, cpu_khz);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, capabilities);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, total_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, free_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, scrub_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, outstanding_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_mfn);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000010, hw_cap[8]);
       }
       break;
 
@@ -1806,6 +1999,13 @@ POST(domctl){
    case 0x00000009:
    case 0x0000000a:
    case 0x0000000b:
+   case 0x0000000c:
+   case 0x0000000d:
+   case 0x0000000e:
+   case 0x0000000f:
+   case 0x00000010:
+   case 0x00000011:
+   case 0x00000012:
           break;
    default:
           return;
@@ -1867,6 +2067,13 @@ POST(domctl){
                         sizeof(vki_xen_guest_tsc_info_t));
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, tsc_mode);
          __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, gtsc_khz);
          __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, incarnation);
@@ -1895,10 +2102,10 @@ POST(domctl){
        break;
 
    case VKI_XEN_DOMCTL_gethvmcontext_partial:
-       switch (domctl->u.hvmcontext_partial.type) {
+       switch (domctl->u.hvmcontext_partial_00000007.type) {
        case VKI_HVM_SAVE_CODE(CPU):
-           if ( domctl->u.hvmcontext_partial.buffer.p )
-                POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
+           if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
+                POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
                    VKI_HVM_SAVE_LENGTH(CPU));
            break;
        }
@@ -1941,6 +2148,14 @@ POST(domctl){
                         domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
          break;
       case 0x0000000a:
+      case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
          if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
             POST_MEM_WRITE(
                (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
@@ -1990,6 +2205,14 @@ POST(domctl){
       break;
       case 0x00000009:
       case 0x0000000a:
+      case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
+      case 0x00000012:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
@@ -2118,8 +2341,17 @@ POST(domctl){
          __POST_XEN_DOMCTL_WRITE(mem_event_op, mem_event_op_00000007, port);
          break;
       case 0x0000000b:
+      case 0x0000000c:
+      case 0x0000000d:
+      case 0x0000000e:
+      case 0x0000000f:
+      case 0x00000010:
+      case 0x00000011:
          __POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_0000000b, port);
          break;
+      case 0x00000012:
+         __POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_00000012, u.enable.port);
+         break;
       }
       break;
 
@@ -2140,6 +2372,22 @@ POST(domctl){
              }
           }
 
+         break;
+      case 0x0000011:
+      case 0x0000012:
+          if (domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
+             switch(domctl->u.monitor_op_00000011.event) {
+             case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+                __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.mov_to_cr);
+                break;
+             case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
+                __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.mov_to_msr);
+                break;
+             case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+                __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.guest_request);
+                break;
+             }
+          }
+
          break;
       }
       break;
index 6e25c7a961e484a5b4293897dbea4da0e351dd3e..3a544f556f8dd23db92e1f34d9e3e5b81c0001a6 100644 (file)
@@ -188,6 +188,32 @@ struct vki_xen_domctl_getdomaininfo_00000009 {
 typedef struct vki_xen_domctl_getdomaininfo_00000009 vki_xen_domctl_getdomaininfo_00000009_t;
 DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000009_t);
 
+// x86 version only for now
+struct vki_xen_arch_domainconfig_00000010 {
+    vki_uint32_t emulation_flags;
+};
+
+struct vki_xen_domctl_getdomaininfo_00000010 {
+    /* OUT variables. */
+    vki_xen_domid_t  domain;
+    vki_uint32_t flags;
+    vki_xen_uint64_aligned_t tot_pages;
+    vki_xen_uint64_aligned_t max_pages;
+    vki_xen_uint64_aligned_t outstanding_pages;
+    vki_xen_uint64_aligned_t shr_pages;
+    vki_xen_uint64_aligned_t paged_pages;
+    vki_xen_uint64_aligned_t shared_info_frame;
+    vki_xen_uint64_aligned_t cpu_time;
+    vki_uint32_t nr_online_vcpus;
+    vki_uint32_t max_vcpu_id;
+    vki_uint32_t ssidref;
+    vki_xen_domain_handle_t handle;
+    vki_uint32_t cpupool;
+    struct vki_xen_arch_domainconfig_00000010 arch;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000010 vki_xen_domctl_getdomaininfo_00000010_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000010_t);
+
 /* vki_xen_domctl_getdomaininfo_0000000a is the same as 00000009 */
 
 /* Get/set the NUMA node(s) with which the guest has affinity with. */
@@ -376,14 +402,22 @@ struct vki_xen_domctl_hvmcontext {
 typedef struct vki_xen_domctl_hvmcontext vki_xen_domctl_hvmcontext_t;
 DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_t);
 
-struct vki_xen_domctl_hvmcontext_partial {
+struct vki_xen_domctl_hvmcontext_partial_00000007 {
     vki_uint32_t type; /* IN */
     vki_uint32_t instance; /* IN */
     VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* IN/OUT buffer */
 };
-typedef struct vki_xen_domctl_hvmcontext_partial vki_xen_domctl_hvmcontext_partial_t;
-DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_t);
+typedef struct vki_xen_domctl_hvmcontext_partial_00000007 vki_xen_domctl_hvmcontext_partial_00000007_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_00000007_t);
 
+struct vki_xen_domctl_hvmcontext_partial_0000000e {
+    vki_uint32_t type; /* IN */
+    vki_uint32_t instance; /* IN */
+    vki_xen_uint64_aligned_t bufsz; /* IN */
+    VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* OUT buffer */
+};
+typedef struct vki_xen_domctl_hvmcontext_partial_0000000e vki_xen_domctl_hvmcontext_partial_0000000e_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_0000000e_t);
 
 struct vki_xen_domctl_pin_mem_cacheattr {
     vki_xen_uint64_aligned_t start, end; /* IN */
@@ -477,6 +511,19 @@ struct vki_xen_domctl_mem_event_op_00000007 {
 /* only a name change in 4.6 */
 typedef struct vki_xen_domctl_mem_event_op_00000007 vki_xen_domctl_vm_event_op_0000000b;
 
+struct vki_xen_domctl_vm_event_op_00000012 {
+    vki_uint32_t op; /* IN */
+    vki_uint32_t mode; /* IN */
+
+    union {
+        struct {
+            vki_uint32_t port; /* OUT */
+        } enable;
+
+        vki_uint32_t version;
+    } u;
+};
+
 struct vki_xen_domctl_set_access_required {
     vki_uint8_t access_required; /* IN */
 };
@@ -507,12 +554,20 @@ struct vki_xen_domctl_vcpu_msrs {
 #define VKI_XEN_DOMCTL_MONITOR_OP_ENABLE            0
 #define VKI_XEN_DOMCTL_MONITOR_OP_DISABLE           1
 #define VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES  2
+#define VKI_XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP  3
 
 #define VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG         0
 #define VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR            1
 #define VKI_XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP            2
 #define VKI_XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT   3
 #define VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST         4
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION       5
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_CPUID                 6
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL       7
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_INTERRUPT             8
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS           9
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED    10
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT     11
 
 struct vki_xen_domctl_monitor_op_0000000b {
     vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
@@ -551,26 +606,54 @@ struct vki_xen_domctl_monitor_op_0000000b {
     } u;
 };
 
+struct vki_xen_domctl_monitor_op_00000011 {
+    vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
 
-struct vki_xen_domctl_monitor_op {
-    vki_uint32_t op;
-#define VKI_XEN_DOMCTL_MONITOR_OP_ENABLE            0
-#define VKI_XEN_DOMCTL_MONITOR_OP_DISABLE           1
-#define VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES  2
-#define VKI_XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP  3
+    /*
+     * When used with ENABLE/DISABLE this has to be set to
+     * the requested vki_xen_DOMCTL_MONITOR_EVENT_* value.
+     * With GET_CAPABILITIES this field returns a bitmap of
+     * events supported by the platform, in the format
+     * (1 << vki_xen_DOMCTL_MONITOR_EVENT_*).
+     */
     vki_uint32_t event;
+
+    /*
+     * Further options when issuing vki_xen_DOMCTL_MONITOR_OP_ENABLE.
+     */
     union {
         struct {
+            /* Which control register */
             vki_uint8_t index;
+            /* Pause vCPU until response */
             vki_uint8_t sync;
+            /* Send event only on a change of value */
             vki_uint8_t onchangeonly;
+            /* Allignment padding */
+            vki_uint8_t pad1;
+            vki_uint32_t pad2;
+            /*
+             * Send event only if the changed bit in the control register
+             * is not masked.
+             */
+            vki_xen_uint64_aligned_t bitmask;
         } mov_to_cr;
+
         struct {
-            vki_uint8_t extended_capture;
+            vki_uint32_t msr;
+            vki_uint8_t onchangeonly;
         } mov_to_msr;
+
         struct {
+            /* Pause vCPU until response */
             vki_uint8_t sync;
+            vki_uint8_t allow_userspace;
         } guest_request;
+
+        struct {
+            /* Pause vCPU until response */
+            vki_uint8_t sync;
+        } debug_exception;
     } u;
 };
 
@@ -609,7 +692,8 @@ struct vki_xen_domctl {
         struct vki_xen_domctl_tsc_info_0000000b   tsc_info_0000000b;
         //struct vki_xen_domctl_real_mode_area    real_mode_area;
         struct vki_xen_domctl_hvmcontext        hvmcontext;
-        struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
+        struct vki_xen_domctl_hvmcontext_partial_00000007 hvmcontext_partial_00000007;
+        struct vki_xen_domctl_hvmcontext_partial_0000000e hvmcontext_partial_0000000e;
         struct vki_xen_domctl_address_size      address_size;
         //struct vki_xen_domctl_sendtrigger       sendtrigger;
         //struct vki_xen_domctl_get_device_group  get_device_group;
@@ -626,6 +710,7 @@ struct vki_xen_domctl {
         struct vki_xen_domctl_debug_op          debug_op;
         struct vki_xen_domctl_mem_event_op_00000007 mem_event_op_00000007;
         vki_xen_domctl_vm_event_op_0000000b vm_event_op_0000000b;
+        struct vki_xen_domctl_vm_event_op_00000012 vm_event_op_00000012;
         //struct vki_xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct vki_xen_domctl_cpuid             cpuid;
@@ -642,6 +727,7 @@ struct vki_xen_domctl {
         //struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         //struct vki_xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
         struct vki_xen_domctl_monitor_op_0000000b monitor_op_0000000b;
+        struct vki_xen_domctl_monitor_op_00000011 monitor_op_00000011;
         vki_uint8_t                         pad[128];
     } u;
 };
index 050e16d19b46802683b42e94da0736a80220016e..c6ce0d90b85d3011910f3fc9f8102a61e32edc2b 100644 (file)
@@ -106,6 +106,84 @@ struct vki_xen_hvm_inject_trap {
 };
 typedef struct vki_xen_hvm_inject_trap vki_xen_hvm_inject_trap_t;
 
+#define VKI_XEN_HVMOP_altp2m 25
+#define VKI_XEN_HVMOP_altp2m_get_domain_state     1
+#define VKI_XEN_HVMOP_altp2m_set_domain_state     2
+#define VKI_XEN_HVMOP_altp2m_vcpu_enable_notify   3
+#define VKI_XEN_HVMOP_altp2m_create_p2m           4
+#define VKI_XEN_HVMOP_altp2m_destroy_p2m          5
+#define VKI_XEN_HVMOP_altp2m_switch_p2m           6
+#define VKI_XEN_HVMOP_altp2m_set_mem_access       7
+#define VKI_XEN_HVMOP_altp2m_change_gfn           8
+struct vki_xen_hvm_altp2m_domain_state {
+    /* IN or OUT variable on/off */
+    vki_uint8_t state;
+};
+typedef struct vki_xen_hvm_altp2m_domain_state vki_xen_hvm_altp2m_domain_state_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_hvm_altp2m_domain_state_t);
+
+struct vki_xen_hvm_altp2m_vcpu_enable_notify {
+    vki_uint32_t vcpu_id;
+    vki_uint32_t pad;
+    /* #VE info area gfn */
+    vki_uint64_t gfn;
+};
+typedef struct vki_xen_hvm_altp2m_vcpu_enable_notify vki_xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_hvm_altp2m_vcpu_enable_notify_t);
+
+struct vki_xen_hvm_altp2m_view {
+    /* IN/OUT variable */
+    vki_uint16_t view;
+    /* Create view only: default access type
+     * NOTE: currently ignored */
+    vki_uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct vki_xen_hvm_altp2m_view vki_xen_hvm_altp2m_view_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_hvm_altp2m_view_t);
+
+struct vki_xen_hvm_altp2m_set_mem_access {
+    /* view */
+    vki_uint16_t view;
+    /* Memory type */
+    vki_uint16_t hvmmem_access; /* xenmem_access_t */
+    vki_uint32_t pad;
+    /* gfn */
+    vki_uint64_t gfn;
+};
+typedef struct vki_xen_hvm_altp2m_set_mem_access vki_xen_hvm_altp2m_set_mem_access_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_hvm_altp2m_set_mem_access_t);
+
+struct vki_xen_hvm_altp2m_change_gfn {
+    /* view */
+    vki_uint16_t view;
+    vki_uint16_t pad1;
+    vki_uint32_t pad2;
+    /* old gfn */
+    vki_uint64_t old_gfn;
+    /* new gfn, INVALID_GFN (~0UL) means revert */
+    vki_uint64_t new_gfn;
+};
+typedef struct vki_xen_hvm_altp2m_change_gfn vki_xen_hvm_altp2m_change_gfn_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_hvm_altp2m_change_gfn_t);
+
+struct vki_xen_hvm_altp2m_op {
+    vki_uint32_t version;   /* HVMOP_ALTP2M_INTERFACE_VERSION */
+    vki_uint32_t cmd;
+    vki_xen_domid_t domain;
+    vki_uint16_t pad1;
+    vki_uint32_t pad2;
+    union {
+        struct vki_xen_hvm_altp2m_domain_state       domain_state;
+        struct vki_xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+        struct vki_xen_hvm_altp2m_view               view;
+        struct vki_xen_hvm_altp2m_set_mem_access     set_mem_access;
+        struct vki_xen_hvm_altp2m_change_gfn         change_gfn;
+        vki_uint8_t pad[64];
+    } u;
+};
+typedef struct vki_xen_hvm_altp2m_op vki_xen_hvm_altp2m_op_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_hvm_altp2m_op_t);
+
 #endif // __VKI_XEN_HVM_H
 
 /*--------------------------------------------------------------------*/
index 8f4eac124ffdf354868cd4d5bf73a2d93133bea2..5f68b0eafa0394041b5aa132f217cd4470febf9a 100644 (file)
@@ -87,6 +87,15 @@ struct vki_xen_sysctl_getdomaininfolist_0000000a {
     vki_uint32_t              num_domains;
 };
 
+struct vki_xen_sysctl_getdomaininfolist_00000010 {
+    /* IN variables. */
+    vki_xen_domid_t           first_domain;
+    vki_uint32_t              max_domains;
+    VKI_XEN_GUEST_HANDLE_64(vki_xen_domctl_getdomaininfo_00000010_t) buffer;
+    /* OUT variables. */
+    vki_uint32_t              num_domains;
+};
+
 /* vki_xen_sysctl_getdomaininfolist_0000000b is the same as 0000000a */
 
 #define VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE                1  /* C */
@@ -159,7 +168,23 @@ struct vki_xen_sysctl_physinfo_0000000a {
     vki_uint32_t capabilities;
 };
 
-/* vki_xen_sysctl_physinfo_0000000b is the same as 0000000a */
+struct vki_xen_sysctl_physinfo_00000010 {
+    vki_uint32_t threads_per_core;
+    vki_uint32_t cores_per_socket;
+    vki_uint32_t nr_cpus;     /* # CPUs currently online */
+    vki_uint32_t max_cpu_id;  /* Largest possible CPU ID on this host */
+    vki_uint32_t nr_nodes;    /* # nodes currently online */
+    vki_uint32_t max_node_id; /* Largest possible node ID on this host */
+    vki_uint32_t cpu_khz;
+    vki_uint32_t capabilities;
+    vki_xen_uint64_aligned_t total_pages;
+    vki_xen_uint64_aligned_t free_pages;
+    vki_xen_uint64_aligned_t scrub_pages;
+    vki_xen_uint64_aligned_t outstanding_pages;
+    vki_xen_uint64_aligned_t max_mfn;
+    vki_uint32_t hw_cap[8];
+
+};
 
 struct vki_xen_sysctl_sched_id {
     /* OUT variable. */
@@ -174,6 +199,7 @@ struct vki_xen_sysctl {
         //struct vki_xen_sysctl_tbuf_op           tbuf_op;
         struct vki_xen_sysctl_physinfo_00000008 physinfo_00000008;
         struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
+        struct vki_xen_sysctl_physinfo_00000010 physinfo_00000010;
         struct vki_xen_sysctl_topologyinfo      topologyinfo;
         struct vki_xen_sysctl_numainfo          numainfo;
         struct vki_xen_sysctl_sched_id          sched_id;
@@ -181,6 +207,7 @@ struct vki_xen_sysctl {
         struct vki_xen_sysctl_getdomaininfolist_00000008 getdomaininfolist_00000008;
         struct vki_xen_sysctl_getdomaininfolist_00000009 getdomaininfolist_00000009;
         struct vki_xen_sysctl_getdomaininfolist_0000000a getdomaininfolist_0000000a;
+        struct vki_xen_sysctl_getdomaininfolist_00000010 getdomaininfolist_00000010;
         struct vki_xen_sysctl_debug_keys        debug_keys;
         //struct vki_xen_sysctl_getcpuinfo        getcpuinfo;
         //struct vki_xen_sysctl_availheap         availheap;