Use vmcall or vmmcall when they are available.
Removed #include <string.h>; it was added in an earlier version
but is not needed in the current code.
Limit HostinfoBackdoorGetInterface to x86 architecture.
Fix some indents.
#include "backdoor.h"
#include "backdoorInt.h"
+#if defined(USE_HYPERCALL)
+#include "vm_assert.h"
+#include "x86cpuid.h"
+#include "x86cpuid_asm.h"
+#endif
+
#ifdef USE_VALGRIND
/*
* When running under valgrind, we need to ensure we have the correct register
# define BACKDOOR_LOG_HB_PROTO_STRUCT(x)
#endif
+#if defined(USE_HYPERCALL)
+/* Setting 'backdoorInterface' is idempotent, no atomic access is required. */
+static BackdoorInterface backdoorInterface = BACKDOOR_INTERFACE_NONE;
+
+static BackdoorInterface
+BackdoorGetInterface(void)
+{
+ if (UNLIKELY(backdoorInterface == BACKDOOR_INTERFACE_NONE)) {
+ CPUIDRegs regs;
+
+ /* Check whether we're on a VMware hypervisor that supports vmmcall. */
+ __GET_CPUID(1, &regs);
+ if (CPUID_ISSET(1, ECX, HYPERVISOR, regs.ecx)) {
+ __GET_CPUID(CPUID_HYPERVISOR_LEVEL_0, &regs);
+ if (CPUID_IsRawVendor(&regs, CPUID_VMWARE_HYPERVISOR_VENDOR_STRING)) {
+ if (__GET_EAX_FROM_CPUID(CPUID_HYPERVISOR_LEVEL_0) >=
+ CPUID_VMW_FEATURES) {
+ uint32 features = __GET_ECX_FROM_CPUID(CPUID_VMW_FEATURES);
+ if (CPUID_ISSET(CPUID_VMW_FEATURES, ECX,
+ VMCALL_BACKDOOR, features)) {
+ backdoorInterface = BACKDOOR_INTERFACE_VMCALL;
+ BACKDOOR_LOG("Backdoor interface: vmcall\n");
+ } else if (CPUID_ISSET(CPUID_VMW_FEATURES, ECX,
+ VMMCALL_BACKDOOR, features)) {
+ backdoorInterface = BACKDOOR_INTERFACE_VMMCALL;
+ BACKDOOR_LOG("Backdoor interface: vmmcall\n");
+ }
+ }
+ }
+ }
+ if (backdoorInterface == BACKDOOR_INTERFACE_NONE) {
+ backdoorInterface = BACKDOOR_INTERFACE_IO;
+ BACKDOOR_LOG("Backdoor interface: I/O port\n");
+ }
+ }
+ return backdoorInterface;
+}
+#else
+static BackdoorInterface
+BackdoorGetInterface(void) {
+ return BACKDOOR_INTERFACE_IO;
+}
+#endif
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * Backdoor_ForceLegacy --
+ *
+ * In some cases, it may be desirable to use the legacy IO interface to
+ * access the backdoor, even if CPUID reports support for the VMCALL/VMMCALL
+ * interface.
+ *
+ * Params:
+ * force Set to TRUE to force the library to use the legacy IO interface
+ * for dispatching backdoor calls; set to FALSE to use the
+ * autodetected interface.
+ *
+ * Side-effects:
+ * Changes the interface used to access the backdoor.
+ *
+ *-----------------------------------------------------------------------------
+ */
+#if defined(USE_HYPERCALL)
+void
+Backdoor_ForceLegacy(Bool force)
+{
+ if (force) {
+ backdoorInterface = BACKDOOR_INTERFACE_IO;
+ } else {
+ backdoorInterface = BACKDOOR_INTERFACE_NONE;
+ BackdoorGetInterface();
+ }
+}
+#endif
+
/*
*-----------------------------------------------------------------------------
#ifdef USE_VALGRIND
static void
-Backdoor_InOutValgrind(uint16 tid, Backdoor_proto *myBp)
+BackdoorInOutValgrind(uint16 tid, Backdoor_proto *myBp)
{
Backdoor_InOut(myBp);
}
+static void
+BackdoorHbInValgrind(uint16 tid, Backdoor_proto_hb *myBp)
+{
+ BackdoorHbIn(myBp);
+}
+static void
+BackdoorHbOutValgrind(uint16 tid, Backdoor_proto_hb *myBp)
+{
+ BackdoorHbOut(myBp);
+}
+#if defined(USE_HYPERCALL)
+static void
+BackdoorVmcallValgrind(uint16 tid, Backdoor_proto *myBp)
+{
+ Backdoor_Vmcall(myBp);
+}
+static void
+BackdoorVmmcallValgrind(uint16 tid, Backdoor_proto *myBp)
+{
+ Backdoor_Vmmcall(myBp);
+}
+static void
+BackdoorHbVmcallValgrind(uint16 tid, Backdoor_proto_hb *myBp)
+{
+ BackdoorHbVmcall(myBp);
+}
+static void
+BackdoorHbVmmcallValgrind(uint16 tid, Backdoor_proto_hb *myBp)
+{
+ BackdoorHbVmmcall(myBp);
+}
+#endif
#endif
void
Backdoor(Backdoor_proto *myBp) // IN/OUT
{
+ BackdoorInterface interface = BackdoorGetInterface();
ASSERT(myBp);
myBp->in.ax.word = BDOOR_MAGIC;
- myBp->in.dx.halfs.low = BDOOR_PORT;
+
+ switch (interface) {
+ case BACKDOOR_INTERFACE_IO:
+ myBp->in.dx.halfs.low = BDOOR_PORT;
+ break;
+#if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL: // Fall through.
+ case BACKDOOR_INTERFACE_VMMCALL:
+ myBp->in.dx.halfs.low = BDOOR_FLAGS_LB | BDOOR_FLAGS_READ;
+ break;
+#endif
+ default:
+ ASSERT(FALSE);
+ break;
+ }
BACKDOOR_LOG("Backdoor: before ");
BACKDOOR_LOG_PROTO_STRUCT(myBp);
+ switch (interface) {
+ case BACKDOOR_INTERFACE_IO:
+#ifdef USE_VALGRIND
+ VALGRIND_NON_SIMD_CALL1(BackdoorInOutValgrind, myBp);
+#else
+ Backdoor_InOut(myBp);
+#endif
+ break;
+#if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL:
+#ifdef USE_VALGRIND
+ VALGRIND_NON_SIMD_CALL1(BackdoorVmcallValgrind, myBp);
+#else
+ Backdoor_Vmcall(myBp);
+#endif
+ break;
+ case BACKDOOR_INTERFACE_VMMCALL:
#ifdef USE_VALGRIND
- VALGRIND_NON_SIMD_CALL1(Backdoor_InOutValgrind, myBp);
+ VALGRIND_NON_SIMD_CALL1(BackdoorVmmcallValgrind, myBp);
#else
- Backdoor_InOut(myBp);
+ Backdoor_Vmmcall(myBp);
#endif
+ break;
+#endif // defined(USE_HYPERCALL)
+ default:
+ ASSERT(FALSE);
+ break;
+ }
BACKDOOR_LOG("Backdoor: after ");
BACKDOOR_LOG_PROTO_STRUCT(myBp);
}
+void
+BackdoorHb(Backdoor_proto_hb *myBp, // IN/OUT
+ Bool outbound) // IN
+{
+ BackdoorInterface interface = BackdoorGetInterface();
+ ASSERT(myBp);
+
+ myBp->in.ax.word = BDOOR_MAGIC;
+
+ switch (interface) {
+ case BACKDOOR_INTERFACE_IO:
+ myBp->in.dx.halfs.low = BDOORHB_PORT;
+ break;
+#if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL: // Fall through.
+ case BACKDOOR_INTERFACE_VMMCALL:
+ myBp->in.dx.halfs.low = BDOOR_FLAGS_HB;
+ if (outbound) {
+ myBp->in.dx.halfs.low |= BDOOR_FLAGS_WRITE;
+ } else {
+ myBp->in.dx.halfs.low |= BDOOR_FLAGS_READ;
+ }
+ break;
+#endif
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+ BACKDOOR_LOG("BackdoorHb: before ");
+ BACKDOOR_LOG_HB_PROTO_STRUCT(myBp);
+
+ switch (interface) {
+ case BACKDOOR_INTERFACE_IO:
+ if (outbound) {
+#ifdef USE_VALGRIND
+ VALGRIND_NON_SIMD_CALL1(BackdoorHbOutValgrind, myBp);
+#else
+ BackdoorHbOut(myBp);
+#endif
+ } else {
+#ifdef USE_VALGRIND
+ VALGRIND_NON_SIMD_CALL1(BackdoorHbInValgrind, myBp);
+#else
+ BackdoorHbIn(myBp);
+#endif
+ }
+ break;
+#if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL:
+#ifdef USE_VALGRIND
+ VALGRIND_NON_SIMD_CALL1(BackdoorHbVmcallValgrind, myBp);
+#else
+ BackdoorHbVmcall(myBp);
+#endif
+ break;
+ case BACKDOOR_INTERFACE_VMMCALL:
+#ifdef USE_VALGRIND
+ VALGRIND_NON_SIMD_CALL1(BackdoorHbVmmcallValgrind, myBp);
+#else
+ BackdoorHbVmmcall(myBp);
+#endif
+ break;
+#endif
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+ BACKDOOR_LOG("BackdoorHb: after ");
+ BACKDOOR_LOG_HB_PROTO_STRUCT(myBp);
+}
+
+
/*
*-----------------------------------------------------------------------------
*
*-----------------------------------------------------------------------------
*/
-#ifdef USE_VALGRIND
-static void
-BackdoorHbOutValgrind(uint16 tid, Backdoor_proto_hb *myBp)
-{
- BackdoorHbOut(myBp);
-}
-#endif
-
void
Backdoor_HbOut(Backdoor_proto_hb *myBp) // IN/OUT
{
ASSERT(myBp);
- myBp->in.ax.word = BDOOR_MAGIC;
- myBp->in.dx.halfs.low = BDOORHB_PORT;
-
- BACKDOOR_LOG("Backdoor_HbOut: before ");
- BACKDOOR_LOG_HB_PROTO_STRUCT(myBp);
-
-#ifdef USE_VALGRIND
- VALGRIND_NON_SIMD_CALL1(BackdoorHbOutValgrind, myBp);
-#else
- BackdoorHbOut(myBp);
-#endif
-
- BACKDOOR_LOG("Backdoor_HbOut: after ");
- BACKDOOR_LOG_HB_PROTO_STRUCT(myBp);
+ BackdoorHb(myBp, TRUE);
}
*-----------------------------------------------------------------------------
*/
-#ifdef USE_VALGRIND
-static void
-BackdoorHbInValgrind(uint16 tid, Backdoor_proto_hb *myBp)
-{
- BackdoorHbIn(myBp);
-}
-#endif
-
void
Backdoor_HbIn(Backdoor_proto_hb *myBp) // IN/OUT
{
ASSERT(myBp);
- myBp->in.ax.word = BDOOR_MAGIC;
- myBp->in.dx.halfs.low = BDOORHB_PORT;
-
- BACKDOOR_LOG("Backdoor_HbIn: before ");
- BACKDOOR_LOG_HB_PROTO_STRUCT(myBp);
-
-#ifdef USE_VALGRIND
- VALGRIND_NON_SIMD_CALL1(BackdoorHbInValgrind, myBp);
-#else
- BackdoorHbIn(myBp);
-#endif
-
- BACKDOOR_LOG("Backdoor_HbIn: after ");
- BACKDOOR_LOG_HB_PROTO_STRUCT(myBp);
+ BackdoorHb(myBp, FALSE);
}
#ifdef __cplusplus
/*********************************************************
- * Copyright (C) 2005-2016 VMware, Inc. All rights reserved.
+ * Copyright (C) 2005-2016, 2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
);
}
+#if defined(USE_HYPERCALL)
+void
+Backdoor_Vmcall(Backdoor_proto *myBp) // IN/OUT
+{
+ uint32 dummy;
+
+ __asm__ __volatile__(
+#ifdef __PIC__
+ "pushl %%ebx" "\n\t"
+#endif
+ "pushl %%eax" "\n\t"
+ "movl 20(%%eax), %%edi" "\n\t"
+ "movl 16(%%eax), %%esi" "\n\t"
+ "movl 12(%%eax), %%edx" "\n\t"
+ "movl 8(%%eax), %%ecx" "\n\t"
+ "movl 4(%%eax), %%ebx" "\n\t"
+ "movl (%%eax), %%eax" "\n\t"
+ "vmcall" "\n\t"
+ "xchgl %%eax, (%%esp)" "\n\t"
+ "movl %%edi, 20(%%eax)" "\n\t"
+ "movl %%esi, 16(%%eax)" "\n\t"
+ "movl %%edx, 12(%%eax)" "\n\t"
+ "movl %%ecx, 8(%%eax)" "\n\t"
+ "movl %%ebx, 4(%%eax)" "\n\t"
+ "popl (%%eax)" "\n\t"
+#ifdef __PIC__
+ "popl %%ebx" "\n\t"
+#endif
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. So far it does not modify EFLAGS. --hpreg
+ */
+ :
+#ifndef __PIC__
+ "ebx",
+#endif
+ "ecx", "edx", "esi", "edi", "memory"
+ );
+}
+
+
+void
+Backdoor_Vmmcall(Backdoor_proto *myBp) // IN/OUT
+{
+ uint32 dummy;
+
+ __asm__ __volatile__(
+#ifdef __PIC__
+ "pushl %%ebx" "\n\t"
+#endif
+ "pushl %%eax" "\n\t"
+ "movl 20(%%eax), %%edi" "\n\t"
+ "movl 16(%%eax), %%esi" "\n\t"
+ "movl 12(%%eax), %%edx" "\n\t"
+ "movl 8(%%eax), %%ecx" "\n\t"
+ "movl 4(%%eax), %%ebx" "\n\t"
+ "movl (%%eax), %%eax" "\n\t"
+ "vmmcall" "\n\t"
+ "xchgl %%eax, (%%esp)" "\n\t"
+ "movl %%edi, 20(%%eax)" "\n\t"
+ "movl %%esi, 16(%%eax)" "\n\t"
+ "movl %%edx, 12(%%eax)" "\n\t"
+ "movl %%ecx, 8(%%eax)" "\n\t"
+ "movl %%ebx, 4(%%eax)" "\n\t"
+ "popl (%%eax)" "\n\t"
+#ifdef __PIC__
+ "popl %%ebx" "\n\t"
+#endif
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. So far it does not modify EFLAGS. --hpreg
+ */
+ :
+#ifndef __PIC__
+ "ebx",
+#endif
+ "ecx", "edx", "esi", "edi", "memory"
+ );
+}
+#endif
/*
*-----------------------------------------------------------------------------
* vmware can modify the whole VM state without the compiler knowing
* it. --hpreg
*/
- :
+ :
#ifndef __PIC__
- "ebx",
+ "ebx",
#endif
"ecx", "edx", "esi", "edi", "memory", "cc"
);
);
}
+
+#if defined(USE_HYPERCALL)
+void
+BackdoorHbVmcall(Backdoor_proto_hb *myBp) // IN/OUT
+{
+ uint32 dummy;
+
+ __asm__ __volatile__(
+#ifdef __PIC__
+ "pushl %%ebx" "\n\t"
+#endif
+ "pushl %%ebp" "\n\t"
+
+ "pushl %%eax" "\n\t"
+ "movl 24(%%eax), %%ebp" "\n\t"
+ "movl 20(%%eax), %%edi" "\n\t"
+ "movl 16(%%eax), %%esi" "\n\t"
+ "movl 12(%%eax), %%edx" "\n\t"
+ "movl 8(%%eax), %%ecx" "\n\t"
+ "movl 4(%%eax), %%ebx" "\n\t"
+ "movl (%%eax), %%eax" "\n\t"
+ "vmcall" "\n\t"
+ "xchgl %%eax, (%%esp)" "\n\t"
+ "movl %%ebp, 24(%%eax)" "\n\t"
+ "movl %%edi, 20(%%eax)" "\n\t"
+ "movl %%esi, 16(%%eax)" "\n\t"
+ "movl %%edx, 12(%%eax)" "\n\t"
+ "movl %%ecx, 8(%%eax)" "\n\t"
+ "movl %%ebx, 4(%%eax)" "\n\t"
+ "popl (%%eax)" "\n\t"
+
+ "popl %%ebp" "\n\t"
+#ifdef __PIC__
+ "popl %%ebx" "\n\t"
+#endif
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. --hpreg
+ */
+ :
+#ifndef __PIC__
+ "ebx",
+#endif
+ "ecx", "edx", "esi", "edi", "memory", "cc"
+ );
+}
+
+
+void
+BackdoorHbVmmcall(Backdoor_proto_hb *myBp) // IN/OUT
+{
+ uint32 dummy;
+
+ __asm__ __volatile__(
+#ifdef __PIC__
+ "pushl %%ebx" "\n\t"
+#endif
+ "pushl %%ebp" "\n\t"
+
+ "pushl %%eax" "\n\t"
+ "movl 24(%%eax), %%ebp" "\n\t"
+ "movl 20(%%eax), %%edi" "\n\t"
+ "movl 16(%%eax), %%esi" "\n\t"
+ "movl 12(%%eax), %%edx" "\n\t"
+ "movl 8(%%eax), %%ecx" "\n\t"
+ "movl 4(%%eax), %%ebx" "\n\t"
+ "movl (%%eax), %%eax" "\n\t"
+ "vmmcall" "\n\t"
+ "xchgl %%eax, (%%esp)" "\n\t"
+ "movl %%ebp, 24(%%eax)" "\n\t"
+ "movl %%edi, 20(%%eax)" "\n\t"
+ "movl %%esi, 16(%%eax)" "\n\t"
+ "movl %%edx, 12(%%eax)" "\n\t"
+ "movl %%ecx, 8(%%eax)" "\n\t"
+ "movl %%ebx, 4(%%eax)" "\n\t"
+ "popl (%%eax)" "\n\t"
+
+ "popl %%ebp" "\n\t"
+#ifdef __PIC__
+ "popl %%ebx" "\n\t"
+#endif
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. --hpreg
+ */
+ :
+#ifndef __PIC__
+ "ebx",
+#endif
+ "ecx", "edx", "esi", "edi", "memory", "cc"
+ );
+}
+#endif
+
#ifdef __cplusplus
}
#endif
/*********************************************************
- * Copyright (C) 2005-2016 VMware, Inc. All rights reserved.
+ * Copyright (C) 2005-2016, 2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
}
+#if defined(USE_HYPERCALL)
+void
+Backdoor_Vmcall(Backdoor_proto *myBp) // IN/OUT
+{
+ uint64 dummy;
+
+ __asm__ __volatile__(
+#ifdef __APPLE__
+ /*
+ * Save %rbx on the stack because the Mac OS GCC doesn't want us to
+ * clobber it - it erroneously thinks %rbx is the PIC register.
+ * (Radar bug 7304232)
+ */
+ "pushq %%rbx" "\n\t"
+#endif
+ "pushq %%rax" "\n\t"
+ "movq 40(%%rax), %%rdi" "\n\t"
+ "movq 32(%%rax), %%rsi" "\n\t"
+ "movq 24(%%rax), %%rdx" "\n\t"
+ "movq 16(%%rax), %%rcx" "\n\t"
+ "movq 8(%%rax), %%rbx" "\n\t"
+ "movq (%%rax), %%rax" "\n\t"
+ "vmcall" "\n\t"
+ "xchgq %%rax, (%%rsp)" "\n\t"
+ "movq %%rdi, 40(%%rax)" "\n\t"
+ "movq %%rsi, 32(%%rax)" "\n\t"
+ "movq %%rdx, 24(%%rax)" "\n\t"
+ "movq %%rcx, 16(%%rax)" "\n\t"
+ "movq %%rbx, 8(%%rax)" "\n\t"
+ "popq (%%rax)" "\n\t"
+#ifdef __APPLE__
+ "popq %%rbx" "\n\t"
+#endif
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. So far it does not modify EFLAGS. --hpreg
+ */
+ :
+#ifndef __APPLE__
+ /* %rbx is unchanged at the end of the function on Mac OS. */
+ "rbx",
+#endif
+ "rcx", "rdx", "rsi", "rdi", "memory"
+ );
+}
+
+void
+Backdoor_Vmmcall(Backdoor_proto *myBp) // IN/OUT
+{
+ uint64 dummy;
+
+ __asm__ __volatile__(
+#ifdef __APPLE__
+ /*
+ * Save %rbx on the stack because the Mac OS GCC doesn't want us to
+ * clobber it - it erroneously thinks %rbx is the PIC register.
+ * (Radar bug 7304232)
+ */
+ "pushq %%rbx" "\n\t"
+#endif
+ "pushq %%rax" "\n\t"
+ "movq 40(%%rax), %%rdi" "\n\t"
+ "movq 32(%%rax), %%rsi" "\n\t"
+ "movq 24(%%rax), %%rdx" "\n\t"
+ "movq 16(%%rax), %%rcx" "\n\t"
+ "movq 8(%%rax), %%rbx" "\n\t"
+ "movq (%%rax), %%rax" "\n\t"
+ "vmmcall" "\n\t"
+ "xchgq %%rax, (%%rsp)" "\n\t"
+ "movq %%rdi, 40(%%rax)" "\n\t"
+ "movq %%rsi, 32(%%rax)" "\n\t"
+ "movq %%rdx, 24(%%rax)" "\n\t"
+ "movq %%rcx, 16(%%rax)" "\n\t"
+ "movq %%rbx, 8(%%rax)" "\n\t"
+ "popq (%%rax)" "\n\t"
+#ifdef __APPLE__
+ "popq %%rbx" "\n\t"
+#endif
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. So far it does not modify EFLAGS. --hpreg
+ */
+ :
+#ifndef __APPLE__
+ /* %rbx is unchanged at the end of the function on Mac OS. */
+ "rbx",
+#endif
+ "rcx", "rdx", "rsi", "rdi", "memory"
+ );
+}
+#endif
+
+
/*
*-----------------------------------------------------------------------------
*
);
}
+#if defined(USE_HYPERCALL)
+void
+BackdoorHbVmcall(Backdoor_proto_hb *myBp) // IN/OUT
+{
+ uint64 dummy;
+
+ __asm__ __volatile__(
+ "pushq %%rbp" "\n\t"
+#ifdef __APPLE__
+ /*
+ * Save %rbx on the stack because the Mac OS GCC doesn't want us to
+ * clobber it - it erroneously thinks %rbx is the PIC register.
+ * (Radar bug 7304232)
+ */
+ "pushq %%rbx" "\n\t"
+#endif
+ "pushq %%rax" "\n\t"
+ "movq 48(%%rax), %%rbp" "\n\t"
+ "movq 40(%%rax), %%rdi" "\n\t"
+ "movq 32(%%rax), %%rsi" "\n\t"
+ "movq 24(%%rax), %%rdx" "\n\t"
+ "movq 16(%%rax), %%rcx" "\n\t"
+ "movq 8(%%rax), %%rbx" "\n\t"
+ "movq (%%rax), %%rax" "\n\t"
+ "vmcall" "\n\t"
+ "xchgq %%rax, (%%rsp)" "\n\t"
+ "movq %%rbp, 48(%%rax)" "\n\t"
+ "movq %%rdi, 40(%%rax)" "\n\t"
+ "movq %%rsi, 32(%%rax)" "\n\t"
+ "movq %%rdx, 24(%%rax)" "\n\t"
+ "movq %%rcx, 16(%%rax)" "\n\t"
+ "movq %%rbx, 8(%%rax)" "\n\t"
+ "popq (%%rax)" "\n\t"
+#ifdef __APPLE__
+ "popq %%rbx" "\n\t"
+#endif
+ "popq %%rbp"
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. --hpreg
+ */
+ :
+#ifndef __APPLE__
+ /* %rbx is unchanged at the end of the function on Mac OS. */
+ "rbx",
+#endif
+ "rcx", "rdx", "rsi", "rdi", "memory", "cc"
+ );
+}
+
+
+void
+BackdoorHbVmmcall(Backdoor_proto_hb *myBp) // IN/OUT
+{
+ uint64 dummy;
+
+ __asm__ __volatile__(
+ "pushq %%rbp" "\n\t"
+#ifdef __APPLE__
+ /*
+ * Save %rbx on the stack because the Mac OS GCC doesn't want us to
+ * clobber it - it erroneously thinks %rbx is the PIC register.
+ * (Radar bug 7304232)
+ */
+ "pushq %%rbx" "\n\t"
+#endif
+ "pushq %%rax" "\n\t"
+ "movq 48(%%rax), %%rbp" "\n\t"
+ "movq 40(%%rax), %%rdi" "\n\t"
+ "movq 32(%%rax), %%rsi" "\n\t"
+ "movq 24(%%rax), %%rdx" "\n\t"
+ "movq 16(%%rax), %%rcx" "\n\t"
+ "movq 8(%%rax), %%rbx" "\n\t"
+ "movq (%%rax), %%rax" "\n\t"
+ "vmmcall" "\n\t"
+ "xchgq %%rax, (%%rsp)" "\n\t"
+ "movq %%rbp, 48(%%rax)" "\n\t"
+ "movq %%rdi, 40(%%rax)" "\n\t"
+ "movq %%rsi, 32(%%rax)" "\n\t"
+ "movq %%rdx, 24(%%rax)" "\n\t"
+ "movq %%rcx, 16(%%rax)" "\n\t"
+ "movq %%rbx, 8(%%rax)" "\n\t"
+ "popq (%%rax)" "\n\t"
+#ifdef __APPLE__
+ "popq %%rbx" "\n\t"
+#endif
+ "popq %%rbp"
+ : "=a" (dummy)
+ : "0" (myBp)
+ /*
+ * vmware can modify the whole VM state without the compiler knowing
+ * it. --hpreg
+ */
+ :
+#ifndef __APPLE__
+ /* %rbx is unchanged at the end of the function on Mac OS. */
+ "rbx",
+#endif
+ "rcx", "rdx", "rsi", "rdi", "memory", "cc"
+ );
+}
+#endif
#ifdef __cplusplus
}
#endif
+
/*********************************************************
- * Copyright (C) 2005-2016 VMware, Inc. All rights reserved.
+ * Copyright (C) 2005-2016, 2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
void BackdoorHbIn(Backdoor_proto_hb *bp);
void BackdoorHbOut(Backdoor_proto_hb *bp);
+void BackdoorHb(Backdoor_proto_hb *myBp, Bool outbound);
+
+/*
+ * Are vmcall/vmmcall hypercall instructions available in the assembler?
+ * Use the compiler version as a proxy.
+ */
+#if defined(__linux__) && defined(__GNUC__)
+#define GCC_VERSION (__GNUC__ * 10000 + \
+ __GNUC_MINOR__ * 100 + \
+ __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 40803
+#define USE_HYPERCALL
+#endif
+#endif
+
+#if defined(USE_HYPERCALL)
+void BackdoorHbVmcall(Backdoor_proto_hb *bp);
+void BackdoorHbVmmcall(Backdoor_proto_hb *bp);
+#endif
/*********************************************************
- * Copyright (C) 1999-2017 VMware, Inc. All rights reserved.
+ * Copyright (C) 1999-2017, 2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
void
Backdoor(Backdoor_proto *bp); // IN/OUT
-void
+void
+Backdoor_ForceLegacy(Bool force); // IN
+
+void
Backdoor_InOut(Backdoor_proto *bp); // IN/OUT
+void
+Backdoor_Vmcall(Backdoor_proto *bp); // IN/OUT
+
+void
+Backdoor_Vmmcall(Backdoor_proto *bp); // IN/OUT
+
void
Backdoor_HbOut(Backdoor_proto_hb *bp); // IN/OUT
/*********************************************************
- * Copyright (C) 1998-2019 VMware, Inc. All rights reserved.
+ * Copyright (C) 1998-2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
/* Flags used by the hypercall interface. */
+#define BDOOR_FLAGS_LB 0
+#define BDOOR_FLAGS_READ 0
#define BDOOR_FLAGS_HB (1<<0)
#define BDOOR_FLAGS_WRITE (1<<1)
/*********************************************************
- * Copyright (C) 2011-2019 VMware, Inc. All rights reserved.
+ * Copyright (C) 2011-2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
#define LOGLEVEL_VARIADIC
#include "loglevel_user.h"
+#if !defined(_WIN32)
+
+/*
+ * Are vmcall/vmmcall available in the compiler?
+ */
+#if defined(__linux__) && defined(__GNUC__)
+#define GCC_VERSION (__GNUC__ * 10000 + \
+ __GNUC_MINOR__ * 100 + \
+ __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 40803
+#define USE_HYPERCALL
+#endif
+#endif
+
+#define BDOOR_FLAGS_LB_READ (BDOOR_FLAGS_LB | BDOOR_FLAGS_READ)
+
+#define Vmcall(cmd, result) \
+ __asm__ __volatile__( \
+ "vmcall" \
+ : "=a" (result) \
+ : "0" (BDOOR_MAGIC), \
+ "c" (cmd), \
+ "d" (BDOOR_FLAGS_LB_READ) \
+ )
+
+#define Vmmcall(cmd, result) \
+ __asm__ __volatile__( \
+ "vmmcall" \
+ : "=a" (result) \
+ : "0" (BDOOR_MAGIC), \
+ "c" (cmd), \
+ "d" (BDOOR_FLAGS_LB_READ) \
+ )
+
+#define Ioportcall(cmd, result) \
+ __asm__ __volatile__( \
+ "inl %%dx, %%eax" \
+ : "=a" (result) \
+ : "0" (BDOOR_MAGIC), \
+ "c" (cmd), \
+ "d" (BDOOR_PORT) \
+ )
+#endif
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HostinfoBackdoorGetInterface --
+ *
+ * Check whether hypercall is present or backdoor is being used.
+ *
+ * Results:
+ * BACKDOOR_INTERFACE_VMCALL - Intel hypercall is used.
+ * BACKDOOR_INTERFACE_VMMCALL - AMD hypercall is used.
+ * BACKDOOR_INTERFACE_IO - Backdoor I/O Port is used.
+ *
+ * Side effects:
+ * None
+ *
+ *----------------------------------------------------------------------
+ */
+#if defined(__i386__) || defined(__x86_64__)
+static BackdoorInterface
+HostinfoBackdoorGetInterface(void)
+{
+#if defined(USE_HYPERCALL)
+ /* Setting 'interface' is idempotent, no atomic access is required. */
+ static BackdoorInterface interface = BACKDOOR_INTERFACE_NONE;
+
+ if (UNLIKELY(interface == BACKDOOR_INTERFACE_NONE)) {
+#if defined(__i386__) || defined(__x86_64__)
+ CPUIDRegs regs;
+
+ /* Check whether we're on a VMware hypervisor that supports vmmcall. */
+ __GET_CPUID(CPUID_FEATURE_INFORMATION, &regs);
+ if (CPUID_ISSET(CPUID_FEATURE_INFORMATION, ECX, HYPERVISOR, regs.ecx)) {
+ __GET_CPUID(CPUID_HYPERVISOR_LEVEL_0, &regs);
+ if (CPUID_IsRawVendor(&regs, CPUID_VMWARE_HYPERVISOR_VENDOR_STRING)) {
+ if (__GET_EAX_FROM_CPUID(CPUID_HYPERVISOR_LEVEL_0) >=
+ CPUID_VMW_FEATURES) {
+ uint32 features = __GET_ECX_FROM_CPUID(CPUID_VMW_FEATURES);
+ if (CPUID_ISSET(CPUID_VMW_FEATURES, ECX,
+ VMCALL_BACKDOOR, features)) {
+ interface = BACKDOOR_INTERFACE_VMCALL;
+ } else if (CPUID_ISSET(CPUID_VMW_FEATURES, ECX,
+ VMMCALL_BACKDOOR, features)) {
+ interface = BACKDOOR_INTERFACE_VMMCALL;
+ }
+ }
+ }
+ }
+ if (interface == BACKDOOR_INTERFACE_NONE) {
+ interface = BACKDOOR_INTERFACE_IO;
+ }
+#else
+ interface = BACKDOOR_INTERFACE_IO;
+#endif
+ }
+ return interface;
+#else
+ return BACKDOOR_INTERFACE_IO;
+#endif
+}
+#endif // defined(__i386__) || defined(__x86_64__)
/*
*----------------------------------------------------------------------
CPUIDRegs regs;
if (!hypervisorPresent) {
- __GET_CPUID(1, &regs);
- hypervisorPresent = CPUID_ISSET(1, ECX, HYPERVISOR, regs.ecx);
+ __GET_CPUID(CPUID_FEATURE_INFORMATION, &regs);
+ hypervisorPresent = CPUID_ISSET(CPUID_FEATURE_INFORMATION, ECX,
+ HYPERVISOR, regs.ecx);
}
return hypervisorPresent;
}
#else // _WIN64
_asm {
push edx
- push ecx
- push ebx
- mov ecx, BDOOR_CMD_GETVERSION
+ push ecx
+ push ebx
+ mov ecx, BDOOR_CMD_GETVERSION
mov ebx, ~BDOOR_MAGIC
mov eax, BDOOR_MAGIC
mov dx, BDOOR_PORT
in eax, dx
- mov ebxval, ebx
- pop ebx
- pop ecx
+ mov ebxval, ebx
+ pop ebx
+ pop ecx
pop edx
}
#endif // _WIN64
}
-#else
+#else // !defined(_WIN32)
/*
*----------------------------------------------------------------------
* GP correctly and the process continues running returning garbage.
* In this case we check the EBX register which should be
* BDOOR_MAGIC if the IN was handled in a VM. Based on this we
- * return either TRUE or FALSE.
+ * return either TRUE or FALSE. If hypercall support is present,
+ * return TRUE without touching the backdoor.
*
* Results:
- * TRUE if we succesfully accessed the backdoor, FALSE or segfault
- * if not.
+ * TRUE if we have hypercall support or successfully accessed the
+ * backdoor, FALSE or segfault if not.
*
* Side effects:
* Exception if not in a VM.
uint32 ebx;
uint32 ecx;
- __asm__ __volatile__(
-# if defined __PIC__ && !vm_x86_64 // %ebx is reserved by the compiler.
- "xchgl %%ebx, %1" "\n\t"
- "inl %%dx, %%eax" "\n\t"
- "xchgl %%ebx, %1"
- : "=a" (eax),
- "=&rm" (ebx),
-# else
- "inl %%dx, %%eax"
- : "=a" (eax),
- "=b" (ebx),
-# endif
- "=c" (ecx)
- : "0" (BDOOR_MAGIC),
- "1" (~BDOOR_MAGIC),
- "2" (BDOOR_CMD_GETVERSION),
- "d" (BDOOR_PORT)
- );
+ switch (HostinfoBackdoorGetInterface()) {
+# if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL: // Fall Through
+ case BACKDOOR_INTERFACE_VMMCALL:
+ return TRUE;
+ break;
+# endif
+ default:
+ __asm__ __volatile__(
+# if defined __PIC__ && !vm_x86_64 // %ebx is reserved by the compiler.
+ "xchgl %%ebx, %1" "\n\t"
+ "inl %%dx, %%eax" "\n\t"
+ "xchgl %%ebx, %1"
+ : "=a" (eax),
+ "=&rm" (ebx),
+# else
+ "inl %%dx, %%eax"
+ : "=a" (eax),
+ "=b" (ebx),
+# endif
+ "=c" (ecx)
+ : "0" (BDOOR_MAGIC),
+ "1" (~BDOOR_MAGIC),
+ "2" (BDOOR_CMD_GETVERSION),
+ "d" (BDOOR_PORT)
+ );
+ break;
+ }
if (ebx == BDOOR_MAGIC) {
return TRUE;
}
uint32 cmd = NESTING_CONTROL_QUERY << 16 | BDOOR_CMD_NESTING_CONTROL;
uint32 result;
- __asm__ __volatile__(
- "inl %%dx, %%eax"
- : "=a" (result)
- : "0" (BDOOR_MAGIC),
- "c" (cmd),
- "d" (BDOOR_PORT)
- );
+ switch (HostinfoBackdoorGetInterface()) {
+# if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL:
+ Vmcall(cmd, result);
+ break;
+ case BACKDOOR_INTERFACE_VMMCALL:
+ Vmmcall(cmd, result);
+ break;
+# endif
+ default:
+ Ioportcall(cmd, result);
+ break;
+ }
if (result >= NESTING_CONTROL_QUERY && result != ~0U) {
return TRUE;
{
#if defined(__i386__) || defined(__x86_64__)
uint32 result;
- __asm__ __volatile__(
- "inl %%dx, %%eax"
- : "=a" (result)
- : "0" (BDOOR_MAGIC),
- "c" (BDOOR_CMD_GET_VCPU_INFO),
- "d" (BDOOR_PORT)
- );
+ uint32 cmd = BDOOR_CMD_GET_VCPU_INFO;
+
+ switch (HostinfoBackdoorGetInterface()) {
+# if defined(USE_HYPERCALL)
+ case BACKDOOR_INTERFACE_VMCALL:
+ Vmcall(cmd, result);
+ break;
+ case BACKDOOR_INTERFACE_VMMCALL:
+ Vmmcall(cmd, result);
+ break;
+# endif
+ default:
+ Ioportcall(cmd, result);
+ break;
+ }
/* If reserved bit is 1, this command wasn't implemented. */
return (result & (1 << BDOOR_CMD_VCPU_RESERVED)) == 0 &&
(result & (1 << bit)) != 0;