} Atomic_uint64 ALIGNED(8);
+/*
+ * Prototypes for msft atomics. These are defined & inlined by the
+ * compiler so no function definition is needed. The prototypes are
+ * needed for c++. Since amd64 compiler doesn't support inline asm we
+ * have to use these. Unfortunately, we still have to use some inline asm
+ * for the 32 bit code since the and/or/xor implementations didn't show up
+ * until xp or 2k3.
+ *
+ * The declarations for the intrinsic functions were taken from ntddk.h
+ * in the DDK. The declarations must match otherwise the 64-bit c++
+ * compiler will complain about second linkage of the intrinsic functions.
+ * We define the intrinsic using the basic types corresponding to the
+ * Windows typedefs. This avoids having to include windows header files
+ * to get to the windows types.
+ */
+#if defined(_MSC_VER) && _MSC_VER >= 1310
+#ifdef __cplusplus
+extern "C" {
+#endif
+long _InterlockedExchange(long volatile*, long);
+long _InterlockedCompareExchange(long volatile*, long, long);
+long _InterlockedExchangeAdd(long volatile*, long);
+long _InterlockedDecrement(long volatile*);
+long _InterlockedIncrement(long volatile*);
+#pragma intrinsic(_InterlockedExchange, _InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedExchangeAdd, _InterlockedDecrement)
+#pragma intrinsic(_InterlockedIncrement)
+
+#if defined(VM_X86_64)
+long _InterlockedAnd(long volatile*, long);
+__int64 _InterlockedAnd64(__int64 volatile*, __int64);
+long _InterlockedOr(long volatile*, long);
+__int64 _InterlockedOr64(__int64 volatile*, __int64);
+long _InterlockedXor(long volatile*, long);
+__int64 _InterlockedXor64(__int64 volatile*, __int64);
+__int64 _InterlockedExchangeAdd64(__int64 volatile*, __int64);
+__int64 _InterlockedIncrement64(__int64 volatile*);
+__int64 _InterlockedDecrement64(__int64 volatile*);
+__int64 _InterlockedExchange64(__int64 volatile*, __int64);
+__int64 _InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
+#if !defined(_WIN64)
+#pragma intrinsic(_InterlockedAnd, _InterlockedAnd64)
+#pragma intrinsic(_InterlockedOr, _InterlockedOr64)
+#pragma intrinsic(_InterlockedXor, _InterlockedXor64)
+#pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedIncrement64)
+#pragma intrinsic(_InterlockedDecrement64, _InterlockedExchange64)
+#pragma intrinsic(_InterlockedCompareExchange64)
+#endif /* !_WIN64 */
+#endif /* VM_X86_64 */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MSC_VER */
+
/*
* LDREX without STREX or CLREX may cause problems in environments where the
* context switch may not clear the reference monitor - according ARM manual
return val;
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
return _InterlockedExchange((long *)&var->value, (long)val);
#else
+#pragma warning(push)
+#pragma warning(disable : 4035) // disable no-return warning
+ {
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm xchg [ebx]Atomic_uint32.value, eax
+ // eax is the return value, this is documented to work - edward
+ }
+#pragma warning(pop)
+#endif // _MSC_VER >= 1310
+#else
#error No compiler defined for Atomic_ReadWrite
#endif // __GNUC__
}
return val;
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
return _InterlockedCompareExchange((long *)&var->value,
(long)newVal,
(long)oldVal);
+#else
+#pragma warning(push)
+#pragma warning(disable : 4035) // disable no-return warning
+ {
+ __asm mov eax, oldVal
+ __asm mov ebx, var
+ __asm mov ecx, newVal
+ __asm lock cmpxchg [ebx]Atomic_uint32.value, ecx
+ // eax is the return value, this is documented to work - edward
+ }
+#pragma warning(pop)
+#endif
+#else
+#error No compiler defined for Atomic_ReadIfEqualWrite
#endif
}
#define Atomic_ReadIfEqualWrite32 Atomic_ReadIfEqualWrite
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if defined(VM_X86_64)
_InterlockedAnd((long *)&var->value, (long)val);
+#else
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm lock and [ebx]Atomic_uint32.value, eax
+#endif
#else
#error No compiler defined for Atomic_And
#endif
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if defined(VM_X86_64)
_InterlockedOr((long *)&var->value, (long)val);
+#else
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm lock or [ebx]Atomic_uint32.value, eax
+#endif
#else
#error No compiler defined for Atomic_Or
#endif
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if defined(VM_X86_64)
_InterlockedXor((long *)&var->value, (long)val);
+#else
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm lock xor [ebx]Atomic_uint32.value, eax
+#endif
#else
#error No compiler defined for Atomic_Xor
#endif
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
_InterlockedExchangeAdd((long *)&var->value, (long)val);
+#else
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm lock add [ebx]Atomic_uint32.value, eax
+#endif
#else
#error No compiler defined for Atomic_Add
#endif
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
_InterlockedExchangeAdd((long *)&var->value, (long)-val);
+#else
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm lock sub [ebx]Atomic_uint32.value, eax
+#endif
#else
#error No compiler defined for Atomic_Sub
#endif
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
_InterlockedIncrement((long *)&var->value);
+#else
+ __asm mov ebx, var
+ __asm lock inc [ebx]Atomic_uint32.value
+#endif
#else
#error No compiler defined for Atomic_Inc
#endif
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
_InterlockedDecrement((long *)&var->value);
+#else
+ __asm mov ebx, var
+ __asm lock dec [ebx]Atomic_uint32.value
+#endif
#else
#error No compiler defined for Atomic_Dec
#endif
return val;
#endif // VM_ARM_V7
#elif defined _MSC_VER
+#if _MSC_VER >= 1310
return _InterlockedExchangeAdd((long *)&var->value, (long)val);
#else
+#pragma warning(push)
+#pragma warning(disable : 4035) // disable no-return warning
+ {
+ __asm mov eax, val
+ __asm mov ebx, var
+ __asm lock xadd [ebx]Atomic_uint32.value, eax
+ }
+#pragma warning(pop)
+#endif
+#else
#error No compiler defined for Atomic_FetchAndAdd
#endif
}
return equal;
#endif //VM_ARM_V7
#elif defined _MSC_VER
+#if defined(VM_X86_64)
return (__int64)*oldVal == _InterlockedCompareExchange64((__int64 *)&var->value,
(__int64)*newVal,
(__int64)*oldVal);
#else
+#pragma warning(push)
+#pragma warning(disable : 4035) // disable no-return warning
+ {
+ __asm mov esi, var
+ __asm mov edx, oldVal
+ __asm mov ecx, newVal
+ __asm mov eax, [edx]S_uint64.lowValue
+ __asm mov edx, [edx]S_uint64.highValue
+ __asm mov ebx, [ecx]S_uint64.lowValue
+ __asm mov ecx, [ecx]S_uint64.highValue
+ __asm lock cmpxchg8b [esi]
+ __asm sete al
+ __asm movzx eax, al
+ // eax is the return value, this is documented to work - edward
+ }
+#pragma warning(pop)
+#endif
+#else
#error No compiler defined for Atomic_CMPXCHG64
#endif // !GNUC
}
*/
return var->value;
#elif defined (_MSC_VER) && defined(__i386__)
-// _InterlockedCompareExchange64 is not optimal here, see the comment for gcc.
# pragma warning(push)
# pragma warning(disable : 4035) // disable no-return warning
{
);
AtomicEpilogue();
#elif defined _MSC_VER
- // It also works as a memory barrier for MSVC (_ReadWriteBarrier).
- _interlockedbittestandset64((__int64*)&var->value, (__int64)bit);
+ uint64 oldVal;
+ uint64 newVal;
+ do {
+ oldVal = var->value;
+ newVal = oldVal | (CONST64U(1) << bit);
+ } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#else
#error No compiler defined for Atomic_SetBit64
#endif
);
AtomicEpilogue();
#elif defined _MSC_VER
- // It also works as a memory barrier for MSVC (_ReadWriteBarrier).
- _interlockedbittestandreset64((__int64*) &var->value, (__int64)bit);
+ uint64 oldVal;
+ uint64 newVal;
+ do {
+ oldVal = var->value;
+ newVal = oldVal & ~(CONST64U(1) << bit);
+ } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#else
#error No compiler defined for Atomic_ClearBit64
#endif
);
return out;
#elif defined _MSC_VER
- return _bittest64((__int64*)&var->value, (__int64)bit);
+ return (var->value & (CONST64U(1) << bit)) != 0;
#else
#error No compiler defined for Atomic_TestBit64
#endif
#include "vm_basic_asm_x86.h"
#endif
+/*
+ * x86-64 windows doesn't support inline asm so we have to use these
+ * intrinsic functions defined in the compiler. Not all of these are well
+ * documented. There is an array in the compiler dll (c1.dll) which has
+ * an array of the names of all the intrinsics minus the leading
+ * underscore. Searching around in the ntddk.h file can also be helpful.
+ *
+ * The declarations for the intrinsic functions were taken from the DDK.
+ * Our declarations must match the ddk's otherwise the 64-bit c++ compiler
+ * will complain about second linkage of the intrinsic functions.
+ * We define the intrinsic using the basic types corresponding to the
+ * Windows typedefs. This avoids having to include windows header files
+ * to get to the windows types.
+ */
+#ifdef _MSC_VER
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * It seems x86 & x86-64 windows still implements these intrinsic
+ * functions. The documentation for the x86-64 suggest the
+ * __inbyte/__outbyte intrinsics even though the _in/_out work fine and
+ * __inbyte/__outbyte aren't supported on x86.
+ */
+int _inp(unsigned short);
+unsigned short _inpw(unsigned short);
+unsigned long _inpd(unsigned short);
+
+int _outp(unsigned short, int);
+unsigned short _outpw(unsigned short, unsigned short);
+unsigned long _outpd(unsigned short, unsigned long);
+#pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpd)
+
+/*
+ * Prevents compiler from re-ordering reads, writes and reads&writes.
+ * These functions do not add any instructions thus only affect
+ * the compiler ordering.
+ *
+ * See:
+ * `Lockless Programming Considerations for Xbox 360 and Microsoft Windows'
+ * http://msdn.microsoft.com/en-us/library/bb310595(VS.85).aspx
+ */
+void _ReadBarrier(void);
+void _WriteBarrier(void);
+void _ReadWriteBarrier(void);
+#pragma intrinsic(_ReadBarrier, _WriteBarrier, _ReadWriteBarrier)
+
+void _mm_mfence(void);
+void _mm_lfence(void);
+#pragma intrinsic(_mm_mfence, _mm_lfence)
+
+#ifdef VM_X86_64
+/*
+ * intrinsic functions only supported by x86-64 windows as of 2k3sp1
+ */
+unsigned __int64 __rdtsc(void);
+void __stosw(unsigned short *, unsigned short, size_t);
+void __stosd(unsigned long *, unsigned long, size_t);
+void _mm_pause(void);
+#pragma intrinsic(__rdtsc, __stosw, __stosd, _mm_pause)
+
+unsigned char _BitScanForward64(unsigned long *, unsigned __int64);
+unsigned char _BitScanReverse64(unsigned long *, unsigned __int64);
+#pragma intrinsic(_BitScanForward64, _BitScanReverse64)
+#endif /* VM_X86_64 */
+
+unsigned char _BitScanForward(unsigned long *, unsigned long);
+unsigned char _BitScanReverse(unsigned long *, unsigned long);
+#pragma intrinsic(_BitScanForward, _BitScanReverse)
+
+unsigned char _bittestandset(long *, long);
+unsigned char _bittestandreset(long *, long);
+#pragma intrinsic(_bittestandset, _bittestandreset)
+#ifdef VM_X86_64
+unsigned char _bittestandset64(__int64 *, __int64);
+unsigned char _bittestandreset64(__int64 *, __int64);
+#pragma intrinsic(_bittestandset64, _bittestandreset64)
+#endif /* VM_X86_64 */
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MSC_VER */
+
#ifdef __GNUC__ // {
#if defined(__i386__) || defined(__x86_64__) // Only on x86*
#define OUTW(port, val) __GCC_OUT(w, w, port, val)
#define OUT32(port, val) __GCC_OUT(l, , port, val)
+#define GET_CURRENT_EIP(_eip) \
+ __asm__ __volatile("call 0\n\tpopl %0" : "=r" (_eip): );
+
#endif // x86*
#elif defined(_MSC_VER) // } {
static INLINE uint8
INB(uint16 port)
{
- return __inbyte(port);
+ return (uint8)_inp(port);
}
static INLINE void
OUTB(uint16 port, uint8 value)
{
- __outbyte(port, value);
+ _outp(port, value);
}
static INLINE uint16
INW(uint16 port)
{
- return __inword(port);
+ return _inpw(port);
}
static INLINE void
OUTW(uint16 port, uint16 value)
{
- __outword(port, value);
+ _outpw(port, value);
}
static INLINE uint32
IN32(uint16 port)
{
- return __indword(port);
+ return _inpd(port);
}
static INLINE void
OUT32(uint16 port, uint32 value)
{
- __outdword(port, value);
+ _outpd(port, value);
}
+#ifndef VM_X86_64
+#ifdef NEAR
+#undef NEAR
+#endif
+
+#define GET_CURRENT_EIP(_eip) do { \
+ __asm call NEAR PTR $+5 \
+ __asm pop eax \
+ __asm mov _eip, eax \
+} while (0)
+#endif // VM_X86_64
+
#else // } {
#error
#endif // }
static INLINE void *
uint16set(void *dst, uint16 val, size_t count)
{
+#ifdef VM_X86_64
__stosw((uint16*)dst, val, count);
+#else
+ __asm { pushf;
+ mov ax, val;
+ mov ecx, count;
+ mov edi, dst;
+ cld;
+ rep stosw;
+ popf;
+ }
+#endif
return dst;
}
static INLINE void *
uint32set(void *dst, uint32 val, size_t count)
{
+#ifdef VM_X86_64
__stosd((unsigned long*)dst, (unsigned long)val, count);
+#else
+ __asm { pushf;
+ mov eax, val;
+ mov ecx, count;
+ mov edi, dst;
+ cld;
+ rep stosd;
+ popf;
+ }
+#endif
return dst;
}
#endif
}
#elif defined(_MSC_VER)
+#ifdef VM_X86_64
{
_mm_pause();
}
+#else /* VM_X86_64 */
+#pragma warning( disable : 4035)
+{
+ __asm _emit 0xf3 __asm _emit 0x90
+}
+#pragma warning (default: 4035)
+#endif /* VM_X86_64 */
#else /* __GNUC__ */
#error No compiler defined for PAUSE
#endif
#endif
}
#elif defined(_MSC_VER)
+#ifdef VM_X86_64
{
return __rdtsc();
}
+#else
+#pragma warning( disable : 4035)
+{
+ __asm _emit 0x0f __asm _emit 0x31
+}
+#pragma warning (default: 4035)
+#endif /* VM_X86_64 */
#else /* __GNUC__ */
#error No compiler defined for RDTSC
#endif /* __GNUC__ */
#error "This file is x86-64 only!"
#endif
+#ifdef _MSC_VER
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+uint64 _umul128(uint64 multiplier, uint64 multiplicand,
+ uint64 *highProduct);
+int64 _mul128(int64 multiplier, int64 multiplicand,
+ int64 *highProduct);
+uint64 __shiftright128(uint64 lowPart, uint64 highPart, uint8 shift);
+#ifdef __cplusplus
+}
+#endif
+
+#pragma intrinsic(_umul128, _mul128, __shiftright128)
+
+#endif // _MSC_VER
+
/*
* GET_CURRENT_RIP
*
#endif
-// Intrinsic funtions. WDK/DDK has its own pragmas for intrinsic functions.
-#if defined(_MSC_VER) && !defined (WINNT_DDK)
-#ifndef _IVEC_H_INCLUDED
-#define _IVEC_H_INCLUDED
-#define _IVEC_H_INCLUDED_2
-#endif
-
-#ifndef _FVEC_H_INCLUDED
-#define _FVEC_H_INCLUDED
-#define _FVEC_H_INCLUDED_2
-#endif
-
-#ifndef _DVEC_H_INCLUDED
-#define _DVEC_H_INCLUDED
-#define _DVEC_H_INCLUDED_2
-#endif
-
-#include <intrin.h>
-
-#ifdef _IVEC_H_INCLUDED_2
-#undef _IVEC_H_INCLUDED
-#undef _IVEC_H_INCLUDED_2
-#endif
-
-#ifdef _FVEC_H_INCLUDED_2
-#undef _FVEC_H_INCLUDED
-#undef _FVEC_H_INCLUDED_2
-#endif
-
-#ifdef _DVEC_H_INCLUDED_2
-#undef _DVEC_H_INCLUDED
-#undef _DVEC_H_INCLUDED_2
-#endif
-#endif /* _MSC_VER */
-
-
#if defined(__APPLE__) || defined(HAVE_STDINT_H)
/*
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
-#include "vm_basic_types.h"
#include "vm_basic_asm.h"
#include "x86cpuid.h"
+/*
+ * x86-64 windows doesn't support inline asm so we have to use these
+ * intrinsic functions defined in the compiler. Not all of these are well
+ * documented. There is an array in the compiler dll (c1.dll) which has
+ * an array of the names of all the intrinsics minus the leading
+ * underscore. Searching around in the ntddk.h file can also be helpful.
+ *
+ * The declarations for the intrinsic functions were taken from the DDK.
+ * Our declarations must match the ddk's otherwise the 64-bit c++ compiler
+ * will complain about second linkage of the intrinsic functions.
+ * We define the intrinsic using the basic types corresponding to the
+ * Windows typedefs. This avoids having to include windows header files
+ * to get to the windows types.
+ */
+#ifdef _MSC_VER
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef VM_X86_64
+/*
+ * intrinsic functions only supported by x86-64 windows as of 2k3sp1
+ */
+void __cpuid(unsigned int*, unsigned int);
+#pragma intrinsic(__cpuid)
+#endif /* VM_X86_64 */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MSC_VER */
+
+
#ifdef __GNUC__ // {
/*
static INLINE void
__GET_CPUID(int input, CPUIDRegs *regs)
{
- __cpuid((int *)regs, input);
+#ifdef VM_X86_64
+ __cpuid((unsigned int *)regs, input);
+#else
+ __asm push esi
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, input
+ __asm mov esi, regs
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov 0x0[esi], eax
+ __asm mov 0x4[esi], ebx
+ __asm mov 0x8[esi], ecx
+ __asm mov 0xC[esi], edx
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+ __asm pop esi
+#endif
}
+#ifdef VM_X86_64
+
+/*
+ * No inline assembly in Win64. Implemented in bora/lib/misc in
+ * cpuidMasm64.asm.
+ */
+
+extern void
+__GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs);
+
+#else // VM_X86_64
+
static INLINE void
__GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs)
{
- __cpuidex((int *)regs, inputEax, inputEcx);
+ __asm push esi
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, inputEax
+ __asm mov ecx, inputEcx
+ __asm mov esi, regs
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov 0x0[esi], eax
+ __asm mov 0x4[esi], ebx
+ __asm mov 0x8[esi], ecx
+ __asm mov 0xC[esi], edx
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+ __asm pop esi
}
+#endif
static INLINE uint32
__GET_EAX_FROM_CPUID(int input)
{
+#ifdef VM_X86_64
CPUIDRegs regs;
- __cpuid((int *)®s, input);
+ __cpuid((unsigned int *)®s, input);
return regs.eax;
+#else
+ uint32 output;
+
+ //NOT_TESTED();
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, input
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov output, eax
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+
+ return output;
+#endif
}
static INLINE uint32
__GET_EBX_FROM_CPUID(int input)
{
+#ifdef VM_X86_64
CPUIDRegs regs;
- __cpuid((int *)®s, input);
+ __cpuid((unsigned int *)®s, input);
return regs.ebx;
+#else
+ uint32 output;
+
+ //NOT_TESTED();
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, input
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov output, ebx
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+
+ return output;
+#endif
}
static INLINE uint32
__GET_ECX_FROM_CPUID(int input)
{
+#ifdef VM_X86_64
CPUIDRegs regs;
- __cpuid((int *)®s, input);
+ __cpuid((unsigned int *)®s, input);
return regs.ecx;
+#else
+ uint32 output;
+
+ //NOT_TESTED();
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, input
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov output, ecx
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+
+ return output;
+#endif
}
static INLINE uint32
__GET_EDX_FROM_CPUID(int input)
{
+#ifdef VM_X86_64
CPUIDRegs regs;
- __cpuid(( int *)®s, input);
+ __cpuid((unsigned int *)®s, input);
return regs.edx;
+#else
+ uint32 output;
+
+ //NOT_TESTED();
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, input
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov output, edx
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+
+ return output;
+#endif
}
+#ifdef VM_X86_64
+
+/*
+ * No inline assembly in Win64. Implemented in bora/lib/misc in
+ * cpuidMasm64.asm.
+ */
+
+extern uint32
+__GET_EAX_FROM_CPUID4(int inputEcx);
+
+#else // VM_X86_64
+
static INLINE uint32
__GET_EAX_FROM_CPUID4(int inputEcx)
{
- CPUIDRegs regs;
- __GET_CPUID2(4, inputEcx, ®s);
- return regs.eax;
+ uint32 output;
+
+ //NOT_TESTED();
+ __asm push ebx
+ __asm push ecx
+ __asm push edx
+
+ __asm mov eax, 4
+ __asm mov ecx, inputEcx
+ __asm _emit 0x0f __asm _emit 0xa2
+ __asm mov output, eax
+
+ __asm pop edx
+ __asm pop ecx
+ __asm pop ebx
+
+ return output;
}
+#endif // VM_X86_64
+
#else // }
#error
#endif