/*
* vm_basic_asm_arm64.h --
*
- * Basic assembler macros for the AArch64 architecture.
+ * Basic assembler macros for the AArch64 architecture.
*/
#ifndef _VM_BASIC_ASM_ARM64_H_
/*
- * GET_CURRENT_PC
+ * _GET_CURRENT_PC --
+ * GET_CURRENT_PC --
*
* Returns the current program counter. In the example below:
*
* the return value from GET_CURRENT_PC will point a debugger to L123.
*/
-static INLINE void *
+#define _GET_CURRENT_PC(pc) \
+ asm volatile("1: adr %0, 1b" : "=r" (pc))
+
+static INLINE_ALWAYS void *
GET_CURRENT_PC(void)
{
- uint64 pc;
-
- asm volatile("1: adr %0, 1b" : "=r" (pc) ::);
- return (void *)pc;
+ void *pc;
+
+ _GET_CURRENT_PC(pc);
+ return pc;
}
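+
+/*
+ * Usage sketch (illustrative only; the Log() call is an assumption,
+ * not something this header provides):
+ *
+ *    void *pc = GET_CURRENT_PC();
+ *
+ *    Log("executing near %p\n", pc);
+ *
+ * Because the adr is emitted inline, 'pc' identifies the call site
+ * itself rather than a helper function.
+ */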
/*
- * GET_CURRENT_LOCATION
+ * GET_CURRENT_LOCATION --
*
- * Updates the arguments with the values of the %pc, %fp, %sp and %lr
+ * Updates the arguments with the values of the pc, x29, sp, and x30
* registers at the current code location where the macro is invoked.
*/
-#define GET_CURRENT_LOCATION(pc, fp, sp, lr) \
- asm("1: adr %0, 1b \n\t" \
- "mov %1, x29\n\t" \
- "mov %2, sp \n\t" \
- "mov %3, x30\n\t" \
- : "=r" (pc), "=r" (fp), \
- "=r" (sp), "=r" (lr))
+#define GET_CURRENT_LOCATION(pc, fp, sp, lr) do { \
+ _GET_CURRENT_PC(pc); \
+ asm volatile("mov %0, x29" "\n\t" \
+ "mov %1, sp" "\n\t" \
+ "mov %2, x30" \
+ : "=r" (fp), "=r" (sp), "=r" (lr)); \
+} while (0)
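+
+/*
+ * Usage sketch (variable names are illustrative):
+ *
+ *    uint64 pc, fp, sp, lr;
+ *
+ *    GET_CURRENT_LOCATION(pc, fp, sp, lr);
+ *
+ * Afterwards 'pc' holds the address of the adr emitted by
+ * _GET_CURRENT_PC, while 'fp', 'sp' and 'lr' hold x29, sp and x30
+ * as of that point.
+ */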
/*
*----------------------------------------------------------------------
*
* MRS --
*
- * Get system register value.
+ * Get the value of system register 'name'.
*
* Results:
- * Value of system register x.
+ * The value.
*
* Side effects:
- * None
+ * Depends on 'name'.
*
*----------------------------------------------------------------------
*/
-#define MRS(x) ({ \
- uint64 _val; \
- asm volatile("mrs %0, " XSTR(x) : "=r" (_val) :: "memory"); \
- (_val); \
+#define MRS(name) ({ \
+ uint64 val; \
+ asm volatile("mrs %0, " XSTR(name) : "=r" (val) :: "memory"); \
+ val; \
})
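+
+/*
+ * Example: reading the virtual counter. CNTVCT_EL0 is a standard
+ * AArch64 register name; its use here is purely illustrative.
+ *
+ *    uint64 ticks = MRS(CNTVCT_EL0);
+ *
+ * XSTR() pastes 'name' into the instruction text, so it must be a
+ * bare register name known to the assembler, not a string or a
+ * variable.
+ */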
*----------------------------------------------------------------------
*
* MSR --
+ * MSR_IMMED --
*
- * Set system register value.
+ * Set the value of system register 'name'.
*
* Results:
- * None
+ * None.
*
* Side effects:
- * Per-register side-effects.
+ * Depends on 'name'.
*
*----------------------------------------------------------------------
*/
-#define MSR(x, _val) \
- asm volatile("msr " XSTR(x) ", %0" :: "r" (_val) : "memory")
+#define MSR(name, val) \
+ asm volatile("msr " XSTR(name) ", %0" :: "r" (val) : "memory")
-#define MSR_IMMED(x, _val) \
- asm volatile("msr " XSTR(x) ", %0" :: "i" (_val) : "memory")
+#define MSR_IMMED(name, val) \
+ asm volatile("msr " XSTR(name) ", %0" :: "i" (val) : "memory")
/*
*
* Unsigned integer by fixed point multiplication, with rounding:
* result = floor(multiplicand * multiplier * 2**(-shift) + 0.5)
- *
+ *
* Unsigned 64-bit integer multiplicand.
* Unsigned 64-bit fixed point multiplier, represented as
* (multiplier, shift), where shift < 64.
{
uint64 resLow, resHi;
- asm volatile(
- "mul %x0, %x2, %x3 \n\t"
- "umulh %x1, %x2, %x3 \n\t"
- : "=&r" (resLow), "=&r" (resHi)
- : "r" (multiplicand), "r" (multiplier)
- :);
+ asm("mul %0, %2, %3" "\n\t"
+ "umulh %1, %2, %3"
+ : "=&r" (resLow), "=r" (resHi)
+ : "r" (multiplicand), "r" (multiplier));
if (shift == 0) {
return resLow;
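+
+/*
+ * Worked example of the formula above: with multiplicand = 10,
+ * multiplier = 3 and shift = 1, the fixed-point factor is
+ * 3 * 2**-1 = 1.5, so
+ *
+ *    result = floor(10 * 3 / 2 + 0.5) = floor(15.5) = 15
+ *
+ * The mul/umulh pair first forms the full 128-bit product in
+ * resHi:resLow; the shift and the rounding are applied afterwards.
+ */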
*
* Signed integer by fixed point multiplication, with rounding:
* result = floor(multiplicand * multiplier * 2**(-shift) + 0.5)
- *
+ *
* Signed 64-bit integer multiplicand.
* Unsigned 64-bit fixed point multiplier, represented as
* (multiplier, shift), where shift < 64.
{
uint64 resLow, resHi;
- asm volatile(
- "mul %x0, %x2, %x3 \n\t"
- "smulh %x1, %x2, %x3 \n\t"
- : "=&r" (resLow), "=&r" (resHi)
- : "r" (multiplicand), "r" (multiplier)
- :);
+ asm("mul %0, %2, %3" "\n\t"
+ "smulh %1, %2, %3"
+ : "=&r" (resLow), "=r" (resHi)
+ : "r" (multiplicand), "r" (multiplier));
if (shift == 0) {
return resLow;
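+
+/*
+ * The signed variant differs only in using smulh, so the high half
+ * of the 128-bit product is sign-extended. Worked example:
+ * multiplicand = -5, multiplier = 3 and shift = 1 give an exact
+ * value of -7.5, and
+ *
+ *    result = floor(-5 * 3 / 2 + 0.5) = floor(-7) = -7
+ *
+ * i.e. halfway cases round up, for negative results too.
+ */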
*
* Unsigned integer by fixed point multiplication, with rounding:
* result = floor(multiplicand * multiplier * 2**(-shift) + 0.5)
- *
+ *
* Unsigned 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* (multiplier, shift), where shift < 64.
*
* Signed integer by fixed point multiplication, with rounding:
* result = floor(multiplicand * multiplier * 2**(-shift) + 0.5)
- *
+ *
* Signed 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* (multiplier, shift), where shift < 64.
/*
* vm_basic_asm_x86_64.h
*
- * Basic x86_64 asm macros.
+ * Basic x86_64 asm macros.
*/
#ifndef _VM_BASIC_ASM_X86_64_H_
#if defined(__GNUC__)
/*
- * GET_CURRENT_PC
+ * _GET_CURRENT_PC --
+ * GET_CURRENT_PC --
*
* Returns the current program counter (i.e. instruction pointer i.e. rip
* register on x86_64). In the example below:
*
* the return value from GET_CURRENT_PC will point a debugger to L123.
*/
-#define GET_CURRENT_PC() ({ \
- void *__rip; \
- asm("lea 0(%%rip), %0;\n\t" \
- : "=r" (__rip)); \
- __rip; \
-})
+
+#define _GET_CURRENT_PC(rip) \
+ asm volatile("lea 0(%%rip), %0" : "=r" (rip))
+
+static INLINE_ALWAYS void *
+GET_CURRENT_PC(void)
+{
+ void *rip;
+
+ _GET_CURRENT_PC(rip);
+ return rip;
+}
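+
+/*
+ * Usage matches the old statement-expression form (sketch; the
+ * Log() call is an assumption):
+ *
+ *    void *rip = GET_CURRENT_PC();
+ *
+ *    Log("executing near %p\n", rip);
+ *
+ * INLINE_ALWAYS matters here: were the helper ever outlined, the
+ * lea would report the helper's address rather than the caller's.
+ */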
/*
- * GET_CURRENT_LOCATION
+ * GET_CURRENT_LOCATION --
*
* Updates the arguments with the values of the %rip, %rbp, and %rsp
- * registers at the current code location where the macro is invoked,
- * and the return address.
+ * registers and the return address at the current code location where
+ * the macro is invoked.
*/
-#define GET_CURRENT_LOCATION(rip, rbp, rsp, retAddr) do { \
- asm("lea 0(%%rip), %0\n" \
- "mov %%rbp, %1\n" \
- "mov %%rsp, %2\n" \
- : "=r" (rip), "=r" (rbp), "=r" (rsp)); \
- retAddr = (uint64) GetReturnAddress(); \
- } while (0)
+
+#define GET_CURRENT_LOCATION(rip, rbp, rsp, retAddr) do { \
+ _GET_CURRENT_PC(rip); \
+ asm volatile("mov %%rbp, %0" "\n\t" \
+ "mov %%rsp, %1" \
+ : "=r" (rbp), "=r" (rsp)); \
+ retAddr = (uint64)GetReturnAddress(); \
+} while (0)
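+
+/*
+ * Usage sketch (variable names are illustrative):
+ *
+ *    uint64 rip, rbp, rsp, ra;
+ *
+ *    GET_CURRENT_LOCATION(rip, rbp, rsp, ra);
+ *
+ * 'ra' comes from GetReturnAddress(), i.e. the address to which the
+ * function containing this invocation will return.
+ */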
#endif
+
/*
* FXSAVE/FXRSTOR
* save/restore SIMD/MMX fpu state