From: John Wolfe Date: Tue, 18 Aug 2020 14:14:11 +0000 (-0700) Subject: Changes to common header files not directly applicable to open-vm-tools. X-Git-Tag: stable-11.2.0~69 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=468fcf407bd71983cca6652d71547099772f37f6;p=thirdparty%2Fopen-vm-tools.git Changes to common header files not directly applicable to open-vm-tools. --- diff --git a/open-vm-tools/lib/include/vm_basic_asm_arm64.h b/open-vm-tools/lib/include/vm_basic_asm_arm64.h index ae919a28c..369020d13 100644 --- a/open-vm-tools/lib/include/vm_basic_asm_arm64.h +++ b/open-vm-tools/lib/include/vm_basic_asm_arm64.h @@ -33,7 +33,7 @@ /* * vm_basic_asm_arm64.h -- * - * Basic assembler macros for the AArch64 architecture. + * Basic assembler macros for the AArch64 architecture. */ #ifndef _VM_BASIC_ASM_ARM64_H_ @@ -47,7 +47,8 @@ extern "C" { /* - * GET_CURRENT_PC + * _GET_CURRENT_PC -- + * GET_CURRENT_PC -- * * Returns the current program counter. In the example below: * @@ -57,30 +58,32 @@ extern "C" { * the return value from GET_CURRENT_PC will point a debugger to L123. */ -static INLINE void * +#define _GET_CURRENT_PC(pc) \ + asm volatile("1: adr %0, 1b" : "=r" (pc)) + +static INLINE_ALWAYS void * GET_CURRENT_PC(void) { - uint64 pc; - - asm volatile("1: adr %0, 1b" : "=r" (pc) ::); + void *pc; - return (void *)pc; + _GET_CURRENT_PC(pc); + return pc; } /* - * GET_CURRENT_LOCATION + * GET_CURRENT_LOCATION -- * - * Updates the arguments with the values of the %pc, %fp, %sp and %lr + * Updates the arguments with the values of the pc, x29, sp and x30 * registers at the current code location where the macro is invoked. */ -#define GET_CURRENT_LOCATION(pc, fp, sp, lr) \ - asm("1: adr %0, 1b \n\t" \ - "mov %1, x29\n\t" \ - "mov %2, sp \n\t" \ - "mov %3, x30\n\t" \ - : "=r" (pc), "=r" (fp), \ - "=r" (sp), "=r" (lr)) +#define GET_CURRENT_LOCATION(pc, fp, sp, lr) do { \ + _GET_CURRENT_PC(pc); \ + asm volatile("mov %0, x29" "\n\t" \ + "mov %1, sp" "\n\t" \ + "mov %2, x30" \ + : "=r" (fp), "=r" (sp), "=r" (lr)); \ +} while (0) /* @@ -168,21 +171,21 @@ ISB(void) * * MRS -- * - * Get system register value. + * Get the value of system register 'name'. * * Results: - * Value of system register x. + * The value. * * Side effects: - * None + * Depends on 'name'. * *---------------------------------------------------------------------- */ -#define MRS(x) ({ \ - uint64 _val; \ - asm volatile("mrs %0, " XSTR(x) : "=r" (_val) :: "memory"); \ - (_val); \ +#define MRS(name) ({ \ + uint64 val; \ + asm volatile("mrs %0, " XSTR(name) : "=r" (val) :: "memory"); \ + val; \ }) @@ -190,23 +193,24 @@ ISB(void) *---------------------------------------------------------------------- * * MSR -- + * MSR_IMMED -- * - * Set system register value. + * Set the value of system register 'name'. * * Results: - * None + * None. * * Side effects: - * Per-register side-effects. + * Depends on 'name'. 
* *---------------------------------------------------------------------- */ -#define MSR(x, _val) \ - asm volatile("msr " XSTR(x) ", %0" :: "r" (_val) : "memory") +#define MSR(name, val) \ + asm volatile("msr " XSTR(name) ", %0" :: "r" (val) : "memory") -#define MSR_IMMED(x, _val) \ - asm volatile("msr " XSTR(x) ", %0" :: "i" (_val) : "memory") +#define MSR_IMMED(name, val) \ + asm volatile("msr " XSTR(name) ", %0" :: "i" (val) : "memory") /* @@ -480,7 +484,7 @@ SetSPELx(VA va) * * Unsigned integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) - * + * * Unsigned 64-bit integer multiplicand. * Unsigned 64-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. @@ -498,12 +502,10 @@ Mul64x6464(uint64 multiplicand, { uint64 resLow, resHi; - asm volatile( - "mul %x0, %x2, %x3 \n\t" - "umulh %x1, %x2, %x3 \n\t" - : "=&r" (resLow), "=&r" (resHi) - : "r" (multiplicand), "r" (multiplier) - :); + asm("mul %0, %2, %3" "\n\t" + "umulh %1, %2, %3" + : "=&r" (resLow), "=r" (resHi) + : "r" (multiplicand), "r" (multiplier)); if (shift == 0) { return resLow; @@ -522,7 +524,7 @@ Mul64x6464(uint64 multiplicand, * * Signed integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) - * + * * Signed 64-bit integer multiplicand. * Unsigned 64-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. @@ -540,12 +542,10 @@ Muls64x64s64(int64 multiplicand, { uint64 resLow, resHi; - asm volatile( - "mul %x0, %x2, %x3 \n\t" - "smulh %x1, %x2, %x3 \n\t" - : "=&r" (resLow), "=&r" (resHi) - : "r" (multiplicand), "r" (multiplier) - :); + asm("mul %0, %2, %3" "\n\t" + "smulh %1, %2, %3" + : "=&r" (resLow), "=r" (resHi) + : "r" (multiplicand), "r" (multiplier)); if (shift == 0) { return resLow; @@ -564,7 +564,7 @@ Muls64x64s64(int64 multiplicand, * * Unsigned integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) - * + * * Unsigned 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. @@ -589,7 +589,7 @@ Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift) * * Signed integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) - * + * * Signed 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. diff --git a/open-vm-tools/lib/include/vm_basic_asm_x86_64.h b/open-vm-tools/lib/include/vm_basic_asm_x86_64.h index 5b171871e..5229d6b5a 100644 --- a/open-vm-tools/lib/include/vm_basic_asm_x86_64.h +++ b/open-vm-tools/lib/include/vm_basic_asm_x86_64.h @@ -33,7 +33,7 @@ /* * vm_basic_asm_x86_64.h * - * Basic x86_64 asm macros. + * Basic x86_64 asm macros. */ #ifndef _VM_BASIC_ASM_X86_64_H_ @@ -55,7 +55,8 @@ #if defined(__GNUC__) /* - * GET_CURRENT_PC + * _GET_CURRENT_PC -- + * GET_CURRENT_PC -- * * Returns the current program counter (i.e. instruction pointer i.e. rip * register on x86_64). In the example below: @@ -65,29 +66,37 @@ * * the return value from GET_CURRENT_PC will point a debugger to L123. 
*/ -#define GET_CURRENT_PC() ({ \ - void *__rip; \ - asm("lea 0(%%rip), %0;\n\t" \ - : "=r" (__rip)); \ - __rip; \ -}) + +#define _GET_CURRENT_PC(rip) \ + asm volatile("lea 0(%%rip), %0" : "=r" (rip)) + +static INLINE_ALWAYS void * +GET_CURRENT_PC(void) +{ + void *rip; + + _GET_CURRENT_PC(rip); + return rip; +} /* - * GET_CURRENT_LOCATION + * GET_CURRENT_LOCATION -- * * Updates the arguments with the values of the %rip, %rbp, and %rsp - * registers at the current code location where the macro is invoked, - * and the return address. + * registers and the return address at the current code location where + * the macro is invoked. */ -#define GET_CURRENT_LOCATION(rip, rbp, rsp, retAddr) do { \ - asm("lea 0(%%rip), %0\n" \ - "mov %%rbp, %1\n" \ - "mov %%rsp, %2\n" \ - : "=r" (rip), "=r" (rbp), "=r" (rsp)); \ - retAddr = (uint64) GetReturnAddress(); \ - } while (0) + +#define GET_CURRENT_LOCATION(rip, rbp, rsp, retAddr) do { \ + _GET_CURRENT_PC(rip); \ + asm volatile("mov %%rbp, %0" "\n\t" \ + "mov %%rsp, %1" \ + : "=r" (rbp), "=r" (rsp)); \ + retAddr = (uint64)GetReturnAddress(); \ +} while (0) #endif + /* * FXSAVE/FXRSTOR * save/restore SIMD/MMX fpu state
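
Illustrative usage (a sketch by the editor, not part of this commit): how a
caller might use the reworked GET_CURRENT_PC()/GET_CURRENT_LOCATION()
accessors. It assumes the definitions above are reachable through the usual
vm_basic_asm.h umbrella header; the FailureLocation struct and
RecordFailureLocation() function are hypothetical names.

   #include "vm_basic_asm.h"   /* selects the per-architecture header */

   typedef struct FailureLocation {
      void   *pc;       /* program counter at the capture point          */
      uint64  fp;       /* frame pointer: x29 on AArch64, %rbp on x86_64 */
      uint64  sp;       /* stack pointer                                 */
      uint64  lrOrRet;  /* x30 on AArch64, return address on x86_64      */
   } FailureLocation;

   static void
   RecordFailureLocation(FailureLocation *loc)
   {
      /* GET_CURRENT_PC() alone suffices when only the pc is needed. */
      GET_CURRENT_LOCATION(loc->pc, loc->fp, loc->sp, loc->lrOrRet);
   }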
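
Illustrative usage (a sketch by the editor, not part of this commit): the
renamed MRS()/MSR() parameter is an architectural system-register name.
CNTVCT_EL0, CNTFRQ_EL0, CNTVOFF_EL2 and DAIFSet are standard AArch64 names;
the wrapper functions are hypothetical, and the counter reads assume an
exception level and configuration that permit them.

   static uint64
   ReadCounterTicks(void)
   {
      return MRS(CNTVCT_EL0);     /* virtual counter */
   }

   static uint64
   ReadCounterFrequencyHz(void)
   {
      return MRS(CNTFRQ_EL0);     /* counter frequency in Hz */
   }

   /*
    * Writes pair the register name with a value; MSR_IMMED() covers the
    * immediate forms of the instruction, e.g. (at a privileged EL):
    *
    *    MSR(CNTVOFF_EL2, offset);    // "r" operand
    *    MSR_IMMED(DAIFSet, 0x3);     // "i" operand: mask IRQ and FIQ
    */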
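
Illustrative reference (a sketch by the editor, not part of this commit) for
the rounding rule documented on Mul64x6464() and its siblings,
result = floor(multiplicand * multiplier * 2**(-shift) + 0.5), written with
the GCC/Clang unsigned __int128 extension purely to make the semantics
explicit. The function name is hypothetical.

   static uint64
   Mul64x6464Reference(uint64 multiplicand,  /* unsigned 64-bit integer         */
                       uint64 multiplier,    /* fixed point (multiplier, shift) */
                       uint32 shift)         /* shift < 64                      */
   {
      unsigned __int128 product = (unsigned __int128)multiplicand * multiplier;

      if (shift == 0) {
         return (uint64)product;
      }
      /* Add half of the discarded weight before shifting: round to nearest. */
      return (uint64)((product + ((unsigned __int128)1 << (shift - 1))) >> shift);
   }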