ALIGN
__kernel_vsyscall:
CFI_STARTPROC
+
+ /*
+ * If using int $0x80, there is no reason to muck about with the
+ * stack here. Unfortunately, just overwriting the push instructions
+ * would mess up the CFI annotations, but it is only a 3-byte NOP
+ * in that case. This could be avoided by patching the vDSO symbol
+ * table (not the code) and the entry point, but that would take a
+ * fair bit of tooling work, or by simply compiling two different
+ * vDSO images; neither seems worth it.
+ */
+ ALTERNATIVE "int $0x80; ret", "", X86_FEATURE_SYSFAST32
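+ /* With X86_FEATURE_SYSFAST32 set, the above is patched out and we fall through. */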
+
/*
* Reshuffle regs so that any of the entry instructions will
* preserve enough state.
#define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter"
#define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall"
- /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
- ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
- SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
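+ /* SYSENTER (Intel) by default; patched to SYSCALL (AMD) when available. */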
+ ALTERNATIVE SYSENTER_SEQUENCE, SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
- /* Enter using int $0x80 */
+ /* Re-enter using int $0x80 */
int $0x80
SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
# define __sys_reg4 "r10"
# define __sys_reg5 "r8"
#else
-# define __sys_instr "call __kernel_vsyscall"
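+/*
+ * The three ds override prefixes pad "int $0x80" (2 bytes) to the
+ * 5-byte length of "call __kernel_vsyscall", keeping both
+ * alternatives the same size.
+ */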
+# define __sys_instr ALTERNATIVE("ds;ds;ds;int $0x80", \
+ "call __kernel_vsyscall", \
+ X86_FEATURE_SYSFAST32)
# define __sys_clobber "memory"
# define __sys_nr(x,y) __NR_ ## x ## y
# define __sys_reg1 "ebx"