In most cases, the use of "fast 32-bit system call" depends on either
X86_FEATURE_SEP or X86_FEATURE_SYSENTER32 || X86_FEATURE_SYSCALL32.
However, nearly all the logic for both is identical.

Define X86_FEATURE_SYSFAST32, which indicates that *either* SYSENTER32
or SYSCALL32 should be used, for either 32- or 64-bit kernels. This
defaults to SYSENTER; SYSCALL is used if the SYSCALL32 bit is also set.

As this removes ALL existing uses of X86_FEATURE_SYSENTER32, which is
a kernel-only synthetic feature bit, simply remove it and replace it
with X86_FEATURE_SYSFAST32.

This leaves an unused alternative for a true 32-bit kernel, but that
should not matter in any way.

The clearing of X86_FEATURE_SYSCALL32 can be removed once the patches
for automatically clearing disabled features have been merged.

Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://patch.msgid.link/20251216212606.1325678-10-hpa@zytor.com
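The intended selection policy can be modeled as below. This is an
illustrative sketch only, not code from the patch; pick_fast32_method()
and its boolean arguments are hypothetical stand-ins for the real
cpufeature bits.

#include <stdbool.h>

enum fast32_method { FAST32_NONE, FAST32_SYSENTER, FAST32_SYSCALL };

/*
 * Model of the SYSFAST32 policy: SYSENTER by default, SYSCALL when
 * the SYSCALL32 bit is also set, int $0x80 otherwise.
 */
static enum fast32_method pick_fast32_method(bool sysfast32, bool syscall32)
{
	if (!sysfast32)
		return FAST32_NONE;	/* fall back to int $0x80 */
	if (syscall32)
		return FAST32_SYSCALL;	/* AMD: SYSCALL in compat mode */
	return FAST32_SYSENTER;		/* default: SYSENTER */
}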
def_bool y
depends on MATOM
+config X86_REQUIRED_FEATURE_SYSFAST32
+ def_bool y
+ depends on X86_64 && !X86_FRED
+
config X86_REQUIRED_FEATURE_CPUID
def_bool y
depends on X86_64
def_bool y
depends on X86_64
+config X86_DISABLED_FEATURE_SYSCALL32
+ def_bool y
+ depends on !X86_64
+
config X86_DISABLED_FEATURE_PCID
def_bool y
depends on !X86_64
#define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter"
#define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall"
-#ifdef BUILD_VDSO32_64
/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
- ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
- SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
-#else
- ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
-#endif
+ ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
+ SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
/* Enter using int $0x80 */
int $0x80
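Note that with ALTERNATIVE_2 the later alternative takes precedence
when both feature bits are set, which is what gives the SYSCALL
sequence priority over the SYSENTER default. A minimal C model of the
patched-in selection, under that assumption (illustrative only; the
function and flags are hypothetical):

#include <stdbool.h>

/* Models ALTERNATIVE_2 semantics: the last matching feature wins. */
static const char *vdso32_fast_sequence(bool sysfast32, bool syscall32)
{
	const char *seq = "";				/* keep int $0x80 path */

	if (sysfast32)
		seq = "movl %esp, %ebp; sysenter";	/* SYSENTER_SEQUENCE */
	if (syscall32)
		seq = "movl %ecx, %ebp; syscall";	/* SYSCALL_SEQUENCE */
	return seq;
}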
#define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_SYSFAST32 ( 3*32+15) /* sysenter/syscall in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
(c->x86 >= 7))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
init_scattered_cpuid_features(c);
init_speculation_control(c);
+ if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
+ set_cpu_cap(c, X86_FEATURE_SYSFAST32);
+
/*
* Clear/Set all flags overridden by options, after probe.
* This needs to happen each time we re-probe, which may happen
* that it can't be enabled in 32-bit mode.
*/
setup_clear_cpu_cap(X86_FEATURE_PCID);
+
+ /*
+ * Never use SYSCALL on a 32-bit kernel
+ */
+ setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
#endif
/*
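The net effect of the common-code changes above can be summarized as
follows (an illustrative sketch, not kernel code; the flags stand in
for CONFIG_X86_64 and the cpufeature bits):

#include <stdbool.h>

static void derive_fast32_caps(bool is_64bit, bool has_sep,
			       bool *sysfast32, bool *syscall32)
{
	/* SYSFAST32 is always usable on 64-bit; 32-bit needs SEP. */
	if (is_64bit || has_sep)
		*sysfast32 = true;

	/*
	 * SYSCALL is never used for 32-bit system calls on a
	 * 32-bit kernel.
	 */
	if (!is_64bit)
		*syscall32 = false;
}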
clear_cpu_cap(c, X86_FEATURE_PSE);
}
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#else
+#ifndef CONFIG_X86_64
/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
if (c->x86 == 15 && c->x86_cache_alignment == 64)
c->x86_cache_alignment = 128;
{
if (c->x86 >= 0x6)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
+
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
idt_invalidate();
/* Use int $0x80 for 32-bit system calls in FRED mode */
- setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
+ setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}
return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
-void xen_enable_sysenter(void)
-{
- if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
- register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
- setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
-}
-
void xen_enable_syscall(void)
{
int ret;
mechanism for syscalls. */
}
- if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
- register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
+ if (!cpu_feature_enabled(X86_FEATURE_SYSFAST32))
+ return;
+
+ if (cpu_feature_enabled(X86_FEATURE_SYSCALL32)) {
+ /* Use SYSCALL32 */
+ ret = register_callback(CALLBACKTYPE_syscall32,
+ xen_entry_SYSCALL_compat);
+ } else {
+ /* Use SYSENTER32 */
+ ret = register_callback(CALLBACKTYPE_sysenter,
+ xen_entry_SYSENTER_compat);
+ }
+
+ if (ret) {
setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
+ setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
+ }
}
+
static void __init xen_pvmmu_arch_setup(void)
{
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
BUG();
- xen_enable_sysenter();
xen_enable_syscall();
}
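The rewritten xen_enable_syscall() folds the old xen_enable_sysenter()
into one routine: it registers the SYSCALL32 callback when that bit is
set, the SYSENTER one otherwise, and on failure clears both bits so
the vDSO alternatives stay unpatched and 32-bit userspace falls back
to int $0x80. A sketch of that flow (illustrative only; register_cb()
is a hypothetical stand-in for register_callback()):

#include <stdbool.h>

static void enable_fast32_callbacks(bool *sysfast32, bool *syscall32,
				    int (*register_cb)(bool use_syscall))
{
	if (!*sysfast32)
		return;

	if (register_cb(*syscall32)) {
		/* No usable callback: force the int $0x80 path. */
		*syscall32 = false;
		*sysfast32 = false;
	}
}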
touch_softlockup_watchdog();
/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
- if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
- xen_enable_sysenter();
+ if (!xen_feature(XENFEAT_supervisor_mode_kernel))
xen_enable_syscall();
- }
+
cpu = smp_processor_id();
identify_secondary_cpu(cpu);
set_cpu_sibling_map(cpu);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
void xen_banner(void);
-void xen_enable_sysenter(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);