]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
x86/cpufeature: Replace X86_FEATURE_SYSENTER32 with X86_FEATURE_SYSFAST32
authorH. Peter Anvin <hpa@zytor.com>
Tue, 16 Dec 2025 21:26:03 +0000 (13:26 -0800)
committerDave Hansen <dave.hansen@linux.intel.com>
Wed, 14 Jan 2026 00:37:58 +0000 (16:37 -0800)
In most cases, the use of "fast 32-bit system call" depends either on
X86_FEATURE_SEP or X86_FEATURE_SYSENTER32 || X86_FEATURE_SYSCALL32.
However, nearly all the logic for both is identical.

Define X86_FEATURE_SYSFAST32 which indicates that *either* SYSENTER32 or
SYSCALL32 should be used, for either 32- or 64-bit kernels.  This
defaults to SYSENTER; use SYSCALL if the SYSCALL32 bit is also set.

As this removes ALL existing uses of X86_FEATURE_SYSENTER32, which is
a kernel-only synthetic feature bit, simply remove it and replace it
with X86_FEATURE_SYSFAST32.

This leaves an unused alternative for a true 32-bit kernel, but that
should really not matter in any way.

The clearing of X86_FEATURE_SYSCALL32 can be removed once the patches
for automatically clearing disabled features have been merged.

Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://patch.msgid.link/20251216212606.1325678-10-hpa@zytor.com
arch/x86/Kconfig.cpufeatures
arch/x86/entry/vdso/vdso32/system_call.S
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/zhaoxin.c
arch/x86/kernel/fred.c
arch/x86/xen/setup.c
arch/x86/xen/smp_pv.c
arch/x86/xen/xen-ops.h

index 733d5aff24568129628e797b1a992930aac58f03..423ac795baa7283473e1be6771ffee120d0397dc 100644 (file)
@@ -56,6 +56,10 @@ config X86_REQUIRED_FEATURE_MOVBE
        def_bool y
        depends on MATOM
 
+config X86_REQUIRED_FEATURE_SYSFAST32
+       def_bool y
+       depends on X86_64 && !X86_FRED
+
 config X86_REQUIRED_FEATURE_CPUID
        def_bool y
        depends on X86_64
@@ -120,6 +124,10 @@ config X86_DISABLED_FEATURE_CENTAUR_MCR
        def_bool y
        depends on X86_64
 
+config X86_DISABLED_FEATURE_SYSCALL32
+       def_bool y
+       depends on !X86_64
+
 config X86_DISABLED_FEATURE_PCID
        def_bool y
        depends on !X86_64
index 2a15634bbe7538b1de463e047e503b2352c19db7..7b1c0f16e511bfd4d01bc63dcfbb858edd958e45 100644 (file)
@@ -52,13 +52,9 @@ __kernel_vsyscall:
        #define SYSENTER_SEQUENCE       "movl %esp, %ebp; sysenter"
        #define SYSCALL_SEQUENCE        "movl %ecx, %ebp; syscall"
 
-#ifdef BUILD_VDSO32_64
        /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
-       ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
-                         SYSCALL_SEQUENCE,  X86_FEATURE_SYSCALL32
-#else
-       ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
-#endif
+       ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
+                         SYSCALL_SEQUENCE,  X86_FEATURE_SYSCALL32
 
        /* Enter using int $0x80 */
        int     $0x80
index c3b53beb130078cabecd4d43b98dac8af921f087..63b0f9aa9b3e3f6ec59725ba622c589a16b770f2 100644 (file)
@@ -84,7 +84,7 @@
 #define X86_FEATURE_PEBS               ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
 #define X86_FEATURE_BTS                        ( 3*32+13) /* "bts" Branch Trace Store */
 #define X86_FEATURE_SYSCALL32          ( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32         ( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_SYSFAST32          ( 3*32+15) /* sysenter/syscall in IA32 userspace */
 #define X86_FEATURE_REP_GOOD           ( 3*32+16) /* "rep_good" REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2         ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
 #define X86_FEATURE_CLEAR_CPU_BUF      ( 3*32+18) /* Clear CPU buffers using VERW */
index a3b55db35c9612151b69b2ef559d93897cabf727..9833f837141cb5c3d6a8160792f17f285470b4ce 100644 (file)
@@ -102,9 +102,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
            (c->x86 >= 7))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-#ifdef CONFIG_X86_64
-       set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
index e7ab22fce3b57cae474da3c8292a35961c4c602a..1c3261cae40c9f5e8b3b0ed6de62d396a13c5570 100644 (file)
@@ -1068,6 +1068,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
        init_scattered_cpuid_features(c);
        init_speculation_control(c);
 
+       if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
+               set_cpu_cap(c, X86_FEATURE_SYSFAST32);
+
        /*
         * Clear/Set all flags overridden by options, after probe.
         * This needs to happen each time we re-probe, which may happen
@@ -1813,6 +1816,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
         * that it can't be enabled in 32-bit mode.
         */
        setup_clear_cpu_cap(X86_FEATURE_PCID);
+
+       /*
+        * Never use SYSCALL on a 32-bit kernel
+        */
+       setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 #endif
 
        /*
index 98ae4c37c93eccf775d5632acf122603a19918a8..646ff33c4651db45665f6e82675e759b1d5d7920 100644 (file)
@@ -236,9 +236,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }
 
-#ifdef CONFIG_X86_64
-       set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#else
+#ifndef CONFIG_X86_64
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
index 89b1c8a70fe8decc9d26e24cc182467ebe0e816c..031379b7d4faf79e845c90bbf8dbbc5fa1b8f6ea 100644 (file)
@@ -59,9 +59,7 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c)
 {
        if (c->x86 >= 0x6)
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-#ifdef CONFIG_X86_64
-       set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
+
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
index 816187da3a47c27e694eb767d4557c5532b68735..e736b19e18de5fbf0e656c0b6c6ca152e672fde1 100644 (file)
@@ -68,7 +68,7 @@ void cpu_init_fred_exceptions(void)
        idt_invalidate();
 
        /* Use int $0x80 for 32-bit system calls in FRED mode */
-       setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
+       setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 }
 
index 3823e52aef523cb307930122686afa0ed1aebb19..ac8021c3a997e0bdbe686362b7052230eae791bc 100644 (file)
@@ -990,13 +990,6 @@ static int register_callback(unsigned type, const void *func)
        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
 }
 
-void xen_enable_sysenter(void)
-{
-       if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
-           register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
-               setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
-}
-
 void xen_enable_syscall(void)
 {
        int ret;
@@ -1008,11 +1001,27 @@ void xen_enable_syscall(void)
                   mechanism for syscalls. */
        }
 
-       if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
-           register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
+       if (!cpu_feature_enabled(X86_FEATURE_SYSFAST32))
+               return;
+
+       if (cpu_feature_enabled(X86_FEATURE_SYSCALL32)) {
+               /* Use SYSCALL32 */
+               ret = register_callback(CALLBACKTYPE_syscall32,
+                                       xen_entry_SYSCALL_compat);
+
+       } else {
+               /* Use SYSENTER32 */
+               ret = register_callback(CALLBACKTYPE_sysenter,
+                                       xen_entry_SYSENTER_compat);
+       }
+
+       if (ret) {
                setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
+               setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
+       }
 }
 
+
 static void __init xen_pvmmu_arch_setup(void)
 {
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
@@ -1022,7 +1031,6 @@ static void __init xen_pvmmu_arch_setup(void)
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();
 
-       xen_enable_sysenter();
        xen_enable_syscall();
 }
 
index 9bb8ff8bff30a6d13a186609e40346203d4feb25..c40f326f0c3ad905909c2ae6f41fa8e41b5ce22f 100644 (file)
@@ -65,10 +65,9 @@ static void cpu_bringup(void)
        touch_softlockup_watchdog();
 
        /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
-       if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
-               xen_enable_sysenter();
+       if (!xen_feature(XENFEAT_supervisor_mode_kernel))
                xen_enable_syscall();
-       }
+
        cpu = smp_processor_id();
        identify_secondary_cpu(cpu);
        set_cpu_sibling_map(cpu);
index 090349baec0979ac5947d488e96a53659bcd38d1..f6c331b20fadd384e93a482ffd4ad2907bdf9393 100644 (file)
@@ -60,7 +60,6 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_banner(void);
-void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);