]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
s390: Add stackprotector support
authorHeiko Carstens <hca@linux.ibm.com>
Mon, 17 Nov 2025 14:09:53 +0000 (15:09 +0100)
committerHeiko Carstens <hca@linux.ibm.com>
Mon, 24 Nov 2025 10:45:21 +0000 (11:45 +0100)
Stackprotector support was previously unavailable on s390 because by
default compilers generate code which is not suitable for the kernel:
the canary value is accessed via thread local storage, where the address
of thread local storage is within access registers 0 and 1.

Using those registers also for the kernel would come with a significant
performance impact and more complicated kernel entry/exit code, since
access registers contents would have to be exchanged on every kernel entry
and exit.

With the upcoming gcc 16 release, new compiler options will become available
which make it possible to generate code suitable for the kernel. [1]

Compiler option -mstack-protector-guard=global instructs gcc to generate
stackprotector code that refers to a global stackprotector canary value via
symbol __stack_chk_guard. Access to this value is guaranteed to occur via
larl and lgrl instructions.

Furthermore, compiler option -mstack-protector-guard-record generates a
section containing all code addresses that reference the canary value.

To allow for per task canary values the instructions which load the address
of __stack_chk_guard are patched so they access a lowcore field instead: a
per task canary value is available within the task_struct of each task, and
is written to the per-cpu lowcore location on each context switch.

Also add sanity checks and a debugging option to be consistent with other
kernel code patching mechanisms.

Full debugging output can be enabled with the following kernel command line
options:

debug_stackprotector
bootdebug
ignore_loglevel
earlyprintk
dyndbg="file stackprotector.c +p"

Example debug output:

stackprot: 0000021e402d4eda: c010005a9ae3 -> c01f00070240

where "<insn address>: <old insn> -> <new insn>".

[1] gcc commit 0cd1f03939d5 ("s390: Support global stack protector")

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
18 files changed:
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/boot/Makefile
arch/s390/boot/boot.h
arch/s390/boot/ipl_parm.c
arch/s390/boot/stackprotector.c [new file with mode: 0644]
arch/s390/boot/startup.c
arch/s390/include/asm/arch-stackprotector.h [new file with mode: 0644]
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/stackprotector.h [new file with mode: 0644]
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/module.c
arch/s390/kernel/smp.c
arch/s390/kernel/stackprotector.c [new file with mode: 0644]
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vmlinux.lds.S

index cb143bf782f8b85211760f299ac29b0dba8e75fa..d83501f829f1507eb2b31d5b724879899a9107a9 100644 (file)
@@ -69,6 +69,9 @@ config CC_HAS_ASM_AOR_FORMAT_FLAGS
          Clang versions before 19.1.0 do not support A,
          O, and R inline assembly format flags.
 
+config CC_HAS_STACKPROTECTOR_GLOBAL
+       def_bool $(cc-option, -mstack-protector-guard=global -mstack-protector-guard-record)
+
 config S390
        def_bool y
        #
@@ -245,6 +248,7 @@ config S390
        select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_SETUP_PER_CPU_AREA
        select HAVE_SOFTIRQ_ON_OWN_STACK
+       select HAVE_STACKPROTECTOR if CC_HAS_STACKPROTECTOR_GLOBAL
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING
        select HAVE_VIRT_CPU_ACCOUNTING_IDLE
index bf53e7d1487a5f3f92b2d432891715de62308cc0..f0a670dcce71c885f6742781b3ad537281ae28d4 100644 (file)
@@ -89,6 +89,10 @@ ifdef CONFIG_EXPOLINE
   aflags-y += -DCC_USING_EXPOLINE
 endif
 
+ifeq ($(CONFIG_STACKPROTECTOR),y)
+  KBUILD_CFLAGS += -mstack-protector-guard=global -mstack-protector-guard-record
+endif
+
 ifdef CONFIG_FUNCTION_TRACER
   ifeq ($(call cc-option,-mfentry -mnop-mcount),)
     # make use of hotpatch feature if the compiler supports it
index 02f2cf08274876ebcb4cf99edd579413506a0b75..490167faba7a4312ae1c2eca12c75dfdea221bd5 100644 (file)
@@ -32,6 +32,7 @@ obj-$(CONFIG_RANDOMIZE_BASE)  += kaslr.o
 obj-y  += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
 obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
 obj-$(CONFIG_KMSAN) += kmsan.o
+obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o
 obj-all := $(obj-y) piggy.o syms.o
 
 targets        := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
index 37d5b097ede5f55256cd234cc4d50c4fdcc816b1..61a205b489fb093545c906a623d5a31a87b214d3 100644 (file)
@@ -28,6 +28,10 @@ struct vmlinux_info {
        unsigned long invalid_pg_dir_off;
        unsigned long alt_instructions;
        unsigned long alt_instructions_end;
+#ifdef CONFIG_STACKPROTECTOR
+       unsigned long stack_prot_start;
+       unsigned long stack_prot_end;
+#endif
 #ifdef CONFIG_KASAN
        unsigned long kasan_early_shadow_page_off;
        unsigned long kasan_early_shadow_pte_off;
index f584d7da29cb204fb88de629be92d5aa822a866e..6bc950b92be7666c7e50e6dfbf9b8cfbd22ada97 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/pgtable.h>
+#include <asm/arch-stackprotector.h>
 #include <asm/abs_lowcore.h>
 #include <asm/page-states.h>
 #include <asm/machine.h>
@@ -294,6 +295,11 @@ void parse_boot_command_line(void)
                                cmma_flag = 0;
                }
 
+#ifdef CONFIG_STACKPROTECTOR
+               if (!strcmp(param, "debug_stackprotector"))
+                       stack_protector_debug = 1;
+#endif
+
 #if IS_ENABLED(CONFIG_KVM)
                if (!strcmp(param, "prot_virt")) {
                        rc = kstrtobool(val, &enabled);
diff --git a/arch/s390/boot/stackprotector.c b/arch/s390/boot/stackprotector.c
new file mode 100644 (file)
index 0000000..6849494
--- /dev/null
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define boot_fmt(fmt)  "stackprot: " fmt
+
+#include "boot.h"
+#include "../kernel/stackprotector.c"
index 3fbd25b9498f35224e4586ee3dcc955138689d04..f77067dfc2a847e6853e614dbac842ff28e29d77 100644 (file)
@@ -20,6 +20,9 @@
 #include <asm/uv.h>
 #include <asm/abs_lowcore.h>
 #include <asm/physmem_info.h>
+#include <asm/stacktrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch-stackprotector.h>
 #include "decompressor.h"
 #include "boot.h"
 #include "uv.h"
@@ -477,6 +480,10 @@ static void kaslr_adjust_vmlinux_info(long offset)
        vmlinux.invalid_pg_dir_off += offset;
        vmlinux.alt_instructions += offset;
        vmlinux.alt_instructions_end += offset;
+#ifdef CONFIG_STACKPROTECTOR
+       vmlinux.stack_prot_start += offset;
+       vmlinux.stack_prot_end += offset;
+#endif
 #ifdef CONFIG_KASAN
        vmlinux.kasan_early_shadow_page_off += offset;
        vmlinux.kasan_early_shadow_pte_off += offset;
@@ -622,6 +629,7 @@ void startup_kernel(void)
        __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
                             (struct alt_instr *)_vmlinux_info.alt_instructions_end,
                             ALT_CTX_EARLY);
+       stack_protector_apply_early(text_lma);
 
        /*
         * Save KASLR offset for early dumps, before vmcore_info is set.
diff --git a/arch/s390/include/asm/arch-stackprotector.h b/arch/s390/include/asm/arch-stackprotector.h
new file mode 100644 (file)
index 0000000..9536272
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_ARCH_STACKPROTECTOR_H
+#define _ASM_S390_ARCH_STACKPROTECTOR_H
+
+extern unsigned long __stack_chk_guard;
+extern int stack_protector_debug;
+
+void __stack_protector_apply_early(unsigned long kernel_start);
+int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start);
+
+static inline void stack_protector_apply_early(unsigned long kernel_start)
+{
+       if (IS_ENABLED(CONFIG_STACKPROTECTOR))
+               __stack_protector_apply_early(kernel_start);
+}
+
+static inline int stack_protector_apply(unsigned long *start, unsigned long *end)
+{
+       if (IS_ENABLED(CONFIG_STACKPROTECTOR))
+               return __stack_protector_apply(start, end, 0);
+       return 0;
+}
+
+#endif /* _ASM_S390_ARCH_STACKPROTECTOR_H */
index d9c853db9a40b56ac01d36b5107b21e4a638dd24..50ffe75adeb477947357b90d22a809d703e76cbb 100644 (file)
@@ -100,7 +100,8 @@ struct lowcore {
 
        /* Save areas. */
        __u64   save_area[8];                   /* 0x0200 */
-       __u8    pad_0x0240[0x0280-0x0240];      /* 0x0240 */
+       __u64   stack_canary;                   /* 0x0240 */
+       __u8    pad_0x0248[0x0280-0x0248];      /* 0x0248 */
        __u64   save_area_restart[1];           /* 0x0280 */
 
        __u64   pcpu;                           /* 0x0288 */
diff --git a/arch/s390/include/asm/stackprotector.h b/arch/s390/include/asm/stackprotector.h
new file mode 100644 (file)
index 0000000..0497850
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_STACKPROTECTOR_H
+#define _ASM_S390_STACKPROTECTOR_H
+
+#include <linux/sched.h>
+#include <asm/current.h>
+#include <asm/lowcore.h>
+
+static __always_inline void boot_init_stack_canary(void)
+{
+       current->stack_canary = get_random_canary();
+       get_lowcore()->stack_canary = current->stack_canary;
+}
+
+#endif /* _ASM_S390_STACKPROTECTOR_H */
index 810000355ac5bfa5394c96d139ec6d4b3344747d..ecaee29e724e6e8af5c8cbb2935030b2afb4bac3 100644 (file)
@@ -67,7 +67,7 @@ obj-$(CONFIG_KEXEC_CORE)      += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_VMCORE_INFO)      += vmcore_info.o
 obj-$(CONFIG_UPROBES)          += uprobes.o
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
-
+obj-$(CONFIG_STACKPROTECTOR)   += stackprotector.o
 obj-$(CONFIG_KEXEC_FILE)       += machine_kexec_file.o kexec_image.o
 obj-$(CONFIG_KEXEC_FILE)       += kexec_elf.o
 obj-$(CONFIG_CERT_STORE)       += cert_store.o
index a8915663e917faed4551276b64013ee073662cc9..cfe27f6579e33684c7a170302051a4cf5c9eabdb 100644 (file)
@@ -21,6 +21,9 @@ int main(void)
        OFFSET(__TASK_stack, task_struct, stack);
        OFFSET(__TASK_thread, task_struct, thread);
        OFFSET(__TASK_pid, task_struct, pid);
+#ifdef CONFIG_STACKPROTECTOR
+       OFFSET(__TASK_stack_canary, task_struct, stack_canary);
+#endif
        BLANK();
        /* thread struct offsets */
        OFFSET(__THREAD_ksp, thread_struct, ksp);
@@ -139,6 +142,7 @@ int main(void)
        OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
        OFFSET(__LC_LAST_BREAK, lowcore, last_break);
        /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
+       OFFSET(__LC_STACK_CANARY, lowcore, stack_canary);
        OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
        OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
        OFFSET(__LC_OS_INFO, lowcore, os_info);
index 1e266c0eae2ca3f45b7d53b674e9acd5ee85e2cb..24cc33e668ea50e5ed9ca8560334925bb38f8741 100644 (file)
@@ -162,9 +162,13 @@ SYM_FUNC_START(__switch_to_asm)
        stg     %r3,__LC_CURRENT(%r13)          # store task struct of next
        stg     %r15,__LC_KERNEL_STACK(%r13)    # store end of kernel stack
        lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
-       aghi    %r3,__TASK_pid
-       mvc     __LC_CURRENT_PID(4,%r13),0(%r3) # store pid of next
+       aghik   %r4,%r3,__TASK_pid
+       mvc     __LC_CURRENT_PID(4,%r13),0(%r4) # store pid of next
        ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
+#ifdef CONFIG_STACKPROTECTOR
+       lg      %r3,__TASK_stack_canary(%r3)
+       stg     %r3,__LC_STACK_CANARY(%r13)
+#endif
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        BR_EX   %r14
 SYM_FUNC_END(__switch_to_asm)
index 54d99e811a8374a745506b12a062c71def39ddf3..9d1f8a50f5a4799edb232bddf482664eeb137624 100644 (file)
 #include <linux/bug.h>
 #include <linux/memory.h>
 #include <linux/execmem.h>
+#include <asm/arch-stackprotector.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
 #include <asm/facility.h>
 #include <asm/ftrace.lds.h>
 #include <asm/set_memory.h>
 #include <asm/setup.h>
+#include <asm/asm-offsets.h>
 
 #if 0
 #define DEBUGP printk
@@ -525,6 +527,13 @@ int module_finalize(const Elf_Ehdr *hdr,
                    (str_has_prefix(secname, ".s390_return")))
                        nospec_revert(aseg, aseg + s->sh_size);
 
+               if (IS_ENABLED(CONFIG_STACKPROTECTOR) &&
+                   (str_has_prefix(secname, "__stack_protector_loc"))) {
+                       rc = stack_protector_apply(aseg, aseg + s->sh_size);
+                       if (rc)
+                               break;
+               }
+
 #ifdef CONFIG_FUNCTION_TRACER
                if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
                        rc = module_alloc_ftrace_hotpatch_trampolines(me, s);
index 25240be74c21cb809d58d7749376c25b606843dd..b7429f30afc19e03cfaeeeb41c10da3d8d197028 100644 (file)
@@ -280,6 +280,9 @@ static void pcpu_attach_task(int cpu, struct task_struct *tsk)
        lc->hardirq_timer = tsk->thread.hardirq_timer;
        lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
+#ifdef CONFIG_STACKPROTECTOR
+       lc->stack_canary = tsk->stack_canary;
+#endif
 }
 
 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
diff --git a/arch/s390/kernel/stackprotector.c b/arch/s390/kernel/stackprotector.c
new file mode 100644 (file)
index 0000000..d4e4048
--- /dev/null
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef pr_fmt
+#define pr_fmt(fmt)    "stackprot: " fmt
+#endif
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <linux/printk.h>
+#include <asm/abs_lowcore.h>
+#include <asm/sections.h>
+#include <asm/machine.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch-stackprotector.h>
+
+#ifdef __DECOMPRESSOR
+
+#define DEBUGP         boot_debug
+#define EMERGP         boot_emerg
+#define PANIC          boot_panic
+
+#else /* __DECOMPRESSOR */
+
+#define DEBUGP         pr_debug
+#define EMERGP         pr_emerg
+#define PANIC          panic
+
+#endif /* __DECOMPRESSOR */
+
+int __bootdata_preserved(stack_protector_debug);
+
+unsigned long __stack_chk_guard;
+EXPORT_SYMBOL(__stack_chk_guard);
+
+struct insn_ril {
+       u8 opc1 : 8;
+       u8 r1   : 4;
+       u8 opc2 : 4;
+       u32 imm;
+} __packed;
+
+/*
+ * Convert a virtual instruction address to a real instruction address. The
+ * decompressor needs to patch instructions within the kernel image based on
+ * their virtual addresses, while dynamic address translation is still
+ * disabled. Therefore a translation from virtual kernel image addresses to
+ * the corresponding physical addresses is required.
+ *
+ * After dynamic address translation is enabled and when the kernel needs to
+ * patch instructions such a translation is not required since the addresses
+ * are identical.
+ */
+static struct insn_ril *vaddress_to_insn(unsigned long vaddress)
+{
+#ifdef __DECOMPRESSOR
+       return (struct insn_ril *)__kernel_pa(vaddress);
+#else
+       return (struct insn_ril *)vaddress;
+#endif
+}
+
+static unsigned long insn_to_vaddress(struct insn_ril *insn)
+{
+#ifdef __DECOMPRESSOR
+       return (unsigned long)__kernel_va(insn);
+#else
+       return (unsigned long)insn;
+#endif
+}
+
+#define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1)
+
+static void insn_ril_to_string(char *str, struct insn_ril *insn)
+{
+       u8 *ptr = (u8 *)insn;
+       int i;
+
+       for (i = 0; i < sizeof(*insn); i++)
+               hex_byte_pack(&str[2 * i], ptr[i]);
+       str[2 * i] = 0;
+}
+
+static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new)
+{
+       char ostr[INSN_RIL_STRING_SIZE];
+       char nstr[INSN_RIL_STRING_SIZE];
+
+       insn_ril_to_string(ostr, old);
+       insn_ril_to_string(nstr, new);
+       DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr);
+}
+
+static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start)
+{
+       char istr[INSN_RIL_STRING_SIZE];
+       unsigned long vaddress, offset;
+
+       /* larl */
+       if (insn->opc1 == 0xc0 && insn->opc2 == 0x0)
+               return 0;
+       /* lgrl */
+       if (insn->opc1 == 0xc4 && insn->opc2 == 0x8)
+               return 0;
+       insn_ril_to_string(istr, insn);
+       vaddress = insn_to_vaddress(insn);
+       if (__is_defined(__DECOMPRESSOR)) {
+               offset = (unsigned long)insn - kernel_start + TEXT_OFFSET;
+               EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr);
+               PANIC("Stackprotector error\n");
+       } else {
+               EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr);
+       }
+       return -EINVAL;
+}
+
+int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start)
+{
+       unsigned long canary, *loc;
+       struct insn_ril *insn, new;
+       int rc;
+
+       /*
+        * Convert LARL/LGRL instructions to LLILF so register R1 contains the
+        * address of the per-cpu / per-process stack canary:
+        *
+        * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary
+        */
+       canary = __LC_STACK_CANARY;
+       if (machine_has_relocated_lowcore())
+               canary += LOWCORE_ALT_ADDRESS;
+       for (loc = start; loc < end; loc++) {
+               insn = vaddress_to_insn(*loc);
+               rc = stack_protector_verify(insn, kernel_start);
+               if (rc)
+                       return rc;
+               new = *insn;
+               new.opc1 = 0xc0;
+               new.opc2 = 0xf;
+               new.imm = canary;
+               if (stack_protector_debug)
+                       stack_protector_dump(insn, &new);
+               s390_kernel_write(insn, &new, sizeof(*insn));
+       }
+       return 0;
+}
+
+#ifdef __DECOMPRESSOR
+void __stack_protector_apply_early(unsigned long kernel_start)
+{
+       unsigned long *start, *end;
+
+       start = (unsigned long *)vmlinux.stack_prot_start;
+       end = (unsigned long *)vmlinux.stack_prot_end;
+       __stack_protector_apply(start, end, kernel_start);
+}
+#endif
index d8f0df74280960cb351154a8a73b4f7fe83a9125..49ad8dfc7c790aa495ed57891e1bc6b61e16b59a 100644 (file)
@@ -32,6 +32,7 @@ KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_
 KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
 KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
 KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
+KBUILD_CFLAGS_64 += -fno-stack-protector
 ldflags-y := -shared -soname=linux-vdso64.so.1 \
             --hash-style=both --build-id=sha1 -T
 
index d74d4c52ccd05a255b5642359049c2990a082bbf..d5b67c99a24a38540e41a511e30c7c4b399c0cd2 100644 (file)
@@ -150,6 +150,15 @@ SECTIONS
                *(.altinstr_replacement)
        }
 
+#ifdef CONFIG_STACKPROTECTOR
+       . = ALIGN(8);
+       .stack_prot_table : {
+               __stack_prot_start = .;
+               KEEP(*(__stack_protector_loc))
+               __stack_prot_end = .;
+       }
+#endif
+
        /*
         * Table with the patch locations to undo expolines
        */
@@ -257,6 +266,10 @@ SECTIONS
                QUAD(invalid_pg_dir)
                QUAD(__alt_instructions)
                QUAD(__alt_instructions_end)
+#ifdef CONFIG_STACKPROTECTOR
+               QUAD(__stack_prot_start)
+               QUAD(__stack_prot_end)
+#endif
 #ifdef CONFIG_KASAN
                QUAD(kasan_early_shadow_page)
                QUAD(kasan_early_shadow_pte)