riscv: Add usercfi state for task and save/restore of CSR_SSP on trap entry/exit
author    Deepak Gupta <debug@rivosinc.com>
          Mon, 26 Jan 2026 04:09:53 +0000 (21:09 -0700)
committer Paul Walmsley <pjw@kernel.org>
          Mon, 26 Jan 2026 04:09:53 +0000 (21:09 -0700)
Carve out space in the RISC-V architecture-specific thread_info struct for
the cfi status and shadow stack of a usermode task.

This patch:
- defines a new structure cfi_state with a status bit for the cfi feature
  (see the sketch after this list)
- defines the shadow stack pointer, base and size in the cfi_state structure
- defines offsets to the new thread_info member fields in asm-offsets.c
- saves and restores the shadow stack pointer on trap entry (U --> S) and exit
  (S --> U)
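
As a condensed view, the new per-task state ends up looking like the C
sketch below when CONFIG_RISCV_USER_CFI is enabled (assembled from the
usercfi.h and thread_info.h hunks further down, not additional code):

	/* arch/riscv/include/asm/usercfi.h (new) */
	struct cfi_state {
		unsigned long ubcfi_en : 1;	/* enable for backward cfi */
		unsigned long user_shdw_stk;	/* current user shadow stack pointer */
		unsigned long shdw_stk_base;	/* base address of shadow stack */
		unsigned long shdw_stk_size;	/* size of shadow stack */
	};

	/* arch/riscv/include/asm/thread_info.h */
	struct thread_info {
		/* ... existing fields ... */
		struct cfi_state	user_cfi_state;
	};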

Shadow stack save/restore is gated on feature availability and is
implemented using alternatives. CSR_SSP could be context-switched in
'switch_to' as well, but once kernel shadow stack support is rolled in,
the shadow stack pointer will need to be switched at the trap entry/exit
point (much like 'sp'). It can be argued that kernel shadow stack
deployments may not be as prevalent as user-mode use of this feature,
but even a minimal deployment of kernel shadow stacks means it needs to
be supported. Thus the save/restore of the shadow stack pointer is
implemented in entry.S instead of in 'switch_to.h'.
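
For reference, when CONFIG_RISCV_USER_CFI is enabled and the Zicfiss
extension is present, the alternative bodies added to entry.S below
effectively expand to the following at trap entry and exit (register
choices follow handle_exception/ret_from_exception; this is an
illustrative expansion of the new macros, not extra patched code):

	/* entry: save_userssp s2, s1 -- s1 holds the pre-trap CSR_STATUS */
	andi	s2, s1, SR_SPP			/* SPP set => trap came from S-mode */
	bnez	s2, skip_ssp_save		/* nothing to save for S-mode traps */
	csrrw	s2, CSR_SSP, x0			/* read user SSP, zero the CSR */
	REG_S	s2, TASK_TI_USER_SSP(tp)	/* stash it in thread_info */
skip_ssp_save:

	/* exit: restore_userssp s3, a0 -- a0 holds the saved PT_STATUS */
	andi	s3, a0, SR_SPP			/* returning to S-mode? */
	bnez	s3, skip_ssp_restore		/* then leave CSR_SSP alone */
	REG_L	s3, TASK_TI_USER_SSP(tp)	/* reload the saved user SSP */
	csrw	CSR_SSP, s3			/* restore it before sret */
skip_ssp_restore:

Without Zicfiss (or with CONFIG_RISCV_USER_CFI=n) each macro is left as
the four nops from the ALTERNATIVE() old content, so the existing trap
path is unaffected.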

Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
Reviewed-by: Zong Li <zong.li@sifive.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
Tested-by: Andreas Korb <andreas.korb@aisec.fraunhofer.de> # QEMU, custom CVA6
Tested-by: Valentin Haudiquet <valentin.haudiquet@canonical.com>
Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-5-b55691eacf4f@rivosinc.com
[pjw@kernel.org: cleaned up patch description]
Signed-off-by: Paul Walmsley <pjw@kernel.org>
arch/riscv/include/asm/processor.h
arch/riscv/include/asm/thread_info.h
arch/riscv/include/asm/usercfi.h [new file with mode: 0644]
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/entry.S

diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index da5426122d280b53b8ba8f764ea6f1b9f93ca994..4c3dd94d0f63844fecc32a7e2c1a184113ee49d9 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -16,6 +16,7 @@
 #include <asm/insn-def.h>
 #include <asm/alternative-macros.h>
 #include <asm/hwcap.h>
+#include <asm/usercfi.h>
 
 #define arch_get_mmap_end(addr, len, flags)                    \
 ({                                                             \
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 836d80dd29210d528245a547f0398169e63adda5..36918c9200c92c75a6b0748097eca5c3805676a1 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -73,6 +73,9 @@ struct thread_info {
         */
        unsigned long           a0, a1, a2;
 #endif
+#ifdef CONFIG_RISCV_USER_CFI
+       struct cfi_state        user_cfi_state;
+#endif
 };
 
 #ifdef CONFIG_SHADOW_CALL_STACK
diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h
new file mode 100644
index 0000000..4c5233e
--- /dev/null
+++ b/arch/riscv/include/asm/usercfi.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2024 Rivos, Inc.
+ * Deepak Gupta <debug@rivosinc.com>
+ */
+#ifndef _ASM_RISCV_USERCFI_H
+#define _ASM_RISCV_USERCFI_H
+
+#ifndef __ASSEMBLER__
+#include <linux/types.h>
+
+#ifdef CONFIG_RISCV_USER_CFI
+struct cfi_state {
+       unsigned long ubcfi_en : 1; /* Enable for backward cfi. */
+       unsigned long user_shdw_stk; /* Current user shadow stack pointer */
+       unsigned long shdw_stk_base; /* Base address of shadow stack */
+       unsigned long shdw_stk_size; /* size of shadow stack */
+};
+
+#endif /* CONFIG_RISCV_USER_CFI */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ASM_RISCV_USERCFI_H */
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 7d42d3b8a32a751c7bda1c89633f22f1b0ad602f..8a2b2656cb2fda317dd74935106c706676bc0ac3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -51,6 +51,10 @@ void asm_offsets(void)
 #endif
 
        OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+#ifdef CONFIG_RISCV_USER_CFI
+       OFFSET(TASK_TI_CFI_STATE, task_struct, thread_info.user_cfi_state);
+       OFFSET(TASK_TI_USER_SSP, task_struct, thread_info.user_cfi_state.user_shdw_stk);
+#endif
        OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
        OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
        OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 9b9dec6893b81a6b0c39af654590848e9ef754c1..0f1394d71720cea36a844acaef8e6f0a53db5b80 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
        REG_L   a0, TASK_TI_A0(tp)
 .endm
 
+/*
+ * If previous mode was U, capture shadow stack pointer and save it away
+ * Zero CSR_SSP at the same time for sanitization.
+ */
+.macro save_userssp tmp, status
+       ALTERNATIVE("nops(4)",
+               __stringify(                            \
+               andi \tmp, \status, SR_SPP;             \
+               bnez \tmp, skip_ssp_save;               \
+               csrrw \tmp, CSR_SSP, x0;                \
+               REG_S \tmp, TASK_TI_USER_SSP(tp);       \
+               skip_ssp_save:),
+               0,
+               RISCV_ISA_EXT_ZICFISS,
+               CONFIG_RISCV_USER_CFI)
+.endm
+
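+/*
+ * If returning to U mode, restore CSR_SSP from the shadow stack pointer
+ * saved at trap entry.
+ */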
+.macro restore_userssp tmp, status
+       ALTERNATIVE("nops(4)",
+               __stringify(                            \
+               andi \tmp, \status, SR_SPP;             \
+               bnez \tmp, skip_ssp_restore;            \
+               REG_L \tmp, TASK_TI_USER_SSP(tp);       \
+               csrw CSR_SSP, \tmp;                     \
+               skip_ssp_restore:),
+               0,
+               RISCV_ISA_EXT_ZICFISS,
+               CONFIG_RISCV_USER_CFI)
+.endm
 
 SYM_CODE_START(handle_exception)
        /*
@@ -148,6 +177,7 @@ SYM_CODE_START(handle_exception)
 
        REG_L s0, TASK_TI_USER_SP(tp)
        csrrc s1, CSR_STATUS, t0
+       save_userssp s2, s1
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
@@ -243,6 +273,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
        call riscv_v_context_nesting_end
 #endif
        REG_L a0, PT_STATUS(sp)
+       restore_userssp s3, a0
        /*
         * The current load reservation is effectively part of the processor's
         * state, in the sense that load reservations cannot be shared between