#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/prctl.h>
+#include <linux/errno.h>
struct task_struct;
struct kernel_clone_args;
bool is_shstk_allocated(struct task_struct *task);
void set_shstk_lock(struct task_struct *task);
void set_shstk_status(struct task_struct *task, bool enable);
+unsigned long get_active_shstk(struct task_struct *task);
+int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr);
+int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr);
bool is_indir_lp_enabled(struct task_struct *task);
bool is_indir_lp_locked(struct task_struct *task);
void set_indir_lp_status(struct task_struct *task, bool enable);
#define set_indir_lp_lock(task) do {} while (0)
+#define restore_user_shstk(tsk, shstk_ptr) -EINVAL
+#define save_user_shstk(tsk, saved_shstk_ptr) -EINVAL
+#define get_active_shstk(task) 0UL
+
#endif /* CONFIG_RISCV_USER_CFI */
#endif /* __ASSEMBLER__ */
#include <asm/vector.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
+#include <asm/usercfi.h>
unsigned long signal_minsigstksz __ro_after_init;
extern u32 __user_rt_sigreturn[2];
static size_t riscv_v_sc_size __ro_after_init;
+static size_t riscv_zicfiss_sc_size __ro_after_init;
#define DEBUG_SIG 0
return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
}
+static long save_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
+{
+ struct __sc_riscv_cfi_state __user *state = sc_cfi;
+ unsigned long ss_ptr = 0;
+ long err = 0;
+
+ if (!is_shstk_enabled(current))
+ return 0;
+
+	/*
+	 * Save a pointer to the shadow stack itself on the shadow stack, as a
+	 * form of token. A token on the shadow stack gives the following
+	 * properties:
+	 * - Safe save and restore for shadow stack switching. Any save of a
+	 *   shadow stack must leave a token on the shadow stack, and any
+	 *   restore must check the token first. Since the shadow stack cannot
+	 *   easily be written with its own address, a restore without a
+	 *   matching save is difficult for an attacker to perform.
+	 * - A natural break. A token provides a natural break in the shadow
+	 *   stack, so a single linear range can be bucketed into distinct
+	 *   shadow stack segments. An sspopchk on a token detects the
+	 *   condition and faults to the kernel as a software check exception.
+	 */
+ err |= save_user_shstk(current, &ss_ptr);
+ err |= __put_user(ss_ptr, &state->ss_ptr);
+ if (unlikely(err))
+ return -EFAULT;
+
+ return riscv_zicfiss_sc_size;
+}
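
For intuition, here is what the save does to the shadow stack, as a sketch assuming the stack grows toward lower addresses with SHSTK_ENTRY_SIZE bytes per slot:

/*
 *   higher addresses
 *      |  ...       |
 *      |  ret addr  |  <- ssp before save (this value becomes the token)
 *      |  old ssp   |  <- ssp after save == token location, saved in ss_ptr
 *   lower addresses
 *
 * A genuine token therefore satisfies value - location == SHSTK_ENTRY_SIZE,
 * which is exactly what restore_user_shstk() checks below.
 */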
+
+static long __restore_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
+{
+ struct __sc_riscv_cfi_state __user *state = sc_cfi;
+ unsigned long ss_ptr = 0;
+ long err;
+
+	/*
+	 * Restore the shadow stack via the token stored on the shadow stack
+	 * itself; see the comment in save_cfiss_state() for why the token
+	 * makes this safe. restore_user_shstk() validates the token before
+	 * switching to it.
+	 */
+	err = __copy_from_user(&ss_ptr, &state->ss_ptr, sizeof(unsigned long));
+	if (unlikely(err))
+		return -EFAULT;
+
+ return restore_user_shstk(current, ss_ptr);
+}
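
Both helpers assume the usual extension framing: a __riscv_ctx_hdr carrying RISCV_ZICFISS_MAGIC and the record size, followed by a one-word payload. A sketch of the assumed uapi definition (it lives in the uapi headers of this series, not in this hunk):

struct __sc_riscv_cfi_state {
	unsigned long ss_ptr;	/* location of the restore token on the shadow stack */
};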
+
struct arch_ext_priv {
__u32 magic;
long (*save)(struct pt_regs *regs, void __user *sc_vec);
.magic = RISCV_V_MAGIC,
.save = &save_v_state,
},
+ {
+ .magic = RISCV_ZICFISS_MAGIC,
+ .save = &save_cfiss_state,
+ },
};
static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
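
For context, the save hooks registered above are assumed to be walked by the sigcontext setup code roughly like this (a sketch, not the exact loop from signal.c):

	for (i = 0; i < nr_arch_exts; i++) {
		if (!arch_ext_list[i].save)
			continue;
		size = arch_ext_list[i].save(regs, sc_ext_ptr);
		if (size < 0)
			return size;	/* e.g. -EFAULT from the hook */
		/* a zero return means the extension is inactive for this task */
		sc_ext_ptr += size;
	}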
err = __restore_v_state(regs, sc_ext_ptr);
break;
+ case RISCV_ZICFISS_MAGIC:
+ if (!is_shstk_enabled(current) || size != riscv_zicfiss_sc_size)
+ return -EINVAL;
+
+ err = __restore_cfiss_state(regs, sc_ext_ptr);
+ break;
default:
return -EINVAL;
}
total_context_size += riscv_v_sc_size;
}
+ if (is_shstk_enabled(current))
+ total_context_size += riscv_zicfiss_sc_size;
+
+	/*
+	 * Preserve space for the END __riscv_ctx_hdr that terminates the
+	 * signal context whenever any extension uses __riscv_extra_ext_header.
+	 */
+ if (total_context_size)
+ total_context_size += sizeof(struct __riscv_ctx_hdr);
+
frame_size += total_context_size;
frame_size = round_up(frame_size, 16);
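
As a worked example of the accounting above, with both V and Zicfiss active the extension area adds up as follows (illustrative only; the sizes are set in the init code below):

	total_context_size = riscv_v_sc_size		/* hdr + __sc_riscv_v_state + vsize */
			   + riscv_zicfiss_sc_size	/* hdr + __sc_riscv_cfi_state */
			   + sizeof(struct __riscv_ctx_hdr);	/* END marker */
	frame_size = round_up(frame_size + total_context_size, 16);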
#ifdef CONFIG_MMU
regs->ra = (unsigned long)VDSO_SYMBOL(
current->mm->context.vdso, rt_sigreturn);
+
+	/*
+	 * With backward CFI enabled, x1 (ra) and x5 (t0) are expected to match
+	 * on the return path, so mirror ra into t0. It is not yet clear
+	 * whether this is strictly required.
+	 */
+ if (is_shstk_enabled(current))
+ regs->t0 = regs->ra;
+
#else
/*
* For the nommu case we don't have a VDSO. Instead we push two
{
riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) +
sizeof(struct __sc_riscv_v_state) + riscv_v_vsize;
+
+ riscv_zicfiss_sc_size = sizeof(struct __riscv_ctx_hdr) +
+ sizeof(struct __sc_riscv_cfi_state);
/*
* Determine the stack space required for guaranteed signal delivery.
* The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry
task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
}
+unsigned long get_active_shstk(struct task_struct *task)
+{
+ return task->thread_info.user_cfi_state.user_shdw_stk;
+}
+
void set_shstk_status(struct task_struct *task, bool enable)
{
if (!cpu_supports_shadow_stack())
		return;
}
+/*
+ * Save the user shadow stack pointer on the shadow stack itself and return a
+ * pointer to the saved location. Returns -EINVAL on a bad argument and
+ * -EFAULT if the token cannot be written.
+ */
+int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
+{
+ unsigned long ss_ptr = 0;
+ unsigned long token_loc = 0;
+ int ret = 0;
+
+ if (!saved_shstk_ptr)
+ return -EINVAL;
+
+ ss_ptr = get_active_shstk(tsk);
+ ret = create_rstor_token(ss_ptr, &token_loc);
+
+ if (!ret) {
+ *saved_shstk_ptr = token_loc;
+ set_active_shstk(tsk, token_loc);
+ }
+
+ return ret;
+}
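
create_rstor_token() is not part of this hunk; a minimal sketch of what it is assumed to do, reusing the amo_user_shstk() primitive seen in restore_user_shstk() below (hypothetical helper; the real names and checks may differ):

static int create_rstor_token_sketch(unsigned long ssp, unsigned long *token_addr)
{
	unsigned long addr;

	/* The token must land on a shadow stack slot boundary */
	if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
		return -EINVAL;

	/* Store the pre-save ssp in the slot just below it */
	addr = ssp - SHSTK_ENTRY_SIZE;
	if (amo_user_shstk((unsigned long __user *)addr, ssp) == -1)
		return -EFAULT;

	*token_addr = addr;
	return 0;
}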
+
+/*
+ * Restore the user shadow stack pointer for task 'tsk' from the token on the
+ * shadow stack. Returns -EFAULT if the token cannot be read and -EINVAL if
+ * it is not a valid restore token.
+ */
+int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
+{
+ unsigned long token = 0;
+
+ token = amo_user_shstk((unsigned long __user *)shstk_ptr, 0);
+
+ if (token == -1)
+ return -EFAULT;
+
+	/* Invalid token: return -EINVAL */
+ if ((token - shstk_ptr) != SHSTK_ENTRY_SIZE) {
+ pr_info_ratelimited("%s[%d]: bad restore token in %s: pc=%p sp=%p, token=%p, shstk_ptr=%p\n",
+ tsk->comm, task_pid_nr(tsk), __func__,
+ (void *)(task_pt_regs(tsk)->epc),
+ (void *)(task_pt_regs(tsk)->sp),
+ (void *)token, (void *)shstk_ptr);
+ return -EINVAL;
+ }
+
+ /* all checks passed, set active shstk and return success */
+ set_active_shstk(tsk, token);
+ return 0;
+}
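
A worked example of the validity check, assuming RV64 and SHSTK_ENTRY_SIZE == 8: if the signal frame recorded shstk_ptr == 0x7fff00000ff8, a genuine token at that slot reads back 0x7fff00001000 (the pre-save ssp), so token - shstk_ptr == 8 and the restore proceeds; any other value fails with -EINVAL.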
+
static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
unsigned long token_offset, bool set_tok)
{