/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, .Lsave_context
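
	/*
	 * The csrrw above atomically swaps tp with CSR_SCRATCH, which by
	 * convention holds the kernel tp while in userspace and 0 while in
	 * the kernel:
	 *
	 *   from user:   tp = kernel tp, CSR_SCRATCH = user tp (non-zero)
	 *   from kernel: tp = 0,         CSR_SCRATCH = kernel tp
	 *
	 * A non-zero tp after the swap therefore means the trap came from
	 * userspace.
	 */
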
.Lrestore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
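
	/*
	 * Overflow-check sketch, assuming THREAD_ALIGN is 2 * THREAD_SIZE
	 * under CONFIG_VMAP_STACK: a valid kernel sp has bit THREAD_SHIFT
	 * clear within its 2 * THREAD_SIZE-aligned stack region, so if
	 * reserving pt_regs below sp borrows into that bit, the stack has
	 * overflowed.
	 */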
#ifdef CONFIG_VMAP_STACK
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
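
	/*
	 * Note: the csrrc above both clears SR_SUM and SR_FS_VS in
	 * CSR_STATUS and returns the pre-trap status word in s1 in a single
	 * instruction, so the value saved to PT_STATUS reflects the
	 * interrupted context.
	 */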
	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5
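
	/*
	 * s5 holds the pre-trap value read back from CSR_SCRATCH: the old
	 * user tp if we came from userspace, or the current task pointer if
	 * we trapped from the kernel. The macro compares it against tp and
	 * only reloads the shadow call stack when the task changed.
	 */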

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_start
#endif

	move a0, sp /* pt_regs */
	la ra, ret_from_exception

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	tail do_irq
1:
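	/*
	 * Exception dispatch: s4 (the cause) indexes excp_vect_table. Each
	 * entry is one pointer, i.e. (1 << RISCV_LGPTR) bytes, so the byte
	 * offset of the handler is cause << RISCV_LGPTR; cause values whose
	 * entry would lie at or beyond excp_vect_table_end fall back to
	 * do_trap_unknown.
	 */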
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 3f
	REG_L t1, 0(t0)
2:	jalr t1
	j ret_from_exception
3:
	la t1, do_trap_unknown
	j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f
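
	/*
	 * s0 is now the previous-privilege field of the saved status (MPP or
	 * SPP): non-zero means we trapped from the kernel, so the
	 * user-return bookkeeping below is skipped.
	 */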
	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_end
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
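
	/*
	 * Illustrative LR/SC-based CAS (a sketch, not code from this file)
	 * showing how a taken branch around the SC leaves a reservation set:
	 *
	 *   1: lr.w  t0, (a0)       # acquire a reservation
	 *      bne   t0, a1, 2f     # taken branch skips the sc.w ...
	 *      sc.w  t1, a2, (a0)
	 *      bnez  t1, 1b
	 *   2:                      # ... so the reservation dangles here
	 *
	 * The dummy store-conditional above (rewriting PT_EPC with the value
	 * just loaded from it) is harmless and clears any such reservation
	 * before we return.
	 */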
	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)

	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* zero out x31 again and restore x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31
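
	/*
	 * The two csrrw swaps borrow x31 without needing a spare register:
	 * the first parks the live x31 in CSR_SCRATCH (known to be 0 in
	 * kernel context) so x31 can serve as a temporary for the per-CPU
	 * address computation; the second swaps the saved value back and,
	 * because x31 was just zeroed, leaves CSR_SCRATCH at 0 again.
	 */
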
	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save context to the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif /* CONFIG_VMAP_STACK */
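
/*
 * On entry (as set up by copy_thread(), a hedged reading of the generic
 * fork convention): s0 holds a kernel-thread function or 0 for a user
 * fork, s1 holds its argument, and sp points at the child's pt_regs.
 */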
SYM_CODE_START(ret_from_fork)
	call schedule_tail
	beqz s0, 1f /* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	la ra, ret_from_exception
	tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *                        void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S ra, STACKFRAME_RA(sp)
	REG_S s0, STACKFRAME_FP(sp)
	addi s0, sp, STACKFRAME_SIZE_ON_STACK
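
	/*
	 * s0 now points at the caller's original sp, so the new frame links
	 * into the caller's frame chain and the unwinder can walk from the
	 * IRQ stack back to the interrupted thread stack.
	 */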

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li t1, IRQ_STACK_SIZE
	add sp, t0, t1
	jalr a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L ra, STACKFRAME_RA(sp)
	REG_L s0, STACKFRAME_FP(sp)
	addi sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
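
	/*
	 * a3/a4 now point at prev->thread.ra and next->thread.ra; the
	 * TASK_THREAD_*_RA offsets below are relative to thread.ra, which
	 * keeps them small enough for load/store immediates.
	 */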
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Save the kernel shadow call stack pointer */
	scs_save_current
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	/* Switch to the next shadow call stack */
	scs_load_current
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
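	/*
	 * Indexed by the synchronous exception cause: entry 0 is cause 0
	 * (instruction address misaligned), entry 8 is cause 8 (ecall from
	 * U-mode, i.e. a system call), and so on. handle_exception
	 * bounds-checks the cause against excp_vect_table_end before
	 * dispatching.
	 */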
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
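
/*
 * __user_rt_sigreturn below is a two-instruction userspace trampoline: it
 * loads the rt_sigreturn syscall number and traps into the kernel. Signal
 * delivery can point the handler's return address at a copy of this code
 * (e.g. on the signal frame when no VDSO sigreturn is available), so
 * returning from the handler lets the kernel restore the interrupted
 * context.
 */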
SYM_DATA_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)