#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
-#include <asm/memory.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
.macro kernel_ventry label
.align 7
sub sp, sp, #S_FRAME_SIZE
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * Test whether the SP has overflowed, without corrupting a GPR.
+ * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+ * should always be zero.
+ */
+ add sp, sp, x0 // sp' = sp + x0
+ sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
+ tbnz x0, #THREAD_SHIFT, 0f
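+ /* No overflow: undo the add/sub juggling above and take the normal path. */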
+ sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
+ sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
+ b \label
+
+0:
+ /*
+ * Either we've just detected an overflow, or we've taken an exception
+ * while on the overflow stack. Either way, we won't return to
+ * userspace, and can clobber EL0 registers to free up GPRs.
+ */
+
+ /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+ msr tpidr_el0, x0
+
+ /* Recover the original x0 value and stash it in tpidrro_el0 */
+ sub x0, sp, x0
+ msr tpidrro_el0, x0
+
+ /* Switch to the overflow stack */
+ adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
+
+ /*
+ * Check whether we were already on the overflow stack. This may happen
+ * after panic() re-enables interrupts.
+ */
+ mrs x0, tpidr_el0 // sp of interrupted context
+ sub x0, sp, x0 // delta with top of overflow stack
+ tst x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
+ b.ne __bad_stack // no? -> bad stack pointer
+
+ /* We were already on the overflow stack. Restore sp/x0 and carry on. */
+ sub sp, sp, x0
+ mrs x0, tpidrro_el0
+#endif
b \label
.endm
stp x22, x23, [sp, #S_PC]
- /*
- * Set syscallno to -1 by default (overridden later if real syscall).
- */
+ /* Not in a syscall by default (el0_svc overwrites for real syscall) */
.if \el == 0
- mvn x21, xzr
- str x21, [sp, #S_SYSCALLNO]
+ mov w21, #NO_SYSCALL
+ str w21, [sp, #S_SYSCALLNO]
.endif
/*
.macro kernel_exit, el
.if \el != 0
+ disable_daif
+
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
*
* x7 is reserved for the system call number in 32-bit mode.
*/
-sc_nr .req x25 // number of system calls
-scno .req x26 // syscall number
+wsc_nr .req w25 // number of system calls
+wscno .req w26 // syscall number
+xscno .req x26 // syscall number (zero-extended)
stbl .req x27 // syscall table pointer
tsk .req x28 // current thread_info
kernel_ventry el1_sync // Synchronous EL1h
kernel_ventry el1_irq // IRQ EL1h
kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error_invalid // Error EL1h
+ kernel_ventry el1_error // Error EL1h
kernel_ventry el0_sync // Synchronous 64-bit EL0
kernel_ventry el0_irq // IRQ 64-bit EL0
kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error_invalid // Error 64-bit EL0
+ kernel_ventry el0_error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
kernel_ventry el0_irq_compat // IRQ 32-bit EL0
kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid_compat // Error 32-bit EL0
+ kernel_ventry el0_error_compat // Error 32-bit EL0
#else
kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
#endif
END(vectors)
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * We detected an overflow in kernel_ventry, which switched to the
+ * overflow stack. Stash the exception regs, and head to our overflow
+ * handler.
+ */
+__bad_stack:
+ /* Restore the original x0 value */
+ mrs x0, tpidrro_el0
+
+ /*
+ * Store the original GPRs to the new stack. The original SP (minus
+ * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+ */
+ sub sp, sp, #S_FRAME_SIZE
+ kernel_entry 1
+ mrs x0, tpidr_el0
+ add x0, x0, #S_FRAME_SIZE
+ str x0, [sp, #S_SP]
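+ /* pt_regs->sp now holds the SP of the interrupted context for handle_bad_stack. */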
+
+ /* Stash the regs for handle_bad_stack */
+ mov x0, sp
+
+ /* Time to die */
+ bl handle_bad_stack
+ ASM_BUG()
+#endif /* CONFIG_VMAP_STACK */
+
/*
* Invalid mode handlers
*/
el0_fiq_invalid_compat:
inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
-
-el0_error_invalid_compat:
- inv_entry 0, BAD_ERROR, 32
-ENDPROC(el0_error_invalid_compat)
#endif
el1_sync_invalid:
* Data abort handling
*/
mrs x3, far_el1
- enable_dbg
- // re-enable interrupts if they were enabled in the aborted context
- tbnz x23, #7, 1f // PSR_I_BIT
- enable_irq
-1:
+ inherit_daif pstate=x23, tmp=x2
clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
- // disable interrupts before pulling preserved data off the stack
- disable_irq
kernel_exit 1
el1_sp_pc:
/*
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x2, sp
bl do_sp_pc_abort
ASM_BUG()
/*
* Undefined instruction
*/
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
bl do_undefinstr
ASM_BUG()
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
.align 6
el1_irq:
kernel_entry 1
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
+ cmp x24, #ESR_ELx_EC_SVE // SVE access
+ b.eq el0_sve_acc
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
/*
* AArch32 syscall handling
*/
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, compat_sys_call_table // load compat syscall table pointer
- uxtw scno, w7 // syscall number in w7 (r7)
- mov sc_nr, #__NR_compat_syscalls
+ mov wscno, w7 // syscall number in w7 (r7)
+ mov wsc_nr, #__NR_compat_syscalls
b el0_svc_naked
.align 6
el0_irq_compat:
kernel_entry 0, 32
b el0_irq_naked
+
+el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
#endif
el0_da:
* Data abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
clear_address_tag x0, x26
mov x1, x25
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
/*
* Floating Point or Advanced SIMD access
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
b ret_to_user
+el0_sve_acc:
+ /*
+ * Scalable Vector Extension access
+ */
+ enable_daif
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sve_acc
+ b ret_to_user
el0_fpsimd_exc:
/*
- * Floating Point or Advanced SIMD exception
+ * Floating Point, Advanced SIMD or SVE exception
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
/*
* Undefined instruction
*/
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, sp
bl do_undefinstr
/*
* System instructions, for trapped cache maintenance instructions
*/
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
mov x1, x25
mov x2, sp
bl do_debug_exception
- enable_dbg
+ enable_daif
ct_user_exit
b ret_to_user
el0_inv:
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
el0_irq:
kernel_entry 0
el0_irq_naked:
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
b ret_to_user
ENDPROC(el0_irq)
+el1_error:
+ kernel_entry 1
+ mrs x1, esr_el1
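+ /*
+ * Unmask debug exceptions only; IRQs, FIQ and SError stay masked while
+ * the SError is handled.
+ */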
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ kernel_exit 1
+ENDPROC(el1_error)
+
+el0_error:
+ kernel_entry 0
+el0_error_naked:
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ enable_daif
+ ct_user_exit
+ b ret_to_user
+ENDPROC(el0_error)
+
/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
ret_fast_syscall:
- disable_irq // disable interrupts
+ disable_daif
str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
enable_step_tsk x1, x2
kernel_exit 0
ret_fast_syscall_trace:
- enable_irq // enable interrupts
+ enable_daif
b __sys_trace_return_skipped // we already saved x0
/*
* "slow" syscall return path.
*/
ret_to_user:
- disable_irq // disable interrupts
+ disable_daif
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
*/
.align 6
el0_svc:
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, sys_call_table // load syscall table pointer
- uxtw scno, w8 // syscall number in w8
- mov sc_nr, #__NR_syscalls
+ mov wscno, w8 // syscall number in w8
+ mov wsc_nr, #__NR_syscalls
+
+#ifndef CONFIG_ARM64_SVE
+ b el0_svc_naked
+#else
+ tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
+ bic x16, x16, #_TIF_SVE // discard SVE state
+ str x16, [tsk, #TSK_TI_FLAGS]
+
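+ /*
+ * Clearing TIF_SVE here (and CPACR_EL1_ZEN_EL0EN below) means the next
+ * SVE instruction from userspace will trap to el0_sve_acc, so the
+ * discarded state is regenerated lazily instead of being preserved
+ * across the syscall.
+ */
+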
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case:
+ */
+ mrs x9, cpacr_el1
+ bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
+ msr cpacr_el1, x9 // synchronised by eret to el0
+#endif /* CONFIG_ARM64_SVE */
+
el0_svc_naked: // compat entry point
- stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- enable_dbg_and_irq
+ stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
+ enable_daif
ct_user_exit 1
- ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
- tst x16, #_TIF_SYSCALL_WORK
+ tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
b.ne __sys_trace
- cmp scno, sc_nr // check upper syscall limit
+ cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
- ldr x16, [stbl, scno, lsl #3] // address in the syscall table
+ ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
b ret_fast_syscall
ni_sys:
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov w0, #-1 // set default errno for
- cmp scno, x0 // user-issued syscall(-1)
+ cmp wscno, #NO_SYSCALL // user-issued syscall(-1)?
b.ne 1f
- mov x0, #-ENOSYS
+ mov x0, #-ENOSYS // set default errno if so
str x0, [sp, #S_X0]
1: mov x0, sp
bl syscall_trace_enter
- cmp w0, #-1 // skip the syscall?
+ cmp w0, #NO_SYSCALL // skip the syscall?
b.eq __sys_trace_return_skipped
- uxtw scno, w0 // syscall number (possibly new)
+ mov wscno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
- cmp scno, sc_nr // check upper syscall limit
+ cmp wscno, wsc_nr // check upper syscall limit
b.hs __ni_sys_trace
ldp x0, x1, [sp] // restore the syscall args
ldp x2, x3, [sp, #S_X2]
ldp x4, x5, [sp, #S_X4]
ldp x6, x7, [sp, #S_X6]
- ldr x16, [stbl, scno, lsl #3] // address in the syscall table
+ ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
__sys_trace_return: