// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * to an identical power state.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
	while (1);
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

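/*
 * Strings used when displaying PSTATE.BTYPE, indexed by the field value;
 * for example, bstr(JC, "jc") expands to
 * [PSR_BTYPE_JC >> PSR_BTYPE_SHIFT] = "jc".
 */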
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
			pstate,
			pstate & PSR_AA32_N_BIT ? 'N' : 'n',
			pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
			pstate & PSR_AA32_C_BIT ? 'C' : 'c',
			pstate & PSR_AA32_V_BIT ? 'V' : 'v',
			pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
			pstate & PSR_AA32_T_BIT ? "T32" : "A32",
			pstate & PSR_AA32_E_BIT ? "BE" : "LE",
			pstate & PSR_AA32_A_BIT ? 'A' : 'a',
			pstate & PSR_AA32_I_BIT ? 'I' : 'i',
			pstate & PSR_AA32_F_BIT ? 'F' : 'f',
			pstate & PSR_AA32_DIT_BIT ? '+' : '-',
			pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-',
			pstate & PSR_TCO_BIT ? '+' : '-',
			pstate & PSR_DIT_BIT ? '+' : '-',
			pstate & PSR_SSBS_BIT ? '+' : '-',
			btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(0, SYS_TPIDR2_EL0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's copies
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF flags and buffers in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	/*
	 * In the unlikely event that we create a new thread with ZA
	 * enabled we should retain the ZA and ZT state so duplicate
	 * it here. This may be shortly freed if we exec() or if
	 * CLONE_SETTLS but it's simpler to do it here. To avoid
	 * confusing the rest of the code ensure that we have a
	 * sve_state allocated whenever sme_state is allocated.
	 */
	if (thread_za_enabled(&src->thread)) {
		dst->thread.sve_state = kzalloc(sve_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sve_state)
			return -ENOMEM;

		dst->thread.sme_state = kmemdup(src->thread.sme_state,
						sme_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sme_state) {
			kfree(dst->thread.sve_state);
			dst->thread.sve_state = NULL;
			return -ENOMEM;
		}
	} else {
		dst->thread.sme_state = NULL;
		clear_tsk_thread_flag(dst, TIF_SME);
	}

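	/* The new task's saved FP state is tracked in FPSIMD format to begin with. */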
	dst->thread.fp_type = FP_STATE_FPSIMD;

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);
		if (system_supports_tpidr2())
			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread. We also reset TPIDR2 if it's in use.
		 */
		if (clone_flags & CLONE_SETTLS) {
			p->thread.uw.tp_value = tls;
			p->thread.tpidr2_el0 = 0;
		}
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;

		p->thread.cpu_context.x19 = (unsigned long)args->fn;
		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
	/*
	 * For the benefit of the unwinder, set up childregs->stackframe
	 * as the final frame for the new task.
	 */
	p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
	if (system_supports_tpidr2() && !is_compat_task())
		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpus_have_const_cap(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Ensure access is disabled when switching to a 32bit task, ensure
 * access is enabled when switching to a 64bit task.
 */
static void erratum_1418040_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
	    !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
		return;

	if (is_compat_thread(task_thread_info(next)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void erratum_1418040_new_exec(void)
{
	preempt_disable();
	erratum_1418040_thread_switch(current);
	preempt_enable();
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}

/*
 * Thread switching.
 */
__notrace_funcgraph __sched
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	ssbs_thread_switch(next);
	erratum_1418040_thread_switch(next);
	ptrauth_thread_switch_user(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

struct wchan_info {
	unsigned long	pc;
	int		count;
};

static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wchan_info = arg;

	if (!in_sched_functions(pc)) {
		wchan_info->pc = pc;
		return false;
	}
	return wchan_info->count++ < 16;
}

unsigned long __get_wchan(struct task_struct *p)
{
	struct wchan_info wchan_info = {
		.pc = 0,
		.count = 0,
	};

	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);

	return wchan_info.pc;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
	if (!system_supports_32bit_el0())
		return false;

	if ((hdr)->e_machine != EM_ARM)
		return false;

	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
		return false;

	/*
	 * Prevent execve() of a 32-bit program from a deadline task
	 * if the restricted affinity mask would be inadmissible on an
	 * asymmetric system.
	 */
	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
		relax_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	mte_thread_init_user();
	erratum_1418040_new_exec();

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

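/*
 * A task opts in to the tagged address ABI (and, where supported, MTE tag
 * check faults) via prctl(), e.g.:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0);
 */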
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
			| PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

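/* e.g. "sysctl abi.tagged_addr_disabled=1" prevents further opt-ins. */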
static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif