// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
#ifdef CONFIG_X86_32
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
	.SYSENTER_stack_canary	= STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
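
/*
 * For illustration only (not part of this file): the two helpers above back
 * the generic PR_GET_TSC/PR_SET_TSC prctl(2) options, so a task can opt out
 * of RDTSC from userspace roughly like this:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int tsc_mode;
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);	// RDTSC now raises SIGSEGV
 *	prctl(PR_GET_TSC, &tsc_mode, 0, 0, 0);		// read the current mode back
 */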
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}
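
/*
 * Background: with CPUID faulting enabled, a CPUID instruction executed at
 * CPL > 0 takes a #GP, which the kernel delivers to the offending task as
 * SIGSEGV. The helpers below keep the per-task TIF_NOCPUID flag and the MSR
 * value in sync.
 */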
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/* Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context. */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/* Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context. */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();
}
static inline void switch_to_bitmap(struct tss_struct *tss,
				    struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	if (tifn & _TIF_IO_BITMAP) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}
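
/*
 * For illustration only (not part of this file): a task acquires an IO
 * bitmap in the first place via the ioperm(2) syscall, which allocates
 * thread.io_bitmap_ptr and sets TIF_IO_BITMAP, e.g.:
 *
 *	#include <sys/io.h>
 *
 *	if (ioperm(0x378, 4, 1) == 0)	// allow ports 0x378..0x37b, needs CAP_SYS_RAWIO
 *		outb(0xff, 0x378);	// direct port access from user space
 */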
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(tss, prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		/* Mirror next's TIF_BLOCKSTEP into the DEBUGCTL BTF bit. */
		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/* Only touch CR4/MSR state when the flag actually changes hands. */
	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/* Called from the generic idle code. */
void arch_cpu_idle(void)
{
	x86_idle();
}

/* We use this if we don't have any better idle routine.. */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;) {
		/*
		 * Use wbinvd followed by hlt to stop the processor. This
		 * provides support for kexec on a processor that supports
		 * SME. With kexec, going from SME inactive to SME active
		 * requires clearing cache entries so that addresses without
		 * the encryption bit set don't corrupt the same physical
		 * address that has the encryption bit set when caches are
		 * flushed. To achieve this a wbinvd is performed followed by
		 * a hlt. Even if the processor is not in the kexec/SME
		 * scenario this only adds a wbinvd to a halting processor.
		 */
		asm volatile("wbinvd; hlt" : : : "memory");
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();
	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
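
/*
 * The handler above is wired to the "idle=" kernel command line option via
 * early_param(), i.e. booting with "idle=poll", "idle=halt" or "idle=nomwait"
 * selects the corresponding behaviour before any cpuidle driver loads.
 */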
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Randomize the brk within a 32 MB (0x02000000 byte) window. */
	return randomize_page(mm->brk, 0x02000000);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The tasks stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}
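
/*
 * The address found by get_wchan() is what ends up, symbolized, in
 * /proc/<pid>/wchan and in the "wchan" column of ps/top: the first
 * non-scheduler function the task is blocked in.
 */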
long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}
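
/*
 * For illustration only (not part of this file): these options are reached
 * from userspace through arch_prctl(2). A sketch, assuming no libc wrapper
 * for arch_prctl, so the raw syscall is used:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);		// CPUID now raises SIGSEGV
 *	int on = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);	// 1 if CPUID is allowed
 */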