// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>
#include <asm/switch_to.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
#include <asm/unwind.h>
#include <asm/mmu_context.h>
#include <asm/shstk.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

#ifdef CONFIG_X86_32
		.sp1 = TOP_OF_INIT_STACK,
#endif

		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif
	/* Drop the copied pointer to current's fpstate */
	dst->thread.fpu.fpstate = NULL;

	return 0;
}
void arch_release_task_struct(struct task_struct *tsk)
{
	if (fpu_state_size_dynamic())
		fpstate_free(&tsk->thread.fpu);
}
/*
 * Free thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	shstk_free(tsk);
	fpu__drop(fpu);
}
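/*
 * Install the thread-local storage area requested via CLONE_SETTLS:
 * through the GDT thread area for ia32 callers, via the FS base for
 * 64-bit ones.
 */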
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);

	return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
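/*
 * Common landing point for newly created tasks: finish the switch from
 * the previous task, run the supplied kernel function if there is one,
 * and drop to user mode via the syscall exit path.
 */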
__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
			     int (*fn)(void *), void *fn_arg)
{
	schedule_tail(prev);

	/* Is this a kernel thread? */
	if (unlikely(fn)) {
		fn(fn_arg);
		/*
		 * A kernel thread is allowed to return here after successfully
		 * calling kernel_execve(). Exit to userspace to complete the
		 * execve() syscall.
		 */
		regs->ax = 0;
	}

	syscall_exit_to_user_mode(regs);
}
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	unsigned long new_ssp;
	int ret = 0;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork_asm;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	p->thread.iopl_warn = 0;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM)
		set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	savesegment(gs, p->thread.gs);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif
	/*
	 * Allocate a new shadow stack for thread if needed. If shadow stack
	 * is disabled, new_ssp will remain 0, and fpu_clone() will know not to
	 * update it.
	 */
	new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size);
	if (IS_ERR_VALUE(new_ssp))
		return PTR_ERR((void *)new_ssp);

	fpu_clone(p, clone_flags, args->fn, new_ssp);
	/* Kernel thread? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		p->thread.pkru = pkru_get_init_value();
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, args->fn, args->fn_arg);
		return 0;
	}

	/*
	 * Clone current's PKRU value from hardware. tsk->thread.pkru
	 * is only valid when scheduled out.
	 */
	p->thread.pkru = read_pkru();

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	if (unlikely(args->fn)) {
		/*
		 * A user space thread, but it doesn't return to
		 * ret_after_fork().
		 *
		 * In order to indicate that to tools like gdb,
		 * we reset the stack and instruction pointers.
		 *
		 * It does the same kernel frame setup to return to a kernel
		 * function that a kernel thread does.
		 */
		childregs->sp = 0;
		childregs->ip = 0;
		kthread_frame_init(frame, args->fn, args->fn_arg);
	}
	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	/*
	 * If copy_thread() is failing, don't leak the shadow stack possibly
	 * allocated in shstk_alloc_thread_stack() above.
	 */
	if (ret)
		shstk_free(p);

	return ret;
}
static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu_flush_thread();
	pkru_flush_thread();
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}
static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
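/*
 * prctl(PR_GET_TSC) backend: report whether RDTSC raises SIGSEGV
 * (TIF_NOTSC set) or is allowed for the current task.
 */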
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
DEFINE_PER_CPU(u64, msr_misc_features_shadow);
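/*
 * Set or clear the CPUID-faulting enable bit in MSR_MISC_FEATURES_ENABLES,
 * keeping the per-CPU shadow copy of the MSR in sync.
 */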
static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}
static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}
static int set_cpuid_mode(unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(read_thread_flags());
	}

	mm_reset_untag_mask(current->mm);
}
#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}
static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}
/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might have
	 * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
	 * access from user space to trigger a #GP because the bitmap is outside
	 * the TSS limit.
	 */
	refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif
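/*
 * Per-core coordination of the SSBD state on HT siblings: both siblings
 * of a core share one ssb_state via the shared_state pointer, so that
 * MSR_AMD64_LS_CFG is only rewritten when the requirement of the whole
 * core changes.
 */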
#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);
void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}
/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core,
 * and the last sibling to disable it disables it for the whole core. This
 * is how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif
static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
/*
 * Update the MSRs managing speculation control during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		update_spec_ctrl_cond(msr);
}
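/*
 * Re-evaluate the speculation TIF bits of @tsk if a forced update was
 * requested via TIF_SPEC_FORCE_UPDATE and return the resulting flags.
 */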
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return read_task_thread_flags(tsk);
}
void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}
/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}
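/*
 * Toggle the given CR4 bits. The caller must have interrupts disabled so
 * the cached cpu_tlbstate.cr4 copy cannot get out of sync with the
 * hardware register.
 */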
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
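/*
 * Handle the TIF-flag-driven pieces of a context switch which do not live
 * in __switch_to() itself: I/O bitmap invalidation, block-step (BTF), TSC
 * and CPUID faulting, user-return notifiers and speculation control MSRs.
 */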
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = read_task_thread_flags(next_p);
	tifp = read_task_thread_flags(prev_p);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
	raw_local_irq_disable();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);

static bool x86_idle_set(void)
{
	return !!static_call_query(x86_idle);
}
#ifndef CONFIG_SMP
static inline void __noreturn play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
}
/*
 * Called from the generic idle code.
 */
void __cpuidle arch_cpu_idle(void)
{
	static_call(x86_idle)();
}
EXPORT_SYMBOL_GPL(arch_cpu_idle);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = x86_idle_set();

	static_call_update(x86_idle, default_idle);

	return ret;
}
#endif
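/*
 * Mask of CPUs which still need to acknowledge a stop request; each CPU
 * clears its own bit in stop_this_cpu() once it has gone offline.
 */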
struct cpumask cpus_stop_mask;

void __noreturn stop_this_cpu(void *dummy)
{
	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
	unsigned int cpu = smp_processor_id();

	local_irq_disable();

	/*
	 * Remove this CPU from the online mask and disable it
	 * unconditionally. This might be redundant in case that the reboot
	 * vector was handled late and stop_other_cpus() sent an NMI.
	 *
	 * According to SDM and APM NMIs can be accepted even after soft
	 * disabling the local APIC.
	 */
	set_cpu_online(cpu, false);
	disable_local_APIC();

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 *
	 * Test the CPUID bit directly because the machine might've cleared
	 * X86_FEATURE_SME due to cmdline options.
	 */
	if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
		native_wbinvd();

	/*
	 * This brings a cache line back and dirties it, but
	 * native_stop_other_cpus() will overwrite cpus_stop_mask after it
	 * observed that all CPUs reported stop. This write will invalidate
	 * the related cache line on this CPU.
	 */
	cpumask_clear_cpu(cpu, &cpus_stop_mask);

	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	tick_broadcast_exit();
}
/*
 * Prefer MWAIT over HALT if MWAIT is supported, the MWAIT_CPUID leaf
 * exists and, whenever MONITOR/MWAIT extensions are present, there is at
 * least one C1 substate.
 *
 * Do not prefer MWAIT if the MONITOR instruction has a bug or idle=nomwait
 * is passed on the kernel command line.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* User has disallowed the use of MWAIT. Fallback to HALT */
	if (boot_option_idle_override == IDLE_NOMWAIT)
		return 0;

	/* MWAIT is not supported on this platform. Fallback to HALT */
	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	/* Monitor has a bug. Fallback to HALT */
	if (boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/*
	 * If MWAIT extensions are not available, it is safe to use MWAIT
	 * with EAX=0, ECX=0.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
		return 1;

	/*
	 * If MWAIT extensions are available, there should be at least one
	 * MWAIT C1 substate present.
	 */
	return (edx & MWAIT_C1_SUBSTATE_MASK);
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched()) {
			__sti_mwait(0, 0);
			raw_local_irq_disable();
		}
	}
	__current_clr_polling();
}
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		static_call_update(x86_idle, amd_e400_idle);
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		static_call_update(x86_idle, mwait_idle);
	} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
		pr_info("using TDX aware idle routine\n");
		static_call_update(x86_idle, tdx_safe_halt);
	} else
		static_call_update(x86_idle, default_idle);
}
void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		static_call_update(x86_idle, default_idle);
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C1/C2/C3
		 * states.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
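/*
 * Subtract a random offset of up to 8 kB from the initial stack pointer
 * at exec time, unless the task or the system has stack randomization
 * disabled. The result is kept 16-byte aligned.
 */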
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(8192);
	return sp & ~0xf;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long __get_wchan(struct task_struct *p)
{
	struct unwind_state state;
	unsigned long addr = 0;

	if (!try_get_task_stack(p))
		return 0;

	for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (in_sched_functions(addr))
			continue;
		break;
	}

	put_task_stack(p);

	return addr;
}
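/*
 * Handle the arch_prctl() options common to 32-bit and 64-bit tasks:
 * CPUID faulting control and the XSTATE component permission interfaces.
 */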
long do_arch_prctl_common(int option, unsigned long arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
	case ARCH_GET_XCOMP_GUEST_PERM:
	case ARCH_REQ_XCOMP_GUEST_PERM:
		return fpu_xstate_prctl(option, arg2);
	}

	return -EINVAL;
}